1. The FastDFS tracker

I. The tracker main flow

This article is based on FastDFS v4.06. Before diving in, these two articles give a good overview of what the main flow does:

http://blog.chinaunix.net/uid-20498361-id-3328763.html

http://yangbajing.blog.chinaunix.net/uid-26786622-id-3146373.html


II. tracker_service_init()

Note: this function spawns four worker threads. Work is handed to these threads by writing data into each thread's own pipe; the thread detects the pipe read event (via libevent) and then calls recv_notify_read(). A sketch of how that event gets registered follows the listing below.

int tracker_service_init()  //error handling has been stripped from this listing
{
	int result;
	struct tracker_thread_data *pThreadData;
	struct tracker_thread_data *pDataEnd;
	pthread_t tid;
	pthread_attr_t thread_attr;

	if ((result=init_pthread_lock(&tracker_thread_lock)) != 0) //initialize the thread mutex
	{
		return result;
	}

	if ((result=init_pthread_lock(&lb_thread_lock)) != 0)
	{
		return result;
	}

	if ((result=init_pthread_attr(&thread_attr, g_thread_stack_size)) != 0) //set the thread attributes (stack size)
	{
	}
//initialize the g_free_queue structure and allocate memory for the corresponding g_mpool: 256 blocks of (block_size + 8192) bytes linked into a list, 256 being the maximum number of connections.
	if ((result=free_queue_init(g_max_connections, TRACKER_MAX_PACKAGE_SIZE,\
                TRACKER_MAX_PACKAGE_SIZE, sizeof(TrackerClientInfo))) != 0)
	{
		return result;
	}
//allocate one thread data structure per worker thread; g_work_threads is 4 here
	g_thread_data = (struct tracker_thread_data *)malloc(sizeof( \
				struct tracker_thread_data) * g_work_threads);
	if (g_thread_data == NULL)
	{
	}

	g_tracker_thread_count = 0;
	pDataEnd = g_thread_data + g_work_threads;
	for (pThreadData=g_thread_data; pThreadData<pDataEnd; pThreadData++)
	{
		pThreadData->ev_base = event_base_new(); //note: newer libevent no longer uses event_init(), which is not thread safe
		if (pThreadData->ev_base == NULL)
		{
		}

		if (pipe(pThreadData->pipe_fds) != 0)
		{
		}

#if defined(OS_LINUX)
		if ((result=fd_add_flags(pThreadData->pipe_fds[0], \
				O_NONBLOCK | O_NOATIME)) != 0)
		{
			break;
		}
#else
#endif
//create the worker thread; its entry function is work_thread_entrance(), which adds the pipe's read event to libevent and uses recv_notify_read() as the event handler.
		if ((result=pthread_create(&tid, &thread_attr, \
			work_thread_entrance, pThreadData)) != 0)
		{
			break;
		}
		else
		{
			if ((result=pthread_mutex_lock(&tracker_thread_lock)) != 0)
			{
			}
			g_tracker_thread_count++;
			if ((result=pthread_mutex_unlock(&tracker_thread_lock)) != 0)
			{
			}
		}
	}

	pthread_attr_destroy(&thread_attr);

	return 0;
}
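
work_thread_entrance() itself is not reproduced in this article. As a rough sketch of what the comment above describes, written against the libevent 2.x API (the flags, the argument passed to recv_notify_read() and the error handling in the real tracker_service.c differ in detail), the registration looks roughly like this:

static void *work_thread_entrance(void *arg)  //sketch only, not the actual source
{
	struct tracker_thread_data *pThreadData;
	struct event *pipe_event;

	pThreadData = (struct tracker_thread_data *)arg;

	//watch the read end of this thread's pipe; EV_PERSIST keeps the event
	//armed so every write from tracker_accept_loop() triggers the callback
	pipe_event = event_new(pThreadData->ev_base, pThreadData->pipe_fds[0],
			EV_READ | EV_PERSIST, recv_notify_read, pThreadData);
	event_add(pipe_event, NULL);

	//process events until the event base is told to stop
	event_base_dispatch(pThreadData->ev_base);

	event_free(pipe_event);
	return NULL;
}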

III. sched_start

For result = sched_start(&scheduleArray, &schedule_tid, g_thread_stack_size, (bool * volatile)&g_continue_flag): sched_start launches the thread sched_thread_entrance (sched_thread.c, line 260), which runs the registered tasks at their configured intervals. The tasks here are log_sync_func, tracker_mem_check_alive and tracker_write_status_to_file; these three functions will be looked at later.
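
To follow the code below it helps to have the shape of the scheduling structures in mind. Paraphrased from sched_thread.h (field order and exact types may differ slightly from the real header), they look roughly like this:

typedef int (*TaskFunc)(void *args);

typedef struct tagScheduleEntry
{
	int id;                         //task id
	TimeInfo time_base;             //hour/minute of the first run
	int interval;                   //seconds between two runs
	TaskFunc task_func;             //the function executed by the schedule thread
	void *func_args;                //argument passed to task_func
	time_t next_call_time;          //computed: when to run next
	struct tagScheduleEntry *next;  //chain link, ordered by next_call_time
} ScheduleEntry;

typedef struct
{
	ScheduleEntry *entries;
	int count;
} ScheduleArray;

typedef struct
{
	ScheduleArray scheduleArray;
	ScheduleEntry *head;            //entry with the smallest next_call_time
	ScheduleEntry *tail;
	bool *pcontinue_flag;           //points at g_continue_flag
} ScheduleContext;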

static void *sched_thread_entrance(void *args)
{
	ScheduleContext *pContext;
	ScheduleEntry *pPrevious;
	ScheduleEntry *pCurrent;
	ScheduleEntry *pSaveNext;
	ScheduleEntry *pNode;
	ScheduleEntry *pUntil;
	int exec_count;
	int i;
	int sleep_time;

	pContext = (ScheduleContext *)args;
	if (sched_init_entries(&(pContext->scheduleArray)) != 0) //set each entry's next_call_time
	{
		free(pContext);
		return NULL;
	}
	sched_make_chain(pContext); //sort the entries by next_call_time and link them into a chain

	g_schedule_flag = true;
	while (*(pContext->pcontinue_flag)) //the thread spends the rest of its life in this loop
	{
		sched_check_waiting(pContext); //wait here for task timers to come due and record how many tasks are due
		if (pContext->scheduleArray.count == 0)  //no schedule entry
		{
			sleep(1);
			g_current_time = time(NULL);
			continue;
		}

		g_current_time = time(NULL);
		sleep_time = pContext->head->next_call_time - g_current_time;

		/*
		//fprintf(stderr, "count=%d, sleep_time=%d\n", \
			pContext->scheduleArray.count, sleep_time);
		*/
		while (sleep_time > 0 && *(pContext->pcontinue_flag))
		{
			sleep(1);
			g_current_time = time(NULL);
			if (sched_check_waiting(pContext) == 0)
			{
				break;
			}
			sleep_time--;
		}

		if (!(*(pContext->pcontinue_flag)))
		{
			break;
		}

		exec_count = 0;
		pCurrent = pContext->head;
		while (*(pContext->pcontinue_flag) && (pCurrent != NULL \
			&& pCurrent->next_call_time <= g_current_time))
		{
			//fprintf(stderr, "exec task id=%d\n", pCurrent->id);
			pCurrent->task_func(pCurrent->func_args); //invoke the entry's task function
			pCurrent->next_call_time = g_current_time + \
						pCurrent->interval;
			pCurrent = pCurrent->next;
			exec_count++;
		}

		if (exec_count == 0 || pContext->scheduleArray.count == 1)
		{
			continue;
		}

		if (exec_count > pContext->scheduleArray.count / 2) //if more than half of the tasks came due, rebuild the whole chain
		{
			sched_make_chain(pContext);
			continue;
		}

		pNode = pContext->head;    //if only a few tasks came due, restore the order by re-inserting them one by one as follows
		pContext->head = pCurrent;  //new chain head
		for (i=0; i<exec_count; i++)
		{
			if (pNode->next_call_time >= pContext->tail->next_call_time)
			{
				pContext->tail->next = pNode;
				pContext->tail = pNode;
				pNode = pNode->next;
				pContext->tail->next = NULL;
				continue;
			}

			pPrevious = NULL;
			pUntil = pContext->head;
			while (pUntil != NULL && \
				pNode->next_call_time > pUntil->next_call_time)
			{
				pPrevious = pUntil;
				pUntil = pUntil->next;
			}

			pSaveNext = pNode->next;
			if (pPrevious == NULL)
			{
				pContext->head = pNode;
			}
			else
			{
				pPrevious->next = pNode;
			}
			pNode->next = pUntil;

			pNode = pSaveNext;
		}
	}

	g_schedule_flag = false;

	logDebug("file: "__FILE__", line: %d, " \
		"schedule thread exit", __LINE__);

	free(pContext);
	return NULL;
}
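
A concrete illustration of the re-insertion branch: suppose the chain is A(next_call_time=100) -> B(130) -> C(160) and only A fires at t=100. Then exec_count is 1 and pCurrent stops at B, so B becomes the new head; A, whose next_call_time has been advanced to 100 + interval, is walked along the B -> C chain and inserted in front of the first entry with a later next_call_time (or appended after the tail). This keeps the chain sorted without the full re-sort that sched_make_chain() would perform.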

IV. tracker_relationship_init --> relationship_thread_entrance

http://www3.xuebuyuan.com/1542033.html

tracker_relationship_init creates a thread that runs relationship_thread_entrance(). Note that on the first pass through the loop g_tracker_servers.servers is NULL.

static void *relationship_thread_entrance(void* arg)
{
#define MAX_SLEEP_SECONDS  10

	int fail_count;
	int sleep_seconds;

	fail_count = 0;
	while (g_continue_flag) //the thread loops here indefinitely
	{
		sleep_seconds = 1;
		if (g_tracker_servers.servers != NULL)
		{
			if (g_tracker_servers.leader_index < 0)
			{
				if (relationship_select_leader() != 0)
				{
					sleep_seconds = 1 + (int)((double)rand()
					* (double)MAX_SLEEP_SECONDS / RAND_MAX);
				}
			}
			else
			{
				if (relationship_ping_leader() == 0)
				{
					fail_count = 0;
				}
				else
				{
					fail_count++;
					if (fail_count >= 3)
					{
						g_tracker_servers.leader_index = -1;
					}
				}
			}
		}

		if (g_last_tracker_servers != NULL)
		{
			tracker_mem_file_lock();

			free(g_last_tracker_servers);
			g_last_tracker_servers = NULL;

			tracker_mem_file_unlock();
		}

		sleep(sleep_seconds);
	}

	return NULL;
}
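
Two points worth noting about this loop: when leader election fails, the thread backs off for a random 1 to MAX_SLEEP_SECONDS+1 seconds (roughly 1 to 11 s) before retrying, so that several trackers starting at the same time do not keep colliding; once a leader exists, the thread pings it every second and resets leader_index to -1 only after 3 consecutive ping failures, which triggers a new election on a later iteration.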

V. log_set_cache(true)

This sets g_log_context.log_to_cache = 1, i.e. "write to buffer firstly, then sync to disk": log records go into an in-memory buffer and reach the disk later.
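
The periodic flush of that buffer is the job of log_sync_func, one of the three scheduled tasks from section III. Schematically (the flush conditions in the comments describe logger.c in broad strokes, not line by line):

log_set_cache(true);           //g_log_context.log_to_cache = 1
logInfo("tracker started");    //appended to the in-memory log buffer only
//the buffer is written out when it has no room left for the next record,
//when the log_sync_func task registered with sched_start fires, and
//finally when log_destroy() is called at shutdown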


VI. tracker_accept_loop(sock)

When a connection arrives, one worker thread is woken up by writing incomesock into its pipe; that thread then calls recv_notify_read(), as described under tracker_service_init().

void tracker_accept_loop(int server_sock)
{
	int incomesock;
	struct sockaddr_in inaddr;
	socklen_t sockaddr_len;
	struct tracker_thread_data *pThreadData;

	while (g_continue_flag)
	{
		sockaddr_len = sizeof(inaddr);
		incomesock = accept(server_sock, (struct sockaddr*)&inaddr, &sockaddr_len);
		if (incomesock < 0) //error
		{
			if (!(errno == EINTR || errno == EAGAIN))
			{
			}
			continue;
		}

		pThreadData = g_thread_data + incomesock % g_work_threads;
		if (write(pThreadData->pipe_fds[1], &incomesock, \
			sizeof(incomesock)) != sizeof(incomesock))
		{
			close(incomesock);
		}
	}
}
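
Connections are spread over the four workers simply by incomesock % g_work_threads. On the receiving side, recv_notify_read() (to be analysed in a later article) reads the descriptors back out of the pipe. A rough sketch of that hand-off only, not the actual FastDFS source, which also handles errors and sets up the client connection:

static void recv_notify_read(int sock, short event, void *arg)  //sketch only
{
	int incomesock;
	int bytes;

	while (1)
	{
		//drain the fds written by tracker_accept_loop(); the pipe is non-blocking
		bytes = read(sock, &incomesock, sizeof(incomesock));
		if (bytes <= 0)  //EAGAIN (drained) or pipe closed
		{
			break;
		}

		//...take a task from g_free_queue and register the client
		//socket's read event with this worker's ev_base
	}
}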

VII. Follow-up work

Threads covered in this article: the main thread, the 4 worker threads, one scheduling thread and one leader-election thread. The functions not expanded here, recv_notify_read(), the three scheduled task functions, and the leader-election logic driven by relationship_thread_entrance(), will be analysed in later articles. For the other handler functions related to the tracker main function, see the source code.

