struct thread_info->flags值:
* thread information flags:
* TIF_SIGPENDING - signal pending //有信號沒有處理
* TIF_NEED_RESCHED- rescheduling necessary //當一個睡眠的進程被喚醒,當其要加入運行隊列時,如果其動態優先級比當前正在運行進程current的優先級高,那麼會在current線程上設置TIF_NEED_RESCHED,以告訴內核有新的高優先級的線程在等待內核調度。通常,一個睡眠的進程會在中斷處理函數中被喚醒。
struct thread_info->preempt_count 值(線程的搶佔計數器):
= 0:線程可以被搶佔
> 0: 線程已經被禁止搶佔,被搶佔的情況分爲:
1)preempt_count的低8位表示:當前線程禁止搶佔(例如持有自旋鎖)的嵌套次數,比如成功調用_raw_spin_lock()等加鎖函數時,該值會+1(注意:只有帶_irq後綴的加鎖版本纔會同時關閉中斷,_raw_spin_trylock()本身並不關中斷)
2)preempt_count的中間8位表示:當前線程禁止soft irq和tasklet下半段(或者說推遲函數被禁止)的次數,見__local_bh_disable(SOFTIRQ_OFFSET)和local_bh_disable()
3)preempt_count的bit[16-25]表示:當前線程被硬中斷打斷的次數,見irq_enter()
4)preempt_count的bit 26 表示:當前線程正在處理不可屏蔽中斷(NMI)(NMI_BITS只有1個bit,是標誌位而非計數)
5)preempt_count的bit 30 (PREEMPT_ACTIVE)表示:當前線程上正有一次內核搶佔正在進行,一般爲1,也就是說線程被內核其他線程搶佔的過程中不能再被嵌套搶佔
另外,通過把搶佔計數器設置爲正而顯式地禁止內核搶佔,由preempt_disable完成。
/*
* We put the hardirq and softirq counter into the preemption
* counter. The bitmask has the following meaning:
*
* - bits 0-7 are the preemption count (max preemption depth: 256)
* - bits 8-15 are the softirq count (max # of softirqs: 256)
*
* The hardirq count can in theory reach the same as NR_IRQS.
* In reality, the number of nested IRQS is limited to the stack
* size as well. For archs with over 1000 IRQS it is not practical
* to expect that they will all nest. We give a max of 10 bits for
* hardirq nesting. An arch may choose to give less than 10 bits.
* m68k expects it to be 8.
*
* - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
* - bit 26 is the NMI_MASK
* - bit 28 is the PREEMPT_ACTIVE flag // NOTE(review): appears to have been redefined as bit 30 (0x40000000) — see PREEMPT_ACTIVE below
*
* PREEMPT_MASK: 0x000000FF
* SOFTIRQ_MASK: 0x0000FF00
* HARDIRQ_MASK: 0x03FF0000
* NMI_MASK: 0x04000000
*/
#define PREEMPT_BITS 8 /* bits 0-7: preemption-disable nesting depth */
#define SOFTIRQ_BITS 8 /* bits 8-15: softirq-disable nesting depth */
#define NMI_BITS 1 /* bit 26: single flag bit, set while an NMI is being handled */
#define MAX_HARDIRQ_BITS 10 /* upper bound on hardirq-nesting bits (bits 16-25) */
#ifndef HARDIRQ_BITS
# define HARDIRQ_BITS MAX_HARDIRQ_BITS /* an arch may predefine fewer bits (m68k uses 8) */
#endif
搶佔計數的增減本質上都是通過以下宏來實現的,有些函數只是它們的封裝:
/* Core primitives that raise/lower the current thread's preempt_count;
 * all higher-level preempt/softirq/hardirq accounting wraps these two. */
# define add_preempt_count(val) do { preempt_count() += (val); } while (0)
# define sub_preempt_count(val) do { preempt_count() -= (val); } while (0)
注意鎖相關的封裝宏如下:
#define preempt_disable()
->#define inc_preempt_count() add_preempt_count(1)
#define preempt_enable()
->... sub_preempt_count(1)
/* If the current thread has TIF_NEED_RESCHED set (a higher-priority task
 * is waiting), enter the preemptive scheduler; used on the
 * preempt_enable() path. */
#define preempt_check_resched() \
do { \
if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
preempt_schedule(); \
} while (0)
宏CONFIG_PREEMPT:表示是否開啓搶佔當前線程的功能(包括內核態搶佔和用戶態搶佔)。
比如在IRQ中斷處理過程中,當irq_handler處理完後準備返回內核空間或者用戶空間時,是否能夠搶佔當前執行的線程,見entry-armv.S:
__irq_svc: // IRQ exception entry, taken while the CPU was in SVC (kernel) mode
svc_entry
irq_handler // generic hardware-interrupt dispatch (runs the actual ISR)
...
#ifdef CONFIG_PREEMPT
get_thread_info tsk
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count // load current thread's preempt_count
ldr r0, [tsk, #TI_FLAGS] @ get flags
teq r8, #0 @ if preempt count != 0 // test whether preemption is currently allowed
movne r0, #0 @ force flags to 0 // preempt_count > 0: preemption disabled, so clear the flags copy to skip the resched check
tst r0, #_TIF_NEED_RESCHED // preempt_count == 0: does a higher-priority task want the CPU (_TIF_NEED_RESCHED set)?
blne svc_preempt // yes: perform kernel preemption before returning from the exception
#endif
svc_exit r5 @ return from exception
UNWIND(.fnend )
ENDPROC(__irq_svc)
.ltorg
#ifdef CONFIG_PREEMPT
svc_preempt:
mov r8, lr
1: bl preempt_schedule_irq @ irq en/disable is done inside // preemptively schedule another task
ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
tst r0, #_TIF_NEED_RESCHED
moveq pc, r8 @ go again
b 1b
#endif
/*
* this is the entry point to schedule() from kernel preemption
* off of irq context.
* Note, that this is called and return with irqs disabled. This will
* protect us against recursive calling from irq.
*/
asmlinkage void __sched preempt_schedule_irq(void) // called with IRQs off (e.g. from the IRQ-return path), i.e. preemption implicitly blocked
{
struct thread_info *ti = current_thread_info();
/* Catch callers which need to be fixed */
BUG_ON(ti->preempt_count || !irqs_disabled());
do {
add_preempt_count(PREEMPT_ACTIVE); // set bit 30 (0x40000000): a kernel preemption is now in progress on this task
local_irq_enable(); // re-enable IRQs; the task being switched to needs (at least) timer interrupts
__schedule(); // hand the CPU to the highest-priority runnable task
local_irq_disable(); // restore the irqs-off state the caller expects on return
sub_preempt_count(PREEMPT_ACTIVE); // clear PREEMPT_ACTIVE: this round of preemption is complete
/*
* Check again in case we missed a preemption opportunity
* between schedule and now.
*/
barrier();
} while (need_resched()); // loop if yet another higher-priority task became runnable meanwhile
}
/*
* We use bit 30 of the preempt_count to indicate that kernel
* preemption is occurring. See <asm/hardirq.h>.
*/
#define PREEMPT_ACTIVE 0x40000000 /* 1 << 30 */
/*
* this is the entry point to schedule() from in-kernel preemption
* off of preempt_enable. Kernel preemptions off return from interrupt
* occur there and call schedule directly.
*/
asmlinkage void __sched notrace preempt_schedule(void) // preemption point taken when preemption is enabled (e.g. from preempt_enable())
{
struct thread_info *ti = current_thread_info();
/*
* If there is a non-zero preempt_count or interrupts are disabled,
* we do not want to preempt the current task. Just return..
*/
if (likely(ti->preempt_count || irqs_disabled())) // bail out if preemption is disabled or IRQs are off; an in-progress preemption (PREEMPT_ACTIVE set) therefore never nests
return;
do {
add_preempt_count_notrace(PREEMPT_ACTIVE); // mark kernel preemption in progress (bit 30)
__schedule(); // switch to the highest-priority runnable task
sub_preempt_count_notrace(PREEMPT_ACTIVE); // this round of preemption is complete
/*
* Check again in case we missed a preemption opportunity
* between schedule and now.
*/
barrier();
} while (need_resched()); // repeat if a reschedule request became pending in between
}
用戶搶佔發生的時機:
1)從系統調用返回用戶空間的時候
2)中斷處理程序返回用戶空間的時候。
內核搶佔發生的時機:
1)從中斷(異常)返回時,preempt_count爲0且need_resched置位(見從中斷返回);
2)在異常處理程序中(特別是系統調用)調用preempt_enable()來允許內核搶佔發生;
【Linux Kernel】搶佔
發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.