Linux kernel mutex implementation explained in detail (ARM processors)


1. The mutex structure (struct mutex)

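For reference, struct mutex in a 4.x (pre-4.11) kernel looks roughly like the sketch below, based on include/linux/mutex.h; the conditional fields depend on the kernel configuration.

struct mutex {
    /* 1: unlocked, 0: locked, negative: locked, possible waiters */
    atomic_t                count;
    spinlock_t              wait_lock;
    struct list_head        wait_list;
#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
    struct task_struct      *owner;
#endif
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
    struct optimistic_spin_queue osq; /* spinner MCS lock */
#endif
#ifdef CONFIG_DEBUG_MUTEXES
    void                    *magic;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
    struct lockdep_map      dep_map;
#endif
};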

  • count: the lock counter. 1 means the mutex is free, 0 means it is held, and a negative value means it is held and there may be waiters to wake.
  • owner: the task (thread/process) that currently holds the lock. I have not dug into every detail here; on a real-time system an owner-style pointer like this is what lets the kernel deal with priority inversion: a high-priority task that fails to take a mutex held by a low-priority task can locate the owner and deliberately switch to it, and when the low-priority task releases the mutex it notices that a high-priority task is blocked on it and reschedules to hand the CPU over.
  • wait_list: the list of tasks waiting for the mutex; when the mutex is released, the task at the head of wait_list is woken.
  • wait_lock: wait_list is shared by multiple tasks (processes/threads), so it is protected by a spinlock. A spinlock busy-loops until it gets the lock and never puts the task to sleep, which makes it suitable for very short critical sections.

2. Mutex initialization (__mutex_init)

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
    atomic_set(&lock->count, 1); // set the lock counter to 1: the mutex starts out free
    spin_lock_init(&lock->wait_lock); // initialize the wait_lock spinlock (owner and next both start at 0; see the spinlock write-up for details)
    INIT_LIST_HEAD(&lock->wait_list); // initialize wait_list, the list of tasks waiting for the mutex
    mutex_clear_owner(lock); // clear the owner pointer
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
    osq_lock_init(&lock->osq);
#endif

    debug_mutex_init(lock, name, key);
}
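In practice, drivers rarely call __mutex_init() directly; they use DEFINE_MUTEX() for statically allocated locks or the mutex_init() macro, which expands to __mutex_init() with the lock's name and a static lockdep class key. A minimal usage sketch (my_dev and my_dev_setup are made up for illustration):

#include <linux/mutex.h>

static DEFINE_MUTEX(global_lock);   /* compile-time initialization */

struct my_dev {
    struct mutex lock;
};

static void my_dev_setup(struct my_dev *dev)
{
    mutex_init(&dev->lock);         /* runtime initialization via __mutex_init() */
}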

3. Acquiring the mutex (mutex_lock)

void __sched mutex_lock(struct mutex *lock)
{
    might_sleep();
    /*
     * The locking fastpath is the 1->0 transition from
     * 'unlocked' into 'locked' state.
     */
    __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath); // try the fastpath; the slowpath callback can recover the mutex address from the address of lock->count
    mutex_set_owner(lock); // record the owner; on ARM the current task's thread_info is found by clearing the low 13 bits of sp
}
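__mutex_fastpath_lock() is architecture specific. On ARMv6 and later, arch/arm/include/asm/mutex.h selects the decrement-based generic fastpath (pre-ARMv6 cores use the xchg-based asm-generic/mutex-xchg.h instead). A sketch of include/asm-generic/mutex-dec.h from kernels of this era (the exact atomic_*_acquire variant differs between versions):

static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
    if (unlikely(atomic_dec_return_acquire(count) < 0))
        fail_fn(count); /* count went negative: contended, fall back to __mutex_lock_slowpath() */
}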
__visible void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
    struct mutex *lock = container_of(lock_count, struct mutex, count); // subtract the member's offset from its address to get the enclosing structure: addr(lock) = addr(count) - offsetof(struct mutex, count)

    __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
                NULL, _RET_IP_, NULL, 0);
}
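container_of() is plain pointer arithmetic; its definition in include/linux/kernel.h is essentially:

#define container_of(ptr, type, member) ({                      \
    const typeof(((type *)0)->member) *__mptr = (ptr);          \
    (type *)((char *)__mptr - offsetof(type, member)); })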
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
            struct lockdep_map *nest_lock, unsigned long ip,
            struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
    struct task_struct *task = current; // on ARM, clearing the low 13 bits of sp yields thread_info, and thread_info->task is the current task (see the sketch after this function)
    struct mutex_waiter waiter;
    unsigned long flags;
    int ret;

    preempt_disable();
    mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

    if (mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {
        /* got the lock, yay! */
        preempt_enable();
        return 0;
    }

    spin_lock_mutex(&lock->wait_lock, flags); // spin until lock->wait_lock is taken: the code below reads and writes the mutex's members and needs exclusive access

    /*
     * Once more, try to acquire the lock. Only try-lock the mutex if
     * it is unlocked to reduce unnecessary xchg() operations.
     */
    if (!mutex_is_locked(lock) &&
        (atomic_xchg_acquire(&lock->count, 0) == 1))  // wait_lock is held, but count is still accessed atomically: the xchg writes 0 (locked) and returns the old value. If the old value was 1 the mutex was free and we have just taken it; if it was not 1 the mutex is held by someone else and writing 0 changes nothing that matters, we simply fall through to the wait path. The mutex_is_locked() check avoids an unnecessary xchg when the mutex is visibly held
        goto skip_wait; // lock acquired, skip the wait path entirely

    debug_mutex_lock_common(lock, &waiter);
    debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

    /* add waiting tasks to the end of the waitqueue (FIFO): */
    list_add_tail(&waiter.list, &lock->wait_list); // append waiter.list to the tail of the mutex wait list (FIFO order)
    waiter.task = task; // point the waiter at the current task

    lock_contended(&lock->dep_map, ip);

    for (;;) {
        /*
         * Lets try to take the lock again - this is needed even if
         * we get here for the first time (shortly after failing to
         * acquire the lock), to make sure that we get a wakeup once
         * it's unlocked. Later on, if we sleep, this is the
         * operation that gives us the lock. We xchg it to -1, so
         * that when we release the lock, we properly wake up the
         * other waiters. We only attempt the xchg if the count is
         * non-negative in order to avoid unnecessary xchg operations:
         */
        if (atomic_read(&lock->count) >= 0 &&
            (atomic_xchg_acquire(&lock->count, -1) == 1)) // retry the lock: if count is non-negative, xchg it to -1 and look at the old value; 1 means the mutex was free and is now ours. Writing -1 (not 0) tells the unlocking task that there may be waiters to wake
            break; // got the mutex, leave the loop

        /*
         * got a signal? (This code gets eliminated in the
         * TASK_UNINTERRUPTIBLE case.)
         */
        if (unlikely(signal_pending_state(state, task))) {
            ret = -EINTR;
            goto err;
        }

        if (use_ww_ctx && ww_ctx->acquired > 0) {
            ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
            if (ret)
                goto err;
        }

        __set_task_state(task, state); // set the task state (TASK_UNINTERRUPTIBLE for mutex_lock) so the upcoming schedule puts us to sleep

        /* didn't get the lock, go to sleep: */
        spin_unlock_mutex(&lock->wait_lock, flags); // drop the spinlock: we are done touching the mutex's members for now, other tasks may access them
        schedule_preempt_disabled(); // give up the CPU; when another task releases the mutex it wakes us and execution resumes here
        spin_lock_mutex(&lock->wait_lock, flags); // woken up, re-take the spinlock before touching the mutex again
    }
    __set_task_state(task, TASK_RUNNING); // back to TASK_RUNNING

    mutex_remove_waiter(lock, &waiter, current_thread_info()); // the break above left wait_lock held, so removing ourselves from the wait list here cannot race with other tasks
    /* set it to 0 if there are no waiters left: */
    if (likely(list_empty(&lock->wait_list)))
        atomic_set(&lock->count, 0);
    debug_mutex_free_waiter(&waiter);

skip_wait:
    /* got the lock - cleanup and rejoice! */
    lock_acquired(&lock->dep_map, ip);
    mutex_set_owner(lock); // record the current task as the mutex owner

    if (use_ww_ctx) {
        struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
        ww_mutex_set_context_slowpath(ww, ww_ctx);
    }

    spin_unlock_mutex(&lock->wait_lock, flags); // release the spinlock
    preempt_enable();
    return 0;

err:
    mutex_remove_waiter(lock, &waiter, task_thread_info(task));
    spin_unlock_mutex(&lock->wait_lock, flags);
    debug_mutex_free_waiter(&waiter);
    mutex_release(&lock->dep_map, 1, ip);
    preempt_enable();
    return ret;
}
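The "clear the low 13 bits of sp" remarks above describe how current is resolved on 32-bit ARM: thread_info sits at the bottom of the 8 KiB (2^13-byte) kernel stack, so masking sp yields its address, and thread_info->task points at the current task. A sketch of the helpers involved, based on arch/arm/include/asm/thread_info.h of that era:

static inline struct thread_info *current_thread_info(void)
{
    register unsigned long sp asm ("sp");
    return (struct thread_info *)(sp & ~(THREAD_SIZE - 1)); /* THREAD_SIZE = 8192 on ARM */
}

#define get_current()   (current_thread_info()->task)
#define current         get_current()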

4. Releasing the mutex (mutex_unlock)

void __sched mutex_unlock(struct mutex *lock)
{
    /*
     * The unlocking fastpath is the 0->1 transition from 'locked'
     * into 'unlocked' state:
     */
#ifndef CONFIG_DEBUG_MUTEXES
    /*
     * When debugging is enabled we must not clear the owner before time,
     * the slow path will always be taken, and that clears the owner field
     * after verifying that it was indeed current.
     */
    mutex_clear_owner(lock); // clear the owner (no spinlock needed: tasks that do not hold the mutex never touch owner, and only the holder can be releasing it)
#endif
    __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}
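Like the lock side, __mutex_fastpath_unlock() comes from the architecture headers; the decrement-based variant (include/asm-generic/mutex-dec.h, ARMv6 and later) is roughly:

static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
    if (unlikely(atomic_inc_return_release(count) <= 0))
        fail_fn(count); /* count was negative: waiters may need waking, take the slowpath */
}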
__visible void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
    struct mutex *lock = container_of(lock_count, struct mutex, count);

    __mutex_unlock_common_slowpath(lock, 1);
}
/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(struct mutex *lock, int nested)
{
    unsigned long flags;

    /*
     * As a performance measurement, release the lock before doing other
     * wakeup related duties to follow. This allows other tasks to acquire
     * the lock sooner, while still handling cleanups in past unlock calls.
     * This can be done as we do not enforce strict equivalence between the
     * mutex counter and wait_list.
     *
     *
     * Some architectures leave the lock unlocked in the fastpath failure
     * case, others need to leave it locked. In the later case we have to
     * unlock it here - as the lock counter is currently 0 or negative.
     */
    if (__mutex_slowpath_needs_to_unlock())
        atomic_set(&lock->count, 1); // release the mutex itself by setting count back to 1 (count is always read and written with single atomic operations, so no spinlock is needed; note that between this line and the next, another task may already grab the mutex)

    spin_lock_mutex(&lock->wait_lock, flags); // the code below touches the mutex's members, so take wait_lock for exclusive access
    mutex_release(&lock->dep_map, nested, _RET_IP_);
    debug_mutex_unlock(lock);

    if (!list_empty(&lock->wait_list)) { // the wait list is not empty: at least one task is sleeping, waiting for this mutex
        /* get the first entry from the wait-list: */
        struct mutex_waiter *waiter =
                list_entry(lock->wait_list.next,
                       struct mutex_waiter, list); // grab the first entry of the wait list

        debug_mutex_wake_waiter(lock, waiter);

        wake_up_process(waiter->task); // wake the task at the head of the wait list (FIFO) and put it back on the runqueue (CFS red-black tree). The task is only woken, not removed from the wait list: as noted above, the mutex may already have been grabbed by another task the moment it was released, and being woken does not guarantee running right away either. The waiter's for loop never re-adds itself to the wait list, so the task removes itself only after it finally acquires the mutex
    }

    spin_unlock_mutex(&lock->wait_lock, flags); // release the spinlock. The spinlock protects the mutex's own members; it never sleeps and busy-waits on the CPU. The mutex protects the code between lock and unlock; a task that cannot get it blocks and gives the CPU to other tasks
}
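Putting the pieces together, typical usage from driver code is simply (my_dev is the hypothetical structure from the earlier sketch):

static int my_dev_do_work(struct my_dev *dev)
{
    mutex_lock(&dev->lock);    /* may sleep, so never call this from atomic context */
    /* ... critical section: at most one task executes this at a time ... */
    mutex_unlock(&dev->lock);
    return 0;
}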
