Binder Mechanism Scenario Analysis: Into the Driver

1. Overview

If you have read the previous C service application article, you already understand one full use of binder, but you are probably left with plenty of questions:

    1. How does a service's registration connect it to the ServiceManager?
    2. How does a client find the service process from the service name?
    3. Is the handle the client obtains the same as the handle the service registered with the ServiceManager?
    4. How does a client invoke the service through the handle?

Starting with this article, we analyze the data exchange together with the binder driver;

1.1 Key data structures in the driver

| Data structure | Description |
| ------------ | ------- |
| binder_proc | Every process that opens the binder device file with open() gets a binder_proc structure created in the driver, recording that process's state and bookkeeping, e.g. its thread table, binder node table, and node reference table. |
| binder_thread | Every binder thread has a corresponding binder_thread structure in the driver, recording per-thread information such as the work it still has to complete. |
| binder_node | binder_proc holds a table of binder node objects; each entry is a binder_node structure. |
| binder_ref | binder_proc also holds a node reference table; each entry is a binder_ref structure storing a pointer to the referenced binder_node. |
| binder_buffer | The driver sets up a large buffer area via mmap; for each binder transfer, a binder_buffer structure is allocated from that area to hold the data. |
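For orientation, the sketch below lists the fields of binder_proc, binder_thread, and binder_ref that this article actually touches. It is a trimmed excerpt matching the era of the binder.c quoted here (pre-4.14); most members are omitted.

/* Trimmed excerpts of the driver structures used below; many fields omitted. */
struct binder_proc {
    struct hlist_node proc_node;   /* entry in the global binder_procs hash list */
    struct rb_root threads;        /* binder_thread entries, keyed by pid */
    struct rb_root nodes;          /* binder_node objects this process owns */
    struct rb_root refs_by_desc;   /* binder_ref entries, keyed by handle (desc) */
    struct rb_root refs_by_node;   /* binder_ref entries, keyed by node address */
    void *buffer;                  /* kernel address of the mmap'ed buffer area */
    struct list_head todo;         /* work queued for the process as a whole */
    wait_queue_head_t wait;        /* where idle binder threads sleep */
};

struct binder_thread {
    struct binder_proc *proc;      /* owning process */
    struct rb_node rb_node;        /* entry in proc->threads */
    int pid;
    int looper;                    /* BINDER_LOOPER_STATE_* flags */
    struct binder_transaction *transaction_stack;
    struct list_head todo;         /* work queued for this thread only */
    wait_queue_head_t wait;
};

struct binder_ref {
    struct rb_node rb_node_desc;   /* entry in proc->refs_by_desc */
    struct rb_node rb_node_node;   /* entry in proc->refs_by_node */
    uint32_t desc;                 /* the handle handed to user space */
    struct binder_node *node;      /* the entity this reference points at */
};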

1.2 Notes

We first walk through the driver code for the common flow triggered from the application layer, starting with its call to binder_open();

2. Binder initialization (common path)

2.1 binder_open

When the application layer opens the binder driver, the corresponding driver code is:

static int binder_open(struct inode *nodp, struct file *filp)
{
    struct binder_proc *proc;

    binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
             current->group_leader->pid, current->pid);

    proc = kzalloc(sizeof(*proc), GFP_KERNEL);                                        ①
    if (proc == NULL)
        return -ENOMEM;
    get_task_struct(current);                                                         ②
    proc->tsk = current;
    INIT_LIST_HEAD(&proc->todo);                                                      ③
    init_waitqueue_head(&proc->wait);
    proc->default_priority = task_nice(current);

    binder_lock(__func__);

    binder_stats_created(BINDER_STAT_PROC);
    hlist_add_head(&proc->proc_node, &binder_procs);                                  ④
    proc->pid = current->group_leader->pid;
    INIT_LIST_HEAD(&proc->delivered_death);
    filp->private_data = proc;                                                        ⑤

    binder_unlock(__func__);

    if (binder_debugfs_dir_entry_proc) {
        char strbuf[11];

        snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
        proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
            binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
    }

    return 0;
}
①: Allocate a struct binder_proc for the current process;
②: Take a reference on the current process's task structure;
③: Initialize the todo list in binder_proc;
④: Insert the current process's binder_proc into the global binder_procs list;
⑤: Save proc in the file structure for later calls to use;

binder_procs is a global hash list, initialized at the top of the binder driver with static HLIST_HEAD(binder_procs);
The main job of binder_open() is to open the binder device file, create and initialize the binder_proc structure proc for the current process, and insert proc into the global binder_procs list for later lookup;
proc is also stored in the private_data field of the file structure, so the other driver operations can fetch the binder_proc representing the current process from the file structure;

2.2 binder_ioctl

When the application layer calls ioctl, the corresponding driver code is:

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    int ret;
    struct binder_proc *proc = filp->private_data;
    struct binder_thread *thread;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;

    /*pr_info("binder_ioctl: %d:%d %x %lx\n",
            proc->pid, current->pid, cmd, arg);*/

    trace_binder_ioctl(cmd, arg);

    ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
    if (ret)
        goto err_unlocked;

    binder_lock(__func__);
    thread = binder_get_thread(proc);                                                ①
    if (thread == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    switch (cmd) {
    case BINDER_WRITE_READ:
        ret = binder_ioctl_write_read(filp, cmd, arg, thread);
        if (ret)
            goto err;
        break;
    case BINDER_SET_MAX_THREADS:
        if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
            ret = -EINVAL;
            goto err;
        }
        break;
    case BINDER_SET_CONTEXT_MGR:
        ret = binder_ioctl_set_ctx_mgr(filp);
        if (ret)
            goto err;
        break;
    case BINDER_THREAD_EXIT:
        binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
                 proc->pid, thread->pid);
        binder_free_thread(proc, thread);
        thread = NULL;
        break;
    case BINDER_VERSION: {
        struct binder_version __user *ver = ubuf;

        if (size != sizeof(struct binder_version)) {
            ret = -EINVAL;
            goto err;
        }
        if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
                 &ver->protocol_version)) {
            ret = -EINVAL;
            goto err;
        }
        break;
    }
    default:
        ret = -EINVAL;
        goto err;
    }
    ret = 0;
err:
    if (thread)
        thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
    binder_unlock(__func__);
    wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
    if (ret && ret != -ERESTARTSYS)
        pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
    trace_binder_ioctl_done(ret);
    return ret;
}

Fetching the binder version is simple: send the BINDER_VERSION command to the driver, and the driver replies with BINDER_CURRENT_PROTOCOL_VERSION to user space;
here is what each command means:

| Command | Meaning | Data format |
| ------- | ------- | ----------- |
| BINDER_WRITE_READ | Read data from and write data to the driver; one call can do both | struct binder_write_read |
| BINDER_SET_MAX_THREADS | Set the maximum number of threads in the thread pool; once the limit is reached, the driver no longer asks the application layer to start new threads | size_t |
| BINDER_SET_CONTEXT_MGR | Make the calling process the manager of the binder system; only the servicemanager process uses this command, and it may only be called once | int |
| BINDER_THREAD_EXIT | Tell the driver the current thread is exiting, so it can clean up that thread's data | int |
| BINDER_VERSION | Get the binder version number | struct binder_version |

One point to note:

①: the first ioctl call creates a binder_thread for the calling thread;
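As a concrete user-space example, here is a minimal sketch that queries the protocol version. It assumes the binder uapi header is available as <linux/android/binder.h> (older trees export it as <linux/binder.h>); error handling is trimmed.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>   /* BINDER_VERSION, struct binder_version */

int main(void)
{
    struct binder_version vers;
    int fd = open("/dev/binder", O_RDWR);

    if (fd < 0)
        return 1;
    /* The driver answers with put_user(BINDER_CURRENT_PROTOCOL_VERSION, ...). */
    if (ioctl(fd, BINDER_VERSION, &vers) < 0)
        return 1;
    printf("binder protocol version: %d\n", vers.protocol_version);
    return 0;
}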

2.3 binder_get_thread

static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
    struct binder_thread *thread = NULL;
    struct rb_node *parent = NULL;
    struct rb_node **p = &proc->threads.rb_node;

    while (*p) {                                                                        ①
        parent = *p;
        thread = rb_entry(parent, struct binder_thread, rb_node);

        if (current->pid < thread->pid)
            p = &(*p)->rb_left;
        else if (current->pid > thread->pid)
            p = &(*p)->rb_right;
        else
            break;
    }
    if (*p == NULL) {
        thread = kzalloc(sizeof(*thread), GFP_KERNEL);                                    ②
        if (thread == NULL)
            return NULL;
        binder_stats_created(BINDER_STAT_THREAD);
        thread->proc = proc;
        thread->pid = current->pid;
        init_waitqueue_head(&thread->wait);                                               ③
        INIT_LIST_HEAD(&thread->todo);
        rb_link_node(&thread->rb_node, parent, p);
        rb_insert_color(&thread->rb_node, &proc->threads);                                ④
        thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
        thread->return_error = BR_OK;
        thread->return_error2 = BR_OK;
    }
    return thread;
}

The main job of this function is to find the calling thread among those hanging off the current process's info table; struct binder_thread entries are kept in the red-black tree rooted at the threads field of binder_proc;

①: Walk the red-black tree rooted at threads;
②: If nothing is found, allocate a struct binder_thread;
③: Initialize the wait queue head and the thread's todo list;
④: Insert the thread into the process's threads tree;

In short, the tree is searched first; if no matching thread entry is found, a binder_thread is created, its wait queue and todo list are initialized, and the node is linked into the red-black tree at proc->threads;

2.4 binder_mmap

static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
    int ret;
    struct vm_struct *area;
    struct binder_proc *proc = filp->private_data;                                   ①
    const char *failure_string;
    struct binder_buffer *buffer;

    if (proc->tsk != current)
        return -EINVAL;

    if ((vma->vm_end - vma->vm_start) > SZ_4M)
        vma->vm_end = vma->vm_start + SZ_4M;

    binder_debug(BINDER_DEBUG_OPEN_CLOSE,
             "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
             proc->pid, vma->vm_start, vma->vm_end,
             (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
             (unsigned long)pgprot_val(vma->vm_page_prot));

    if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
        ret = -EPERM;
        failure_string = "bad vm_flags";
        goto err_bad_arg;
    }
    vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;

    mutex_lock(&binder_mmap_lock);
    if (proc->buffer) {
        ret = -EBUSY;
        failure_string = "already mapped";
        goto err_already_mapped;
    }

    area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);                     ②
    if (area == NULL) {
        ret = -ENOMEM;
        failure_string = "get_vm_area";
        goto err_get_vm_area_failed;
    }
    proc->buffer = area->addr;                                                       ③
    proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
    mutex_unlock(&binder_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
    if (cache_is_vipt_aliasing()) {
        while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
            pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
            vma->vm_start += PAGE_SIZE;
        }
    }
#endif
    proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL); ④
    if (proc->pages == NULL) {
        ret = -ENOMEM;
        failure_string = "alloc page array";
        goto err_alloc_pages_failed;
    }
    proc->buffer_size = vma->vm_end - vma->vm_start;

    vma->vm_ops = &binder_vm_ops;
    vma->vm_private_data = proc;

    if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {    ⑤
        ret = -ENOMEM;
        failure_string = "alloc small buf";
        goto err_alloc_small_buf_failed;
    }
    buffer = proc->buffer;
    INIT_LIST_HEAD(&proc->buffers);
    list_add(&buffer->entry, &proc->buffers);                                                ⑥
    buffer->free = 1;
    binder_insert_free_buffer(proc, buffer);
    proc->free_async_space = proc->buffer_size / 2;
    barrier();
    proc->files = get_files_struct(current);
    proc->vma = vma;
    proc->vma_vm_mm = vma->vm_mm;

    /*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
         proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
    return 0;

err_alloc_small_buf_failed:
    kfree(proc->pages);
    proc->pages = NULL;
err_alloc_pages_failed:
    mutex_lock(&binder_mmap_lock);
    vfree(proc->buffer);
    proc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
    mutex_unlock(&binder_mmap_lock);
err_bad_arg:
    pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
           proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
    return ret;
}
①: filp->private_data holds the binder_proc created when we opened the device;
②: Reserve a region of kernel virtual address space, the same size as the user mapping, to serve as the buffer area;
③: Store the buffer pointer in the buffer field of binder_proc;
④: Allocate the pages array;
⑤: Allocate physical memory (one page to start with) and map it into both the kernel buffer area and the user-space vma via binder_update_page_range(), so both refer to the same pages;
⑥: Add the buffer just set up to the process's buffers list;

binder_mmap() first calls get_vm_area() to reserve a block of kernel virtual address space the same size as the user mapping (the user-space side is the vma passed in), then calls binder_update_page_range() to back both ranges with the same physical pages; from then on, user space and kernel space share one block of memory;

binder uses this mmap arrangement to cut the number of copies in inter-process data transfer. Without mmap, data would be copied from the sending process into kernel space with copy_from_user, and again from kernel space into the target process with copy_to_user: two copies. With mmap, only the single copy_from_user is needed, moving the sender's data straight into the kernel buffer mapped by the target process, whose user space can then read it without a further copy;
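On the user-space side, this whole section boils down to an open() followed by an mmap() on the same fd, in the style of the binder_open() helper from the C service application article. A minimal sketch (the 128 KB map size is the value servicemanager traditionally passes; treat it as an assumption):

#include <fcntl.h>
#include <stddef.h>
#include <sys/mman.h>

struct binder_state {
    int fd;
    void *mapped;     /* user-space window onto the kernel buffer area */
    size_t mapsize;
};

static int binder_state_open(struct binder_state *bs, size_t mapsize /* e.g. 128*1024 */)
{
    bs->fd = open("/dev/binder", O_RDWR);
    if (bs->fd < 0)
        return -1;
    bs->mapsize = mapsize;
    /* PROT_READ only: binder_mmap() rejects writable mappings
     * (FORBIDDEN_MMAP_FLAGS) and clears VM_MAYWRITE itself. */
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
    return bs->mapped == MAP_FAILED ? -1 : 0;
}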

3. ServiceManager

3.1 Registering as the manager

3.1.1 binder_ioctl_set_ctx_mgr

static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
    int ret = 0;
    struct binder_proc *proc = filp->private_data;
    kuid_t curr_euid = current_euid();

    if (binder_context_mgr_node != NULL) {
        pr_err("BINDER_SET_CONTEXT_MGR already set\n");
        ret = -EBUSY;
        goto out;
    }
    ret = security_binder_set_context_mgr(proc->tsk);
    if (ret < 0)
        goto out;
    if (uid_valid(binder_context_mgr_uid)) {
        if (!uid_eq(binder_context_mgr_uid, curr_euid)) {
            pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
                   from_kuid(&init_user_ns, curr_euid),
                   from_kuid(&init_user_ns,
                    binder_context_mgr_uid));
            ret = -EPERM;
            goto out;
        }
    } else {
        binder_context_mgr_uid = curr_euid;                                    ①
    }
    binder_context_mgr_node = binder_new_node(proc, 0, 0);                   ②
    if (binder_context_mgr_node == NULL) {
        ret = -ENOMEM;
        goto out;
    }
    binder_context_mgr_node->local_weak_refs++;
    binder_context_mgr_node->local_strong_refs++;
    binder_context_mgr_node->has_strong_ref = 1;
    binder_context_mgr_node->has_weak_ref = 1;
out:
    return ret;
}
①: Save the current process's effective uid in the global binder_context_mgr_uid;
②: Create a binder_node for the current process and save it in the global binder_context_mgr_node;
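On the user-space side this corresponds to a single ioctl, issued once by servicemanager right after it opens the device (a sketch):

#include <sys/ioctl.h>
#include <linux/android/binder.h>

/* Become the context manager, i.e. the process behind handle 0.
 * The driver accepts this exactly once; a second call gets -EBUSY. */
static int binder_become_context_manager(int fd)
{
    return ioctl(fd, BINDER_SET_CONTEXT_MGR, 0);
}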

3.2 Entering the loop

ServiceManager calls ioctl to write the BC_ENTER_LOOPER command to the driver, then enters its loop;
when ioctl is called with the BINDER_WRITE_READ command, the following function runs:

3.2.1 binder_ioctl_write_read

static int binder_ioctl_write_read(struct file *filp,
                unsigned int cmd, unsigned long arg,
                struct binder_thread *thread)
{
    int ret = 0;
    struct binder_proc *proc = filp->private_data;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;
    struct binder_write_read bwr;

    if (size != sizeof(struct binder_write_read)) {
        ret = -EINVAL;
        goto out;
    }
    if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {                                    ①
        ret = -EFAULT;
        goto out;
    }
    binder_debug(BINDER_DEBUG_READ_WRITE,
             "%d:%d write %lld at %016llx, read %lld at %016llx\n",
             proc->pid, thread->pid,
             (u64)bwr.write_size, (u64)bwr.write_buffer,
             (u64)bwr.read_size, (u64)bwr.read_buffer);

    if (bwr.write_size > 0) {                                                         ②
        ret = binder_thread_write(proc, thread,
                      bwr.write_buffer,
                      bwr.write_size,
                      &bwr.write_consumed);
        trace_binder_write_done(ret);
        if (ret < 0) {
            bwr.read_consumed = 0;
            if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                ret = -EFAULT;
            goto out;
        }
    }
    if (bwr.read_size > 0) {
        ret = binder_thread_read(proc, thread, bwr.read_buffer,
                     bwr.read_size,
                     &bwr.read_consumed,
                     filp->f_flags & O_NONBLOCK);
        trace_binder_read_done(ret);
        if (!list_empty(&proc->todo))
            wake_up_interruptible(&proc->wait);
        if (ret < 0) {
            if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                ret = -EFAULT;
            goto out;
        }
    }
    binder_debug(BINDER_DEBUG_READ_WRITE,
             "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
             proc->pid, thread->pid,
             (u64)bwr.write_consumed, (u64)bwr.write_size,
             (u64)bwr.read_consumed, (u64)bwr.read_size);
    if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
        ret = -EFAULT;
        goto out;
    }
out:
    return ret;
}
①: Copy the user-space data into kernel space;
②: Note that the driver tells reads and writes apart by the respective buffer sizes, and that the two operations do not interfere with each other;
struct binder_write_read {
    binder_size_t        write_size;    /* bytes to write */
    binder_size_t        write_consumed;    /* bytes consumed by driver */
    binder_uintptr_t    write_buffer;
    binder_size_t        read_size;    /* bytes to read */
    binder_size_t        read_consumed;    /* bytes consumed by driver */
    binder_uintptr_t    read_buffer;
};

This structure is simple: normally only the sizes and buffer pointers need filling in; the layout of the data inside the buffers was covered in the C service application article;
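A write-only command such as BC_ENTER_LOOPER is sent by filling in just the write side and leaving read_size at 0, in the style of the binder_write() helper from the C service application article (a sketch):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

/* Write-only BINDER_WRITE_READ: with read_size == 0 the driver never
 * enters binder_thread_read(), so the call does not block. */
static int binder_write(int fd, void *data, size_t len)
{
    struct binder_write_read bwr;

    memset(&bwr, 0, sizeof(bwr));
    bwr.write_size = len;
    bwr.write_buffer = (uintptr_t)data;
    return ioctl(fd, BINDER_WRITE_READ, &bwr);
}

Entering the loop is then just writing the single command word: uint32_t cmd = BC_ENTER_LOOPER; binder_write(fd, &cmd, sizeof(cmd));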

3.2.2 binder_thread_write

binder_thread_write() is too long to show in one piece, so each command is examined as we meet it;

3.2.2.1 BC_ENTER_LOOPER

        case BC_ENTER_LOOPER:
            binder_debug(BINDER_DEBUG_THREADS,
                     "%d:%d BC_ENTER_LOOPER\n",
                     proc->pid, thread->pid);
            if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
                thread->looper |= BINDER_LOOPER_STATE_INVALID;
                binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
                    proc->pid, thread->pid);
            }
            thread->looper |= BINDER_LOOPER_STATE_ENTERED;
            break;

As explained before, this command tells the driver that the thread enters the looping state; the thread here is the one whose struct binder_thread was created on the first ioctl call;

3.2.3 binder_thread_read

Next, after entering the for loop, ioctl is called once more, this time to read:

static int binder_thread_read(struct binder_proc *proc,
                  struct binder_thread *thread,
                  binder_uintptr_t binder_buffer, size_t size,
                  binder_size_t *consumed, int non_block)
{
    void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;

    int ret = 0;
    int wait_for_proc_work;

    if (*consumed == 0) {                                                        ①
        if (put_user(BR_NOOP, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
    }

retry:
    wait_for_proc_work = thread->transaction_stack == NULL &&                    ②
                list_empty(&thread->todo);

    if (thread->return_error != BR_OK && ptr < end) {
        if (thread->return_error2 != BR_OK) {
            if (put_user(thread->return_error2, (uint32_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(uint32_t);
            binder_stat_br(proc, thread, thread->return_error2);
            if (ptr == end)
                goto done;
            thread->return_error2 = BR_OK;
        }
        if (put_user(thread->return_error, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
        binder_stat_br(proc, thread, thread->return_error);
        thread->return_error = BR_OK;
        goto done;
    }


    thread->looper |= BINDER_LOOPER_STATE_WAITING;
    if (wait_for_proc_work)                                                     ③
        proc->ready_threads++;

    binder_unlock(__func__);

    trace_binder_wait_for_work(wait_for_proc_work,
                   !!thread->transaction_stack,
                   !list_empty(&thread->todo));
    if (wait_for_proc_work) {
        if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |                  ④
                    BINDER_LOOPER_STATE_ENTERED))) {
            binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
                proc->pid, thread->pid, thread->looper);
            wait_event_interruptible(binder_user_error_wait,
                         binder_stop_on_user_error < 2);
        }
        binder_set_nice(proc->default_priority);
        if (non_block) {
            if (!binder_has_proc_work(proc, thread))
                ret = -EAGAIN;
        } else
            ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));    ⑤
    } else {
        if (non_block) {
            if (!binder_has_thread_work(thread))
                ret = -EAGAIN;
        } else
            ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
    }

    binder_lock(__func__);

    if (wait_for_proc_work)                                                 ⑥
        proc->ready_threads--;
    thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

    if (ret)
        return ret;

    while (1) {
        uint32_t cmd;
        struct binder_transaction_data tr;
        struct binder_work *w;
        struct binder_transaction *t = NULL;

        if (!list_empty(&thread->todo)) {                                     ⑦
            w = list_first_entry(&thread->todo, struct binder_work,
                         entry);
        } else if (!list_empty(&proc->todo) && wait_for_proc_work) {          ⑧
            w = list_first_entry(&proc->todo, struct binder_work,
                         entry);
        } else {
            /* no data added */
            if (ptr - buffer == 4 &&
                !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
                goto retry;
            break;
        }

        if (end - ptr < sizeof(tr) + 4)
            break;

        switch (w->type) {
        case BINDER_WORK_TRANSACTION: {
            t = container_of(w, struct binder_transaction, work);              ⑨
        } break;
        case BINDER_WORK_TRANSACTION_COMPLETE: {
            cmd = BR_TRANSACTION_COMPLETE;
            if (put_user(cmd, (uint32_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(uint32_t);

            binder_stat_br(proc, thread, cmd);
            binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
                     "%d:%d BR_TRANSACTION_COMPLETE\n",
                     proc->pid, thread->pid);

            list_del(&w->entry);                                               ⑩
            kfree(w);
            binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
        } break;
        case BINDER_WORK_NODE: {
            struct binder_node *node = container_of(w, struct binder_node, work);
            uint32_t cmd = BR_NOOP;
            const char *cmd_name;
            int strong = node->internal_strong_refs || node->local_strong_refs;
            int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;

            if (weak && !node->has_weak_ref) {
                cmd = BR_INCREFS;
                cmd_name = "BR_INCREFS";
                node->has_weak_ref = 1;
                node->pending_weak_ref = 1;
                node->local_weak_refs++;
            } else if (strong && !node->has_strong_ref) {
                cmd = BR_ACQUIRE;
                cmd_name = "BR_ACQUIRE";
                node->has_strong_ref = 1;
                node->pending_strong_ref = 1;
                node->local_strong_refs++;
            } else if (!strong && node->has_strong_ref) {
                cmd = BR_RELEASE;
                cmd_name = "BR_RELEASE";
                node->has_strong_ref = 0;
            } else if (!weak && node->has_weak_ref) {
                cmd = BR_DECREFS;
                cmd_name = "BR_DECREFS";
                node->has_weak_ref = 0;
            }
            if (cmd != BR_NOOP) {
                if (put_user(cmd, (uint32_t __user *)ptr))
                    return -EFAULT;
                ptr += sizeof(uint32_t);
                if (put_user(node->ptr,
                         (binder_uintptr_t __user *)ptr))
                    return -EFAULT;
                ptr += sizeof(binder_uintptr_t);
                if (put_user(node->cookie,
                         (binder_uintptr_t __user *)ptr))
                    return -EFAULT;
                ptr += sizeof(binder_uintptr_t);

                binder_stat_br(proc, thread, cmd);
                binder_debug(BINDER_DEBUG_USER_REFS,
                         "%d:%d %s %d u%016llx c%016llx\n",
                         proc->pid, thread->pid, cmd_name,
                         node->debug_id,
                         (u64)node->ptr, (u64)node->cookie);
            } else {
                list_del_init(&w->entry);
                if (!weak && !strong) {
                    binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                             "%d:%d node %d u%016llx c%016llx deleted\n",
                             proc->pid, thread->pid,
                             node->debug_id,
                             (u64)node->ptr,
                             (u64)node->cookie);
                    rb_erase(&node->rb_node, &proc->nodes);
                    kfree(node);
                    binder_stats_deleted(BINDER_STAT_NODE);
                } else {
                    binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                             "%d:%d node %d u%016llx c%016llx state unchanged\n",
                             proc->pid, thread->pid,
                             node->debug_id,
                             (u64)node->ptr,
                             (u64)node->cookie);
                }
            }
        } break;
        case BINDER_WORK_DEAD_BINDER:
        case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
        case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
            struct binder_ref_death *death;
            uint32_t cmd;

            death = container_of(w, struct binder_ref_death, work);
            if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
                cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
            else
                cmd = BR_DEAD_BINDER;
            if (put_user(cmd, (uint32_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(uint32_t);
            if (put_user(death->cookie,
                     (binder_uintptr_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(binder_uintptr_t);
            binder_stat_br(proc, thread, cmd);
            binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
                     "%d:%d %s %016llx\n",
                      proc->pid, thread->pid,
                      cmd == BR_DEAD_BINDER ?
                      "BR_DEAD_BINDER" :
                      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
                      (u64)death->cookie);

            if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
                list_del(&w->entry);
                kfree(death);
                binder_stats_deleted(BINDER_STAT_DEATH);
            } else
                list_move(&w->entry, &proc->delivered_death);
            if (cmd == BR_DEAD_BINDER)
                goto done; /* DEAD_BINDER notifications can cause transactions */
        } break;
        }

        if (!t)
            continue;

        BUG_ON(t->buffer == NULL);
        if (t->buffer->target_node) {
            struct binder_node *target_node = t->buffer->target_node;

            tr.target.ptr = target_node->ptr;
            tr.cookie =  target_node->cookie;
            t->saved_priority = task_nice(current);
            if (t->priority < target_node->min_priority &&
                !(t->flags & TF_ONE_WAY))
                binder_set_nice(t->priority);
            else if (!(t->flags & TF_ONE_WAY) ||
                 t->saved_priority > target_node->min_priority)
                binder_set_nice(target_node->min_priority);
            cmd = BR_TRANSACTION;                                              ①①
        } else {
            tr.target.ptr = 0;
            tr.cookie = 0;
            cmd = BR_REPLY;
        }
        tr.code = t->code;                                                   ①②
        tr.flags = t->flags;
        tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);

        if (t->from) {
            struct task_struct *sender = t->from->proc->tsk;

            tr.sender_pid = task_tgid_nr_ns(sender,
                            task_active_pid_ns(current));
        } else {
            tr.sender_pid = 0;
        }

        tr.data_size = t->buffer->data_size;
        tr.offsets_size = t->buffer->offsets_size;
        tr.data.ptr.buffer = (binder_uintptr_t)(
                    (uintptr_t)t->buffer->data +
                    proc->user_buffer_offset);
        tr.data.ptr.offsets = tr.data.ptr.buffer +
                    ALIGN(t->buffer->data_size,
                        sizeof(void *));

        if (put_user(cmd, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
        if (copy_to_user(ptr, &tr, sizeof(tr)))                               ①③
            return -EFAULT;
        ptr += sizeof(tr);

        trace_binder_transaction_received(t);
        binder_stat_br(proc, thread, cmd);
        binder_debug(BINDER_DEBUG_TRANSACTION,
                 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
                 proc->pid, thread->pid,
                 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
                 "BR_REPLY",
                 t->debug_id, t->from ? t->from->proc->pid : 0,
                 t->from ? t->from->pid : 0, cmd,
                 t->buffer->data_size, t->buffer->offsets_size,
                 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);

        list_del(&t->work.entry);
        t->buffer->allow_user_free = 1;
        if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
            t->to_parent = thread->transaction_stack;
            t->to_thread = thread;
            thread->transaction_stack = t;
        } else {
            t->buffer->transaction = NULL;
            kfree(t);
            binder_stats_deleted(BINDER_STAT_TRANSACTION);
        }
        break;
    }

done:

    *consumed = ptr - buffer;                                             ①④
    if (proc->requested_threads + proc->ready_threads == 0 &&
        proc->requested_threads_started < proc->max_threads &&
        (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
         BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
         /*spawn a new thread if we leave this out */) {
        proc->requested_threads++;
        binder_debug(BINDER_DEBUG_THREADS,
                 "%d:%d BR_SPAWN_LOOPER\n",
                 proc->pid, thread->pid);
        if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
            return -EFAULT;
        binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
    }
    return 0;
}
①: If the consumed count passed in with the read buffer is 0, a BR_NOOP is written back to user space first (the user-space thread is still asleep at this point);
②: If the thread's todo list is empty and its transaction stack holds no data, the thread has no thread-local work and will wait for process-wide work;
③: A thread about to wait for process-wide work increments the process's ready_threads count;
④: If the thread starts reading before it has written BC_REGISTER_LOOPER or BC_ENTER_LOOPER, the driver flags a user error and the thread sleeps on the error wait queue;
⑤: Sleep; on wakeup, binder_has_proc_work() checks whether the process has work (the process todo list plus the thread's looper state), and the thread sleeps again if not. The else branch does the same per thread, with binder_has_thread_work();
⑥: Reaching this point means the thread has been woken: the process's ready-thread count is decremented and the thread's looper waiting flag is cleared;
⑦: First look at the thread's own todo list for work to execute;
⑧: Then check the process's todo list;
⑨: Recover the struct binder_transaction address from the embedded binder_work node (container_of);
⑩: A BINDER_WORK_TRANSACTION_COMPLETE work item means the transfer has completed, so it can be removed from the thread's or process's todo list;
①①: The reply command is BR_TRANSACTION or BR_REPLY;
①②: Fill in the reply data;
①③: Copy tr out to user space; ptr points into the user-space read buffer;
①④: Finally, consumed records the length of the data returned this time;

Look closely at point ①③:

        tr.data.ptr.buffer = (binder_uintptr_t)(
                    (uintptr_t)t->buffer->data +
                    proc->user_buffer_offset);

Only the address of the data buffer is copied up to user space; because of mmap, user space can use that memory directly. This again shows the efficiency of copying only once;

Notice that the data read back generally has the format BR_NOOP + CMD + data + CMD + data ...;
when ServiceManager performs its first read after entering the loop, no other thread is ready yet, so it just receives a BR_NOOP and goes to sleep;
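A reader of that stream walks the buffer one command at a time, in the spirit of binder_parse() from the C service application article. A sketch handling only the commands discussed here (payload sizes follow the driver code above):

#include <stdint.h>
#include <stddef.h>
#include <linux/android/binder.h>

/* Walk a read buffer filled by binder_thread_read():
 * BR_NOOP + cmd + payload + cmd + payload ... */
static void parse_read_buffer(const uint8_t *buf, size_t consumed)
{
    const uint8_t *ptr = buf, *end = buf + consumed;

    while (ptr + sizeof(uint32_t) <= end) {
        uint32_t cmd = *(const uint32_t *)ptr;
        ptr += sizeof(uint32_t);

        switch (cmd) {
        case BR_NOOP:                 /* no payload */
        case BR_TRANSACTION_COMPLETE: /* no payload */
            break;
        case BR_TRANSACTION:          /* payload: struct binder_transaction_data */
        case BR_REPLY: {
            const struct binder_transaction_data *txn = (const void *)ptr;

            if ((size_t)(end - ptr) < sizeof(*txn))
                return;
            ptr += sizeof(*txn);
            /* dispatch on txn->code; data sits at txn->data.ptr.buffer */
            (void)txn;
            break;
        }
        default:                      /* command we do not handle: stop */
            return;
        }
    }
}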

3.3 Flow summary

(Flow diagram: ServiceManager opens the driver, mmaps it, registers as context manager, writes BC_ENTER_LOOPER, then loops on blocking reads.)

4. led_control_service

4.1 Registering a service

Registration happens in led_control_server.c with:

svcmgr_publish(bs, svcmgr, LED_CONTROL_SERVER_NAME, led_control);

This registers the process as a service provider. Internally it mainly calls binder_call(), which writes a BC_TRANSACTION command; the detailed flow was covered in the C service application article, so here we analyze it together with the driver;
note that binder_call() reads and writes the binder driver in a single call; we look at the write first, then the read;
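Its shape is roughly the following sketch: one BINDER_WRITE_READ carries the outgoing BC_TRANSACTION in the write buffer, then blocks inside binder_thread_read() until the reply lands in the read buffer (the name binder_call_raw is illustrative, not the exact helper from the article):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

/* One ioctl, both directions: the driver first consumes the write buffer
 * (our BC_TRANSACTION), then sleeps in binder_thread_read() until the
 * reply arrives and is copied into the read buffer. */
static int binder_call_raw(int fd, void *writebuf, size_t writelen,
                           void *readbuf, size_t readlen)
{
    struct binder_write_read bwr;

    bwr.write_size = writelen;
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t)writebuf;
    bwr.read_size = readlen;
    bwr.read_consumed = 0;
    bwr.read_buffer = (uintptr_t)readbuf;
    return ioctl(fd, BINDER_WRITE_READ, &bwr);
}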

4.1.1 binder_thread_write

4.1.1.1 BC_TRANSACTION

        case BC_TRANSACTION:
        case BC_REPLY: {
            struct binder_transaction_data tr;

            if (copy_from_user(&tr, ptr, sizeof(tr)))
                return -EFAULT;
            ptr += sizeof(tr);
            binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
            break;
        }

The user-space write_buffer data is first copied into the kernel's struct binder_transaction_data tr, and then handed to binder_transaction() for processing;

4.1.1.2 binder_transaction

This function is extremely long....

static void binder_transaction(struct binder_proc *proc,
                   struct binder_thread *thread,
                   struct binder_transaction_data *tr, int reply)
{
    struct binder_transaction *t;
    struct binder_work *tcomplete;
    binder_size_t *offp, *off_end;
    struct binder_proc *target_proc;
    struct binder_thread *target_thread = NULL;
    struct binder_node *target_node = NULL;
    struct list_head *target_list;
    wait_queue_head_t *target_wait;
    struct binder_transaction *in_reply_to = NULL;
    struct binder_transaction_log_entry *e;
    uint32_t return_error;

    e = binder_transaction_log_add(&binder_transaction_log);
    e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
    e->from_proc = proc->pid;
    e->from_thread = thread->pid;
    e->target_handle = tr->target.handle;
    e->data_size = tr->data_size;
    e->offsets_size = tr->offsets_size;

    if (reply) {
    ......
    } else {
        if (tr->target.handle) {
            struct binder_ref *ref;

            ref = binder_get_ref(proc, tr->target.handle);                                ①
            if (ref == NULL) {
                binder_user_error("%d:%d got transaction to invalid handle\n",
                    proc->pid, thread->pid);
                return_error = BR_FAILED_REPLY;
                goto err_invalid_target_handle;
            }
            target_node = ref->node;
        } else {
            target_node = binder_context_mgr_node;                                        ②
            if (target_node == NULL) {
                return_error = BR_DEAD_REPLY;
                goto err_no_context_mgr_node;
            }
        }
        e->to_node = target_node->debug_id;
        target_proc = target_node->proc;                                                ③
        if (target_proc == NULL) {
            return_error = BR_DEAD_REPLY;
            goto err_dead_binder;
        }
        if (security_binder_transaction(proc->tsk,
                        target_proc->tsk) < 0) {
            return_error = BR_FAILED_REPLY;
            goto err_invalid_target_handle;
        }
        if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {                   ④
            struct binder_transaction *tmp;

            tmp = thread->transaction_stack;
            if (tmp->to_thread != thread) {
                binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
                    proc->pid, thread->pid, tmp->debug_id,
                    tmp->to_proc ? tmp->to_proc->pid : 0,
                    tmp->to_thread ?
                    tmp->to_thread->pid : 0);
                return_error = BR_FAILED_REPLY;
                goto err_bad_call_stack;
            }
            while (tmp) {
                if (tmp->from && tmp->from->proc == target_proc)                             ⑤
                    target_thread = tmp->from;
                tmp = tmp->from_parent;
            }
        }
    }
    if (target_thread) {
        e->to_thread = target_thread->pid;
        target_list = &target_thread->todo;
        target_wait = &target_thread->wait;
    } else {
        target_list = &target_proc->todo;
        target_wait = &target_proc->wait;
    }
    e->to_proc = target_proc->pid;

    /* TODO: reuse incoming transaction for reply */
    t = kzalloc(sizeof(*t), GFP_KERNEL);
    if (t == NULL) {
        return_error = BR_FAILED_REPLY;
        goto err_alloc_t_failed;
    }
    binder_stats_created(BINDER_STAT_TRANSACTION);

    tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
    if (tcomplete == NULL) {
        return_error = BR_FAILED_REPLY;
        goto err_alloc_tcomplete_failed;
    }
    binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

    t->debug_id = ++binder_last_id;
    e->debug_id = t->debug_id;

    if (reply)
        binder_debug(BINDER_DEBUG_TRANSACTION,
                 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n",
                 proc->pid, thread->pid, t->debug_id,
                 target_proc->pid, target_thread->pid,
                 (u64)tr->data.ptr.buffer,
                 (u64)tr->data.ptr.offsets,
                 (u64)tr->data_size, (u64)tr->offsets_size);
    else
        binder_debug(BINDER_DEBUG_TRANSACTION,
                 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n",
                 proc->pid, thread->pid, t->debug_id,
                 target_proc->pid, target_node->debug_id,
                 (u64)tr->data.ptr.buffer,
                 (u64)tr->data.ptr.offsets,
                 (u64)tr->data_size, (u64)tr->offsets_size);

    if (!reply && !(tr->flags & TF_ONE_WAY))
        t->from = thread;
    else
        t->from = NULL;
    t->sender_euid = task_euid(proc->tsk);
    t->to_proc = target_proc;
    t->to_thread = target_thread;
    t->code = tr->code;
    t->flags = tr->flags;
    t->priority = task_nice(current);

    trace_binder_transaction(reply, t, target_node);

    t->buffer = binder_alloc_buf(target_proc, tr->data_size,                       ⑥
        tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
    if (t->buffer == NULL) {
        return_error = BR_FAILED_REPLY;
        goto err_binder_alloc_buf_failed;
    }
    t->buffer->allow_user_free = 0;
    t->buffer->debug_id = t->debug_id;
    t->buffer->transaction = t;
    t->buffer->target_node = target_node;
    trace_binder_transaction_alloc_buf(t->buffer);
    if (target_node)
        binder_inc_node(target_node, 1, 0, NULL);

    offp = (binder_size_t *)(t->buffer->data +
                 ALIGN(tr->data_size, sizeof(void *)));

    if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
               tr->data.ptr.buffer, tr->data_size)) {
        binder_user_error("%d:%d got transaction with invalid data ptr\n",
                proc->pid, thread->pid);
        return_error = BR_FAILED_REPLY;
        goto err_copy_data_failed;
    }

    if (copy_from_user(offp, (const void __user *)(uintptr_t)
               tr->data.ptr.offsets, tr->offsets_size)) {
        binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
                proc->pid, thread->pid);
        return_error = BR_FAILED_REPLY;
        goto err_copy_data_failed;
    }
    if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
        binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
                proc->pid, thread->pid, (u64)tr->offsets_size);
        return_error = BR_FAILED_REPLY;
        goto err_bad_offset;
    }
    off_end = (void *)offp + tr->offsets_size;
    for (; offp < off_end; offp++) {
        struct flat_binder_object *fp;

        if (*offp > t->buffer->data_size - sizeof(*fp) ||
            t->buffer->data_size < sizeof(*fp) ||
            !IS_ALIGNED(*offp, sizeof(u32))) {
            binder_user_error("%d:%d got transaction with invalid offset, %lld\n",
                      proc->pid, thread->pid, (u64)*offp);
            return_error = BR_FAILED_REPLY;
            goto err_bad_offset;
        }
        fp = (struct flat_binder_object *)(t->buffer->data + *offp);                   ⑦
        switch (fp->type) {
        case BINDER_TYPE_BINDER:
        case BINDER_TYPE_WEAK_BINDER: {
            struct binder_ref *ref;
            struct binder_node *node = binder_get_node(proc, fp->binder);                 ⑧

            if (node == NULL) {
                node = binder_new_node(proc, fp->binder, fp->cookie);
                if (node == NULL) {
                    return_error = BR_FAILED_REPLY;
                    goto err_binder_new_node_failed;
                }
                node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
                node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
            }
            if (fp->cookie != node->cookie) {
                binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
                    proc->pid, thread->pid,
                    (u64)fp->binder, node->debug_id,
                    (u64)fp->cookie, (u64)node->cookie);
                return_error = BR_FAILED_REPLY;
                goto err_binder_get_ref_for_node_failed;
            }
            if (security_binder_transfer_binder(proc->tsk,
                                target_proc->tsk)) {
                return_error = BR_FAILED_REPLY;
                goto err_binder_get_ref_for_node_failed;
            }
            ref = binder_get_ref_for_node(target_proc, node);                            ⑨
            if (ref == NULL) {
                return_error = BR_FAILED_REPLY;
                goto err_binder_get_ref_for_node_failed;
            }
            if (fp->type == BINDER_TYPE_BINDER)                                          ⑩
                fp->type = BINDER_TYPE_HANDLE;
            else
                fp->type = BINDER_TYPE_WEAK_HANDLE;
            fp->handle = ref->desc;                                                      ①①
            binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
                       &thread->todo);

            trace_binder_transaction_node_to_ref(t, node, ref);
            binder_debug(BINDER_DEBUG_TRANSACTION,
                     "        node %d u%016llx -> ref %d desc %d\n",
                     node->debug_id, (u64)node->ptr,
                     ref->debug_id, ref->desc);
        } break;

    .........

if (reply) {
        BUG_ON(t->buffer->async_transaction != 0);
        binder_pop_transaction(target_thread, in_reply_to);
    } else if (!(t->flags & TF_ONE_WAY)) {
        BUG_ON(t->buffer->async_transaction != 0);
        t->need_reply = 1;
        t->from_parent = thread->transaction_stack;
        thread->transaction_stack = t;
    } else {
        BUG_ON(target_node == NULL);
        BUG_ON(t->buffer->async_transaction != 1);
        if (target_node->has_async_transaction) {
            target_list = &target_node->async_todo;
            target_wait = NULL;
        } else
            target_node->has_async_transaction = 1;
    }
    t->work.type = BINDER_WORK_TRANSACTION;
    list_add_tail(&t->work.entry, target_list);                                     ①②
    tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
    list_add_tail(&tcomplete->entry, &thread->todo);
    if (target_wait)
        wake_up_interruptible(target_wait);
    return;

.....

}
①: A nonzero target handle means a client is calling a server;
②: A target handle of 0 means the request is aimed at the ServiceManager service; the target struct binder_node is the global binder_context_mgr_node;
③: Get the binder_proc from the binder_node;
④: Check whether this call needs a reply;
⑤: Walk transaction_stack to find the target thread (not entered on a first transmission);
⑥: Allocate a buffer from the target process's mmap area, then copy_from_user() copies the user-space data straight into that buffer, so the target process can read it directly;
⑦: Get the address of each struct flat_binder_object; offp holds the object's offset from the start of the data (see section 3.2 of the C service application article);
⑧: Look up, and if necessary construct, a binder_node for the binder entity passed in;
⑨: Check whether the target process's refs_by_node tree already holds a ref pointing at this node; if not, create one for the target process;
⑩: The object was a binder entity; to hand it to ServiceManager it must be converted into a handle reference type;
①①: Store the desc of the ref just created into the data that will travel up to the application layer (this is the future handle), then bump the node's reference count, which queues the bookkeeping work on the calling thread's todo list;
①②: Queue the transaction on the target process's or target thread's todo list and wake it up;

ServiceManager's binder_proc now has a new binder_ref on its refs_by_node tree pointing at the binder entity that was passed in;
the new binder_ref's desc member is 1 (the handle handed to the application layer), because it is the first reference to point at that node; later refs receive increasing values;

4.1.2 binder_thread_read

  • First read:

Right after the write, the read begins; since at first the process's and thread's todo lists hold nothing to process, a BR_NOOP is returned and the thread goes to sleep;

During the write, references were added to the node of the newly created binder entity and the work was queued on the todo list, so the led_control_service process is woken;
it then processes the BINDER_WORK_NODE work, producing commands such as BR_INCREFS and BR_ACQUIRE;

  • Second read:

This read happens after the BR_INCREFS, BR_ACQUIRE and similar commands have been handled; the thread reads once more and goes to sleep;

PS: You can set this aside for now and follow the ServiceManager wakeup flow below first;

Now back to the led_control_service process, woken by ServiceManager;
not much happens here: the reply data is assembled and passed back up to user space,
and then a free-buffer command is sent down so the kernel releases the memory it allocated from the mmap area;

4.1.3 ServiceManager wakes up

4.1.3.1 binder_thread_read

Section 3.2.3 explains this function in detail; here is what the ServiceManager process does after being woken:

①: Take the pending work off the process's todo list;
②: Find the struct binder_transaction address for that work, and process the data that was copied from the sending process into the mmap buffer;
③: Deliver it to ServiceManager's user space, which then starts processing the data;

For the flow user space follows once it parses the BR_TRANSACTION command out of the received data, see section 2.4 of the C service application article;

4.1.3.2 binder_thread_write

    1. When do_add_service() runs, the application layer ends by writing two more commands to the driver (BC_ACQUIRE and BC_REQUEST_DEATH_NOTIFICATION);
    2. After the function has run and the reply is written, two further commands are issued (BC_FREE_BUFFER and BC_REPLY);

4.1.3.2.1 BC_ACQUIRE


        switch (cmd) {
        case BC_INCREFS:
        case BC_ACQUIRE:
        case BC_RELEASE:
        case BC_DECREFS: {
            uint32_t target;
            struct binder_ref *ref;
            const char *debug_string;

            if (get_user(target, (uint32_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(uint32_t);
            if (target == 0 && binder_context_mgr_node &&                                    ①
                (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
                ref = binder_get_ref_for_node(proc,
                           binder_context_mgr_node);
                if (ref->desc != target) {
                    binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
                        proc->pid, thread->pid,
                        ref->desc);
                }
            } else
                ref = binder_get_ref(proc, target);
            if (ref == NULL) {
                binder_user_error("%d:%d refcount change on invalid ref %d\n",
                    proc->pid, thread->pid, target);
                break;
            }
            switch (cmd) {
            case BC_INCREFS:
                debug_string = "IncRefs";
                binder_inc_ref(ref, 0, NULL);
                break;
            case BC_ACQUIRE:                                                                ②
                debug_string = "Acquire";
                binder_inc_ref(ref, 1, NULL);
                break;
            case BC_RELEASE:
                debug_string = "Release";
                binder_dec_ref(ref, 1);
                break;
            case BC_DECREFS:
            default:
                debug_string = "DecRefs";
                binder_dec_ref(ref, 0);
                break;
            }
            binder_debug(BINDER_DEBUG_USER_REFS,
                     "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
                     proc->pid, thread->pid, debug_string, ref->debug_id,
                     ref->desc, ref->strong, ref->weak, ref->node->debug_id);
            break;
        }
①: Look up the binder_ref from the handle passed in (a handle of 0 with BC_INCREFS/BC_ACQUIRE resolves to a ref on binder_context_mgr_node);
②: Add a strong reference to the ref just obtained;

4.1.3.2.2 BC_REQUEST_DEATH_NOTIFICATION

case BC_REQUEST_DEATH_NOTIFICATION:
        case BC_CLEAR_DEATH_NOTIFICATION: {
            uint32_t target;
            binder_uintptr_t cookie;
            struct binder_ref *ref;
            struct binder_ref_death *death;

            if (get_user(target, (uint32_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(uint32_t);
            if (get_user(cookie, (binder_uintptr_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(binder_uintptr_t);
            ref = binder_get_ref(proc, target);                                    ①
            if (ref == NULL) {
                break;
            }

            if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
                if (ref->death) {
                    break;
                }
                death = kzalloc(sizeof(*death), GFP_KERNEL);
                if (death == NULL) {
                    thread->return_error = BR_ERROR;
                    break;
                }
                binder_stats_created(BINDER_STAT_DEATH);
                INIT_LIST_HEAD(&death->work.entry);
                death->cookie = cookie;
                ref->death = death;                                                  ②
                if (ref->node->proc == NULL) {
                    ref->death->work.type = BINDER_WORK_DEAD_BINDER;
                    if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
                        list_add_tail(&ref->death->work.entry, &thread->todo);
                    } else {
                        list_add_tail(&ref->death->work.entry, &proc->todo);
                        wake_up_interruptible(&proc->wait);
                    }
                }
            } else {
                if (ref->death == NULL) {
                    break;
                }
                death = ref->death;
                if (death->cookie != cookie) {
                    break;
                }
                ref->death = NULL;
                if (list_empty(&death->work.entry)) {
                    death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
                    if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
                        list_add_tail(&death->work.entry, &thread->todo);
                    } else {
                        list_add_tail(&death->work.entry, &proc->todo);
                        wake_up_interruptible(&proc->wait);
                    }
                } else {
                    BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
                    death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
                }
            }
        } break;
①: Get the ref from the handle;
②: Hang the death notification on the ref's death field;

After this, when the process owning the node this ref points to (in this scenario, led_control_service) dies, ServiceManager is notified;
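User space arms the notification by writing the command followed by the handle and a cookie, roughly as servicemanager's binder_link_to_death() does. A sketch (binder_write() is the write-only helper sketched in 3.2.1; the cookie is any value the caller wants echoed back in BR_DEAD_BINDER):

#include <stdint.h>
#include <linux/android/binder.h>

int binder_write(int fd, void *data, size_t len);   /* sketched in 3.2.1 */

/* Ask the driver to report the death of the process behind `handle`. */
static int request_death_notification(int fd, uint32_t handle, binder_uintptr_t cookie)
{
    struct __attribute__((packed)) {
        uint32_t cmd;
        uint32_t handle;
        binder_uintptr_t cookie;   /* echoed back with BR_DEAD_BINDER */
    } data = {
        .cmd = BC_REQUEST_DEATH_NOTIFICATION,
        .handle = handle,
        .cookie = cookie,
    };

    return binder_write(fd, &data, sizeof(data));
}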

4.1.3.2.3 BC_FREE_BUFFER

        case BC_FREE_BUFFER: {
            binder_uintptr_t data_ptr;
            struct binder_buffer *buffer;

            if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(binder_uintptr_t);

            buffer = binder_buffer_lookup(proc, data_ptr);                            ①
            if (buffer == NULL) {
                break;
            }
            if (!buffer->allow_user_free) {
                break;
            }

            if (buffer->transaction) {
                buffer->transaction->buffer = NULL;
                buffer->transaction = NULL;
            }
            if (buffer->async_transaction && buffer->target_node) {
                BUG_ON(!buffer->target_node->has_async_transaction);
                if (list_empty(&buffer->target_node->async_todo))
                    buffer->target_node->has_async_transaction = 0;
                else
                    list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
            }
            trace_binder_transaction_buffer_release(buffer);
            binder_transaction_buffer_release(proc, buffer, NULL);                   ②
            binder_free_buf(proc, buffer);
            break;
        }
①: From the address in data.ptr.buffer, look up the mmap buffer that was allocated earlier (see 4.1.1.2) to hold the data led_control_service wrote into the kernel;
②: Release that buffer;

Note that although data_ptr arrives from user space, the value was originally handed up by kernel space, and user space passes it back unchanged;
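Handing the buffer back is one more write-only command carrying that same pointer down again (a sketch, reusing the binder_write() helper from 3.2.1):

#include <stdint.h>
#include <linux/android/binder.h>

int binder_write(int fd, void *data, size_t len);   /* sketched in 3.2.1 */

/* Give the transaction buffer back to the driver once the data at
 * tr.data.ptr.buffer has been consumed. */
static int free_binder_buffer(int fd, binder_uintptr_t buffer)
{
    struct __attribute__((packed)) {
        uint32_t cmd;
        binder_uintptr_t buffer;   /* value received in tr.data.ptr.buffer */
    } data = {
        .cmd = BC_FREE_BUFFER,
        .buffer = buffer,
    };

    return binder_write(fd, &data, sizeof(data));
}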

4.1.3.2.4 BC_REPLY

        case BC_TRANSACTION:
        case BC_REPLY: {
            struct binder_transaction_data tr;

            if (copy_from_user(&tr, ptr, sizeof(tr)))
                return -EFAULT;
            ptr += sizeof(tr);
            binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
            break;
        }

The BC_TRANSACTION path was covered above; here we look specifically at the reply branch:

static void binder_transaction(struct binder_proc *proc,
                   struct binder_thread *thread,
                   struct binder_transaction_data *tr, int reply)
{

    .......

    if (reply) {
        in_reply_to = thread->transaction_stack;
        if (in_reply_to == NULL) {
            binder_user_error("%d:%d got reply transaction with no transaction stack\n",
                      proc->pid, thread->pid);
            return_error = BR_FAILED_REPLY;
            goto err_empty_call_stack;
        }
        binder_set_nice(in_reply_to->saved_priority);
        if (in_reply_to->to_thread != thread) {
            return_error = BR_FAILED_REPLY;
            in_reply_to = NULL;
            goto err_bad_call_stack;
        }
        thread->transaction_stack = in_reply_to->to_parent;                    ①
        target_thread = in_reply_to->from;
        if (target_thread == NULL) {
            return_error = BR_DEAD_REPLY;
            goto err_dead_binder;
        }
        if (target_thread->transaction_stack != in_reply_to) {
            return_error = BR_FAILED_REPLY;
            in_reply_to = NULL;
            target_thread = NULL;
            goto err_dead_binder;
        }
        target_proc = target_thread->proc;                                     ②
    } else {
       ......
    }

    .....

  if (target_thread) {                                                    ③
        e->to_thread = target_thread->pid;
        target_list = &target_thread->todo;
        target_wait = &target_thread->wait;
    } else {
        target_list = &target_proc->todo;
        target_wait = &target_proc->wait;
    }
    ......
①: Find the target thread from this thread's transaction stack (the current process here is ServiceManager);
②: Find the target process through the target thread;
③: Note that a reply is completed at thread granularity: the work goes onto the target thread's todo list;

Next the data is copied in from user space, the transaction is queued on the target thread's todo list, and the target thread is woken;
that brings execution back to the led_control_service process; see 4.1.2;

4.2 Setting the thread-pool limit

    case BINDER_SET_MAX_THREADS:
        if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
            ret = -EINVAL;
            goto err;
        }
        break;

The limit value is copied into the max_threads member of the proc and kept there;
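From user space this is a single ioctl; the uapi header defines BINDER_SET_MAX_THREADS with a 32-bit payload, hence the uint32_t here (a sketch):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

/* Cap the number of looper threads the driver may ask this process to
 * spawn; once reached, it stops sending BR_SPAWN_LOOPER. */
static int binder_set_max_threads(int fd, uint32_t max_threads)
{
    return ioctl(fd, BINDER_SET_MAX_THREADS, &max_threads);
}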

4.3 Flow summary

Starting from service registration:

(Flow diagram: the registration path from led_control_service through the driver to ServiceManager and back.)

5. Usage notes on mmap

The descriptions below are all from the processes' kernel-space point of view;

5.1 Kernel space

Reading the driver source shows that, during service registration, once the led_control_service process copies its user-space data into the kernel and wakes the ServiceManager process, ServiceManager's kernel side can use the data directly;

5.2 User space

One more point: when ServiceManager passes the kernel-space data up to user space, only the address of the data in the mmap buffer that received led_control_service's transaction is handed to ServiceManager's user space, and user space can then access the data directly through that address;

Those are the two ways binder uses mmap: across processes, and between kernel and user space;

PS: This article has grown long enough, so the service lookup flow is left for the next one; binder has many more details that still need writing up.
Read my articles from top to bottom; that habit comes from writing a lot of C.
