Android源碼閱讀---Service Manager進程

Android源碼閱讀—Service Manager進程


ServiceManager 是 android中IPC機制的關鍵組件,它是一個bin可執行程序,由init進程開啓並作爲一個系統服務一直運行,主要爲各個binder客戶端提供服務查詢、爲binder服務端提供註冊等功能,如果沒有該服務, android的binder機制將無法正常工作。

1. 編譯命令

# Build the servicemanager native executable from service_manager.c and
# binder.c; its init configuration is provided by servicemanager.rc.
include $(CLEAR_VARS)
LOCAL_SHARED_LIBRARIES := liblog libcutils libselinux
LOCAL_SRC_FILES := service_manager.c binder.c
LOCAL_CFLAGS += $(svc_c_flags)
LOCAL_MODULE := servicemanager
LOCAL_INIT_RC := servicemanager.rc
include $(BUILD_EXECUTABLE)

從這個編譯命令來看,這個bin程序涉及到的源文件並不多,主要有service_manager.c和binder.c,然後它的配置文件單獨拿出來,存放在編譯文件同級目錄下

# servicemanager runs as user/group "system" in the "core" service class.
# "critical" asks init to escalate if it crashes repeatedly, and each
# "onrestart" line restarts a dependent daemon whenever it restarts.
service servicemanager /system/bin/servicemanager
    class core
    user system
    group system readproc
    critical
    onrestart restart healthd
    onrestart restart zygote
    onrestart restart audioserver
    onrestart restart media
    onrestart restart surfaceflinger
    onrestart restart inputflinger
    onrestart restart drm
    onrestart restart cameraserver
    writepid /dev/cpuset/system-background/tasks
    onrestart restart filelock

從配置文件可以看出,該servicemanager 屬於core組,一旦該服務重啓,其他系統服務如healthd、zygote、audioserver等也會restart
然後在init.rc中,在boot觸發階段有一個class_start core的 command, 它對應的函數static int do_class_start(const std::vector<std::string>& args)將會start服務塊鏈表中class爲core的service,因爲servicemanager 也屬於core組,所以在boot觸發階段,init進程也會開啓一個子進程,然後讓子進程開始運行servicemanager。

on boot
    # basic network init
    ifup lo
    hostname localhost
    domainname localdomain

    # Memory management. Basic kernel parameters, and allow the high
    # level system server to be able to adjust the kernel OOM driver
    # parameters to match how it is managing things.
    write /proc/sys/vm/overcommit_memory 1
# ... (lines omitted) ...
    # Define default initial receive window size in segments.
    setprop net.tcp.default_init_rwnd 60
    class_start core

2. main函數流程

分析一個bin進程,首先看看它的 main函數,在該main函數中主要做了:

  1. 使用函數binder_open打開binder設備
  2. 使用binder_become_context_manager將自己設置爲binder管家(context manager)
  3. binder_loop循環
/*
 * servicemanager entry point: open the binder driver, register this
 * process as the binder context manager (the well-known handle 0),
 * initialize SELinux support, then loop forever dispatching incoming
 * transactions to svcmgr_handler.
 */
int main()
{
    struct binder_state *bs;
    bs = binder_open(128*1024);/* map 128 KB of binder buffer space */
    if (!bs) {
        ALOGE("failed to open binder driver\n");
        return -1;
    }
    /* Tell the driver this process is the context manager. */
    if (binder_become_context_manager(bs)) {
        ALOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }
    selinux_enabled = is_selinux_enabled();
    sehandle = selinux_android_service_context_handle();
    selinux_status_open(true);
    /* With SELinux enabled, a missing label handle or context is fatal. */
    if (selinux_enabled > 0) {
        if (sehandle == NULL) {
            ALOGE("SELinux: Failed to acquire sehandle. Aborting.\n");
            abort();
        }
        if (getcon(&service_manager_context) != 0) {
            ALOGE("SELinux: Failed to acquire service_manager context. Aborting.\n");
            abort();
        }
    }
    /* Route SELinux audit and log output through our callbacks. */
    union selinux_callback cb;
    cb.func_audit = audit_callback;
    selinux_set_callback(SELINUX_CB_AUDIT, cb);
    cb.func_log = selinux_log_callback;
    selinux_set_callback(SELINUX_CB_LOG, cb);
    /* Enter the service loop; does not return in normal operation. */
    binder_loop(bs, svcmgr_handler);
    return 0;
}

3. 打開binder設備

binder_open主要是做一個內存映射的工作,核心在函數mmap中,

/* Per-process binder driver state: fd, mmap base, and mapping size. */
struct binder_state
{
    int fd;
    void *mapped;
    size_t mapsize;
};
/*
 * Open /dev/binder, verify the kernel protocol version matches this
 * binary, and mmap `mapsize` bytes of driver buffer space into this
 * process. Returns a heap-allocated binder_state, or NULL on failure
 * (with the fd closed and the struct freed via the goto cleanup path).
 */
struct binder_state *binder_open(size_t mapsize)
{
    struct binder_state *bs;
    struct binder_version vers;
    bs = malloc(sizeof(*bs));/* allocate the binder_state that records driver info */
    if (!bs) {
        errno = ENOMEM;
        return NULL;
    }
    bs->fd = open("/dev/binder", O_RDWR | O_CLOEXEC);/* open the driver node */
    if (bs->fd < 0) {
        fprintf(stderr,"binder: cannot open device (%s)\n",
                strerror(errno));
        goto fail_open;
    }
    /* Refuse to run against a driver speaking a different protocol version. */
    if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||  (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
        fprintf(stderr,
                "binder: kernel driver version (%d) differs from user space version (%d)\n",
                vers.protocol_version, BINDER_CURRENT_PROTOCOL_VERSION);
        goto fail_open;
    }
    bs->mapsize = mapsize;/* caller passes 128 KB */
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);/* binder_state is now fully populated */
    if (bs->mapped == MAP_FAILED) {
        fprintf(stderr,"binder: cannot map device (%s)\n",
                strerror(errno));
        goto fail_map;
    }
    return bs;
fail_map:
    close(bs->fd);
fail_open:
    free(bs);
    return NULL;
}

4. 將自己設置爲binder管家(context manager)

/*
 * Register this process as the binder context manager — the well-known
 * handle 0 that every client contacts first. Returns the raw ioctl
 * result: 0 on success, -1 with errno set on failure.
 */
int binder_become_context_manager(struct binder_state *bs)
{
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);/* claim handle 0 via BINDER_SET_CONTEXT_MGR */
}

5. loop循環

循環的做下面2步操作

  • 對binder驅動執行讀信息
  • 將拿到的信息進行處理
/*
 * Main service loop. First announces this thread to the driver with
 * BC_ENTER_LOOPER, then repeatedly blocks in BINDER_WRITE_READ, reads a
 * batch of commands into readbuf, and hands them to binder_parse, which
 * dispatches transactions to `func`. Only returns if the ioctl or the
 * parser fails.
 */
void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr; /* read/write descriptor exchanged with the driver */
/*
 * Layout of binder_write_read (from the kernel UAPI header):
struct binder_write_read {
  binder_size_t write_size;
 binder_size_t write_consumed;
  binder_uintptr_t write_buffer;
  binder_size_t read_size;
 binder_size_t read_consumed;
 binder_uintptr_t read_buffer;
};
*/
    uint32_t readbuf[32]; /* scratch buffer for commands read from the driver */

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(uint32_t));/* push BC_ENTER_LOOPER down to the driver */
/* loop forever, blocking in the driver until work arrives */
    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;/* driver fills readbuf */
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);/* blocking read of the next command batch */
        if (res < 0) {
            ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }
/* decode and dispatch everything the driver returned */
        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        if (res == 0) {
            ALOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }/* end of for loop */
}

5.1 從binder中讀數據

通過 ioctl(bs->fd, BINDER_WRITE_READ, &bwr) 從binder驅動執行讀取操作,並把讀到的數據寫入readbuf中

5.2 解析數據

將第一步讀取到的數據進行解析,具體實現在函數binder_parse中執行
首先,將數據中的第一個uint32_t 作爲一個cmd命令
然後,根據cmd命令來判斷執行各種的邏輯

/*
 * Walk the command stream returned by the driver and act on each record.
 * The region [ptr, ptr+size) is a sequence of 32-bit BR_* command codes,
 * each followed by its payload. BR_TRANSACTION payloads are handed to
 * `func`; BR_REPLY payloads are delivered into `bio` when non-NULL.
 * Returns 1 to keep looping, 0 once a reply was consumed, <0 on error.
 */
int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uintptr_t ptr, size_t size, binder_handler func)
{
    int r = 1;
    uintptr_t end = ptr + (uintptr_t) size;/* one past the last byte of data */
    while (ptr < end) {
        uint32_t cmd = *(uint32_t *) ptr;/* each record starts with a 32-bit BR_* code that selects the switch arm */
        ptr += sizeof(uint32_t);/* advance past the command word to its payload */
        switch(cmd) {
        case BR_NOOP:
            break;
        case BR_TRANSACTION_COMPLETE:
            break;
        case BR_INCREFS:
        case BR_ACQUIRE:
        case BR_RELEASE:
        case BR_DECREFS:
            /* reference-count commands carry a ptr/cookie payload we skip */
            ptr += sizeof(struct binder_ptr_cookie);
            break;
        case BR_TRANSACTION: {
            /* inbound request: payload is a binder_transaction_data */
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            if ((end - ptr) < sizeof(*txn)) {
                ALOGE("parse: txn too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (func) {
                unsigned rdata[256/4];
                struct binder_io msg;
                struct binder_io reply;
                int res;

                bio_init(&reply, rdata, sizeof(rdata), 4);
                bio_init_from_txn(&msg, txn);
                res = func(bs, txn, &msg, &reply);/* svcmgr_handler in this program */
                if (txn->flags & TF_ONE_WAY) {
                    /* one-way call: no reply expected, just free the buffer */
                    binder_free_buffer(bs, txn->data.ptr.buffer);
                } else {
                    binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
                }
            }
            ptr += sizeof(*txn);
            break;
        }
        case BR_REPLY: {
            /* reply to a request we issued; deliver it into `bio` if the caller asked */
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            if ((end - ptr) < sizeof(*txn)) {
                ALOGE("parse: reply too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (bio) {
                bio_init_from_txn(bio, txn);
                bio = 0;
            } else {
                /* todo FREE BUFFER */
            }
            ptr += sizeof(*txn);
            r = 0;
            break;
        }
        case BR_DEAD_BINDER: {
            /* a binder we were watching died; run its registered death callback */
            struct binder_death *death = (struct binder_death *)(uintptr_t) *(binder_uintptr_t *)ptr;
            ptr += sizeof(binder_uintptr_t);
            death->func(bs, death->ptr);
            break;
        }
        case BR_FAILED_REPLY:
            r = -1;
            break;
        case BR_DEAD_REPLY:
            r = -1;
            break;
        default:
            ALOGE("parse: OOPS %d\n", cmd);
            return -1;
        }
    }
    return r;
}

然後主要看看BR_TRANSACTION、BR_REPLY、BR_DEAD_BINDER這3個命令做了什麼操作

5.2.1 BR_TRANSACTION

        case BR_TRANSACTION: {
/* If cmd is BR_TRANSACTION, the bytes that follow are interpreted
 * directly as a binder_transaction_data: */
/*
struct binder_transaction_data {
  union { __u32 handle; binder_uintptr_t ptr;} target;
  binder_uintptr_t cookie;
  __u32 code;
  __u32 flags;
  pid_t sender_pid;
  uid_t sender_euid;
  binder_size_t data_size;
  binder_size_t offsets_size;
  union { struct {binder_uintptr_t buffer; binder_uintptr_t offsets; } ptr;
    __u8 buf[8];
  } data;
};
*/
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            if ((end - ptr) < sizeof(*txn)) {/* bounds check on the payload size */
                ALOGE("parse: txn too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (func) {
                unsigned rdata[256/4];
                struct binder_io msg;
                struct binder_io reply;
                int res;
                bio_init(&reply, rdata, sizeof(rdata), 4); /* set up the reply binder_io */
                bio_init_from_txn(&msg, txn);
                res = func(bs, txn, &msg, &reply);/* svcmgr_handler processes txn; the result is stored in reply */
                if (txn->flags & TF_ONE_WAY) {
                    binder_free_buffer(bs, txn->data.ptr.buffer);
                } else {
                    binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
                }
            }
            ptr += sizeof(*txn);/* advance past the binder_transaction_data just consumed */
            break;
        }

svcmgr_handler函數邏輯

/*
 * Handle one transaction addressed to servicemanager. Validates the
 * target and the interface token, refreshes the SELinux label handle if
 * policy changed, then dispatches on txn->code: service lookup by name,
 * service registration, or service enumeration. Returns 0 on success,
 * -1 on any validation or processing failure.
 */
int svcmgr_handler(struct binder_state *bs, struct binder_transaction_data *txn,struct binder_io *msg, struct binder_io *reply)
{
    struct svcinfo *si;
    uint16_t *s;
    size_t len;
    uint32_t handle;
    uint32_t strict_policy;
    int allow_isolated;
    if (txn->target.ptr != BINDER_SERVICE_MANAGER)/* drop anything not addressed to servicemanager */
        return -1;

    if (txn->code == PING_TRANSACTION)
        return 0;

    // Equivalent to Parcel::enforceInterface(), reading the RPC header with the strict mode policy mask and the interface name.
    // Note that we ignore the strict_policy and don't propagate it further (since we do no outbound RPCs anyway).
    strict_policy = bio_get_uint32(msg);
    s = bio_get_string16(msg, &len);
    if (s == NULL) {
        return -1;
    }

    /* The interface token must exactly match svcmgr_id. */
    if ((len != (sizeof(svcmgr_id) / 2)) ||
        memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
        fprintf(stderr,"invalid id %s\n", str8(s, len));
        return -1;
    }
    /* Reload the SELinux service-context handle if policy was updated. */
    if (sehandle && selinux_status_updated() > 0) {
        struct selabel_handle *tmp_sehandle = selinux_android_service_context_handle();
        if (tmp_sehandle) {
            selabel_close(sehandle);
            sehandle = tmp_sehandle;
        }
    }
/* Dispatch on the transaction code:
 * 1. SVC_MGR_CHECK_SERVICE / SVC_MGR_GET_SERVICE look up a server's handle by the name in s
 * 2. SVC_MGR_ADD_SERVICE registers a server
 * 3. SVC_MGR_LIST_SERVICES enumerates the registered services
 */
    switch(txn->code) {
    case SVC_MGR_GET_SERVICE:
    case SVC_MGR_CHECK_SERVICE:
        s = bio_get_string16(msg, &len);
        if (s == NULL) {
            return -1;
        }
        handle = do_find_service(s, len, txn->sender_euid, txn->sender_pid);/* look up the handle registered under name s */
        if (!handle)
            break;
        bio_put_ref(reply, handle);/* return the handle to the caller via reply */
        return 0;

    case SVC_MGR_ADD_SERVICE:
        s = bio_get_string16(msg, &len);
        if (s == NULL) {
            return -1;
        }
        handle = bio_get_ref(msg);
        allow_isolated = bio_get_uint32(msg) ? 1 : 0;
        if (do_add_service(bs, s, len, handle, txn->sender_euid,
            allow_isolated, txn->sender_pid))
            return -1;
        break;

    case SVC_MGR_LIST_SERVICES: {
        /* walk the service list and return the name of the n-th entry */
        uint32_t n = bio_get_uint32(msg);

        if (!svc_can_list(txn->sender_pid, txn->sender_euid)) {
            ALOGE("list_service() uid=%d - PERMISSION DENIED\n",
                    txn->sender_euid);
            return -1;
        }
        si = svclist;
        while ((n-- > 0) && si)
            si = si->next;
        if (si) {
            bio_put_string16(reply, si->name);
            return 0;
        }
        return -1;
    }
    default:
        ALOGE("unknown code %d\n", txn->code);
        return -1;
    }

    bio_put_uint32(reply, 0);
    return 0;
}

所以在binder_parse函數中,如果讀到的信息中cmd命令是BR_TRANSACTION,表示有其他進程在向sm進行服務的查詢或者註冊等,而sm主要的功能就是對服務的管理,看來sm主要邏輯就是走這個switch分支了。所以到此,整個service manager二進制程序的主要邏輯就瞭解完了。

5.2.2 BR_REPLY

和service manager程序相關性不大,

5.2.3 BR_DEAD_BINDER

和service manager程序相關性不大,

在這裏插入圖片描述

參考 《深入理解android內核設計思想》

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章