Android Binder Driver - How the Media Service Is Added

Related articles:

1. Android FrameWork - Getting Started
2. Android FrameWork - The Init Process at Boot
3. Linux Knowledge Android Developers Should Know
4. The Binder Driver from the Linux Kernel's Perspective
5. JNI Basics - The Serialization Process of Android Shared Memory
6. A Brief Introduction to Android's Binder IPC Mechanism and a Study Plan

Related source files:

/frameworks/av/media/mediaserver/main_mediaserver.cpp
/frameworks/native/libs/binder/ProcessState.cpp
/frameworks/av/media/libmediaplayerservice/MediaPlayerService.cpp
/frameworks/native/libs/binder/IServiceManager.cpp
/frameworks/native/include/binder/IInterface.h
/frameworks/native/libs/binder/Binder.cpp
/frameworks/native/libs/binder/IPCThreadState.cpp

The Media process is created by the init process when it parses the init.rc file.

service media /system/bin/mediaserver 
    class main
    user media
    group audio camera inet net_bt net_bt_admin net_bw_acct drmrpc mediadrm     
    ioprio rt 4

The source for this executable is in /frameworks/av/media/mediaserver/main_mediaserver.cpp. Let's find the main method; note that in this analysis we mainly care about the Binder driver:

int main(int argc __unused, char** argv)
{
    ...
    InitializeIcuOrDie();
    // ProcessState is a singleton; treat sp<ProcessState> simply as a ProcessState
    sp<ProcessState> proc(ProcessState::self());
    ...
    // Register the MediaPlayerService service
    MediaPlayerService::instantiate();
    ...
    // Start the Binder thread pool
    ProcessState::self()->startThreadPool();
    // Add the current thread to the thread pool
    IPCThreadState::self()->joinThreadPool();
}

sp<ProcessState> ProcessState::self()
{
    Mutex::Autolock _l(gProcessMutex);
    if (gProcess != NULL) {
        return gProcess;
    }

    // Instantiate ProcessState; this jumps to the constructor
    gProcess = new ProcessState;
    return gProcess;
}

ProcessState::ProcessState()
    : mDriverFD(open_driver()) // note the open_driver() call here, which opens the Binder driver
    , mVMStart(MAP_FAILED)
    , mThreadCountLock(PTHREAD_MUTEX_INITIALIZER)
    , mThreadCountDecrement(PTHREAD_COND_INITIALIZER)
    , mExecutingThreadsCount(0)
    , mMaxThreads(DEFAULT_MAX_BINDER_THREADS)
    , mManagesContexts(false)
    , mBinderContextCheckFunc(NULL)
    , mBinderContextUserData(NULL)
    , mThreadPoolStarted(false)
    , mThreadPoolSeq(1)
{
    if (mDriverFD >= 0) {
        // Use the memory-mapping function mmap to set aside a chunk of virtual address space for binder
        // BINDER_VM_SIZE = 1M - 8k
        mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
        if (mVMStart == MAP_FAILED) {
            // There is not enough space to map for /dev/binder, so close the driver
            close(mDriverFD); 
            mDriverFD = -1;
        }
    }
}

static int open_driver()
{
    // Open the /dev/binder device, establishing the channel to the kernel's Binder driver
    int fd = open("/dev/binder", O_RDWR);
    if (fd >= 0) {
        fcntl(fd, F_SETFD, FD_CLOEXEC);
        int vers = 0;
        status_t result = ioctl(fd, BINDER_VERSION, &vers);
        if (result == -1) {
            close(fd);
            fd = -1;
        }
        if (result != 0 || vers != BINDER_CURRENT_PROTOCOL_VERSION) {
            close(fd);
            fd = -1;
        }
        size_t maxThreads = DEFAULT_MAX_BINDER_THREADS;

        // Via ioctl, tell the binder driver the maximum number of threads it can support
        result = ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads);
        if (result == -1) {
            ALOGE("Binder ioctl to set max threads failed: %s", strerror(errno));
        }
    } else {
        ALOGW("Opening '/dev/binder' failed: %s\n", strerror(errno));
    }
    return fd;
}
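Putting ProcessState's constructor and open_driver() together, the user-space half of the setup is just open → ioctl(BINDER_VERSION) → ioctl(BINDER_SET_MAX_THREADS) → mmap. The following stand-alone sketch (my own illustration, not AOSP source; it assumes a device that exposes /dev/binder and a kernel UAPI header at linux/android/binder.h, which older kernels ship as linux/binder.h) walks through the same sequence:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <cstdint>
#include <cstdio>
#include <linux/android/binder.h>   // BINDER_VERSION, BINDER_SET_MAX_THREADS, binder_version

int main() {
    // Same as open_driver(): open the device read/write, close-on-exec
    int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
    if (fd < 0) { perror("open /dev/binder"); return 1; }

    // Verify that user space and the driver speak the same protocol version
    binder_version vers{};
    if (ioctl(fd, BINDER_VERSION, &vers) < 0 ||
        vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION) {
        perror("BINDER_VERSION"); close(fd); return 1;
    }

    // Tell the driver how many binder threads it may ask this process to spawn
    uint32_t maxThreads = 15;   // DEFAULT_MAX_BINDER_THREADS in ProcessState.cpp
    ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads);

    // Map the receive buffer; same size ProcessState uses, 1 MB minus two pages
    const size_t vmSize = (1 * 1024 * 1024) - (4096 * 2);
    void* vmStart = mmap(nullptr, vmSize, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, fd, 0);
    if (vmStart == MAP_FAILED) { perror("mmap"); close(fd); return 1; }

    printf("binder protocol %d, buffer mapped at %p\n", vers.protocol_version, vmStart);
    munmap(vmStart, vmSize);
    close(fd);
    return 0;
}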

The binder driver is one of the more obscure and hard-to-understand parts of the Android FrameWork. Years ago I also tried to read this code, and my first impression was that I would never understand it in this lifetime. So at the start we don't need to pick apart every detail, and solid Linux fundamentals matter a great deal. For now it is enough to know that open opens the driver, mmap maps it, ioctl operates on it, and close closes it, corresponding to the driver-side binder_open, binder_mmap, binder_ioctl and binder_close functions. Next, let's look at MediaPlayerService::instantiate():

void MediaPlayerService::instantiate() {
    // Register the service; defaultServiceManager() = new BpServiceManager(new BpBinder(0))
    defaultServiceManager()->addService(String16("media.player"), new MediaPlayerService());
}

sp<IServiceManager> defaultServiceManager() {
    // This is also a singleton, but it is certainly NULL the first time through
    if (gDefaultServiceManager != NULL) return gDefaultServiceManager;
    // Take an automatic lock
    AutoMutex _l(gDefaultServiceManagerLock);
    while (gDefaultServiceManager == NULL) {
        // ProcessState::self() was covered above; the key call here is getContextObject(NULL)
        gDefaultServiceManager = interface_cast<IServiceManager>(
                ProcessState::self()->getContextObject(NULL));
        // The ServiceManager process may not have finished initializing yet, so wait a bit
        if (gDefaultServiceManager == NULL) {
            sleep(1);
        }
    }
    return gDefaultServiceManager;
}

sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& /*caller*/) {
    return getStrongProxyForHandle(0);
}

sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle) {
    sp<IBinder> result;
    AutoMutex _l(mLock);
    handle_entry* e = lookupHandleLocked(handle);
    if (e != NULL) {
        // The handle_entry comes from a cache; on the first call e->binder is NULL
        IBinder* b = e->binder;
        if (b == NULL || !e->refs->attemptIncWeak(this)) {
            // handle is 0 here; PING_TRANSACTION checks whether the ServiceManager process is reachable
            if (handle == 0) {
                Parcel data;
                status_t status = IPCThreadState::self()->transact(
                        0, IBinder::PING_TRANSACTION, data, NULL, 0);
                if (status == DEAD_OBJECT) {
                    return NULL;
                }
            }
            // new BpBinder(0);
            b = new BpBinder(handle);
            e->binder = b;
            if (b) e->refs = b->getWeakRefs();
            result = b;
        } else {
            result.force_set(b);
            e->refs->decWeak(this);
        }
    }
    return result;
}

ProcessState::self()->getContextObject(NULL) returns new BpBinder(0). A handle value of 0 refers to the ServiceManager process; the startup of ServiceManager itself will be analyzed later. Next let's look at interface_cast<IServiceManager>(new BpBinder(0)):

template<typename INTERFACE>
inline sp<INTERFACE> interface_cast(const sp<IBinder>&obj) {
    return INTERFACE::asInterface(obj);
}

DECLARE_META_INTERFACE(ServiceManager);

#define DECLARE_META_INTERFACE(INTERFACE)                                   \
    static const android::String16 descriptor;                              \
    static android::sp<I##INTERFACE> asInterface(                           \
            const android::sp<android::IBinder>& obj);                      \
    virtual const android::String16& getInterfaceDescriptor() const;        \
    I##INTERFACE();                                                         \
    virtual ~I##INTERFACE();

IMPLEMENT_META_INTERFACE(ServiceManager, "android.os.IServiceManager");

#define IMPLEMENT_META_INTERFACE(INTERFACE, NAME)                           \
    const android::String16 I##INTERFACE::descriptor(NAME);                 \
    const android::String16& I##INTERFACE::getInterfaceDescriptor() const { \
        return I##INTERFACE::descriptor;                                    \
    }                                                                       \
    android::sp<I##INTERFACE> I##INTERFACE::asInterface(                    \
            const android::sp<android::IBinder>& obj) {                     \
        android::sp<I##INTERFACE> intr;                                     \
        if (obj != NULL) {                                                  \
            intr = static_cast<I##INTERFACE*>(                              \
                    obj->queryLocalInterface(I##INTERFACE::descriptor).get()); \
            if (intr == NULL) {                                             \
                intr = new Bp##INTERFACE(obj);                              \
            }                                                               \
        }                                                                   \
        return intr;                                                        \
    }                                                                       \
    I##INTERFACE::I##INTERFACE() { }                                        \
    I##INTERFACE::~I##INTERFACE() { }

The above is mostly macro expansion and substitution. When I first read it I was completely lost as well; I searched IServiceManager for a long time without finding an asInterface method, because it is generated by these macros. Since obj here is a BpBinder (a remote proxy), queryLocalInterface() returns NULL, so what interface_cast ultimately returns is a BpServiceManager.
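To make the expansion concrete, here is roughly what asInterface looks like after IMPLEMENT_META_INTERFACE(ServiceManager, "android.os.IServiceManager") has been expanded (a hand-expanded sketch, not literal source):

android::sp<IServiceManager> IServiceManager::asInterface(
        const android::sp<android::IBinder>& obj) {
    android::sp<IServiceManager> intr;
    if (obj != NULL) {
        // obj is a BpBinder (remote proxy), so queryLocalInterface() returns NULL...
        intr = static_cast<IServiceManager*>(
                obj->queryLocalInterface(IServiceManager::descriptor).get());
        if (intr == NULL) {
            // ...and we land here, wrapping the BpBinder(0) in a BpServiceManager
            intr = new BpServiceManager(obj);
        }
    }
    return intr;
}

So interface_cast<IServiceManager>(new BpBinder(0)) amounts to new BpServiceManager(new BpBinder(0)), and the addService below is BpServiceManager::addService: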

virtual status_t addService(const String16& name, const sp<IBinder>& service, bool allowIsolated) {
    // Parcel packages the data to be sent through the binder driver
    Parcel data, reply;
    // Write the interface token "android.os.IServiceManager"; the ServiceManager process checks it when the request arrives
    data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
    // name is "media.player"
    data.writeString16(name);
    // The MediaPlayerService object
    data.writeStrongBinder(service);
    // allowIsolated = false
    data.writeInt32(allowIsolated ? 1 : 0);
    // remote() points to the BpBinder object
    status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
    return err == NO_ERROR ? reply.readExceptionCode() : err;
}

status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
{
    return flatten_binder(ProcessState::self(), val, this);
}

status_t flatten_binder(const sp<ProcessState>& /*proc*/,
    const sp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;

    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    if (binder != NULL) {
        // The local Binder is not null; localBinder() returns this, i.e. the MediaPlayerService object
        IBinder *local = binder->localBinder();
        if (!local) {
            BpBinder *proxy = binder->remoteBinder();
            const int32_t handle = proxy ? proxy->handle() : 0;
            obj.type = BINDER_TYPE_HANDLE;
            obj.binder = 0;
            obj.handle = handle;
            obj.cookie = 0;
        } else {
            // We take this branch, so type is BINDER_TYPE_BINDER
            obj.type = BINDER_TYPE_BINDER;
            obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
            // cookie carries the strong reference, i.e. the address of the MediaPlayerService object
            obj.cookie = reinterpret_cast<uintptr_t>(local);
        }
    } else {
        ...
    }
    
    return finish_flatten_binder(binder, obj, out);
}

inline static status_t finish_flatten_binder(
    const sp<IBinder>& , const flat_binder_object& flat, Parcel* out)
{
    // writeObject appends the flat_binder_object to the Parcel and records its offset in
    // mObjects; those offsets are what writeTransactionData later passes to the driver
    return out->writeObject(flat, false);
}

status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    if (mAlive) {
        // code = ADD_SERVICE_TRANSACTION; hand it over to IPCThreadState::self()
        status_t status = IPCThreadState::self()->transact(
            mHandle, code, data, reply, flags);
        if (status == DEAD_OBJECT) mAlive = 0;
        return status;
    }
    return DEAD_OBJECT;
}

/* TLS means Thread Local Storage: each thread owns a private TLS area that is not
   shared with other threads. The IPCThreadState object for the current thread is
   stored in and fetched from that TLS, much like Java's ThreadLocal. */
IPCThreadState* IPCThreadState::self()
{
    if (gHaveTLS) {
restart:
        const pthread_key_t k = gTLS;
        IPCThreadState* st = (IPCThreadState*) pthread_getspecific(k);
        if (st) return st;
        // Create this thread's IPCThreadState
        return new IPCThreadState; 
    }

    pthread_mutex_lock(&gTLSMutex);
    // On first entry gHaveTLS is false
    if (!gHaveTLS) {
        // Create the TLS key for this process
        if (pthread_key_create(&gTLS, threadDestructor) != 0) {
            pthread_mutex_unlock(&gTLSMutex);
            return NULL;
        }
        gHaveTLS = true;
    }
    pthread_mutex_unlock(&gTLSMutex);
    goto restart;
}

IPCThreadState::IPCThreadState()
    : mProcess(ProcessState::self()),
      mMyThreadId(gettid()),
      mStrictModePolicy(0),
      mLastTransactionBinderFlags(0)
{
    // The constructor stores this in TLS via pthread_setspecific; self() reads it back with pthread_getspecific
    pthread_setspecific(gTLS, this);
    clearCaller();
    // mIn receives data coming from the Binder device
    mIn.setDataCapacity(256);
    // mOut holds data to be sent to the Binder device
    mOut.setDataCapacity(256);
}
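As an aside, the thread-local storage pattern used by IPCThreadState boils down to a handful of pthread calls. Below is a minimal sketch of the same pattern (my illustration, not AOSP code): one process-wide key, one private value per thread.

#include <pthread.h>

struct PerThreadState { int transactionCount = 0; };

static pthread_key_t gKey;
static pthread_once_t gKeyOnce = PTHREAD_ONCE_INIT;

static void destroyState(void* p) { delete static_cast<PerThreadState*>(p); }
static void makeKey() { pthread_key_create(&gKey, destroyState); }

// Equivalent of IPCThreadState::self(): return this thread's private object,
// creating it on first use and stashing it in the thread's TLS slot.
PerThreadState* selfState() {
    pthread_once(&gKeyOnce, makeKey);
    PerThreadState* st = static_cast<PerThreadState*>(pthread_getspecific(gKey));
    if (st == nullptr) {
        st = new PerThreadState();
        pthread_setspecific(gKey, st);
    }
    return st;
}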

There is a very important struct above: flat_binder_object, whose fields are type, binder, handle and cookie; it is one of the keys to how the binder driver handles objects (its kernel-side layout is sketched below). transact is ultimately handed over to IPCThreadState::self(), and each thread has exactly one IPCThreadState object of its own.
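For reference, flat_binder_object comes from the kernel's binder UAPI header and, in kernels of this era, looks roughly like this (newer kernels wrap type in a binder_object_header, but the fields are the same):

struct flat_binder_object {
    __u32                type;    /* BINDER_TYPE_BINDER for a local object, BINDER_TYPE_HANDLE for a remote one */
    __u32                flags;
    union {
        binder_uintptr_t binder;  /* local object: address of its weak reference */
        __u32            handle;  /* remote object: driver-assigned handle */
    };
    binder_uintptr_t     cookie;  /* local object: the object's own address */
};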

status_t IPCThreadState::transact(int32_t handle,
                                  uint32_t code, const Parcel& data,
                                  Parcel* reply, uint32_t flags)
{
    ....
    if (err == NO_ERROR) { 
        // Queue the transaction data for the driver
        err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
    }
    ...
    if ((flags & TF_ONE_WAY) == 0) {
        if (reply) {
            // Wait for the reply
            err = waitForResponse(reply);
        } else {
            Parcel fakeReply;
            err = waitForResponse(&fakeReply);
        }
    } else {
        // oneway: no reply is expected
        err = waitForResponse(NULL, NULL);
    }
    return err;
}

status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
    int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
    binder_transaction_data tr;
    tr.target.ptr = 0;
    // handle = 0 means the transaction is destined for the ServiceManager process
    tr.target.handle = handle;
    // code = ADD_SERVICE_TRANSACTION: the action is adding a service
    tr.code = code;         
    // binderFlags = 0   
    tr.flags = binderFlags;    
    tr.cookie = 0;
    tr.sender_pid = 0;
    tr.sender_euid = 0;

    // data is the Parcel holding the Media service information
    const status_t err = data.errorCheck();
    if (err == NO_ERROR) {
        // mDataSize: how many bytes of data the Parcel holds
        tr.data_size = data.ipcDataSize();  
        // mData 
        tr.data.ptr.buffer = data.ipcData(); 
        // mObjectsSize
        tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t); 
        // mObjects
        tr.data.ptr.offsets = data.ipcObjects(); 
    } else if (statusBuffer) {
        ...
    } else {
        return (mLastError = err);
    }
    // cmd = BC_TRANSACTION. Once the driver has located ServiceManager it returns BR_TRANSACTION_COMPLETE to the client to acknowledge the request, and the client then waits for the reply
    mOut.writeInt32(cmd);        
    // Write the binder_transaction_data payload
    mOut.write(&tr, sizeof(tr)); 
    return NO_ERROR;
}
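The binder_transaction_data filled in above mirrors the kernel UAPI definition, which looks roughly like this:

struct binder_transaction_data {
    union {
        __u32            handle;   /* target handle; 0 means ServiceManager */
        binder_uintptr_t ptr;      /* target of a return transaction */
    } target;
    binder_uintptr_t     cookie;
    __u32                code;     /* e.g. ADD_SERVICE_TRANSACTION */
    __u32                flags;
    pid_t                sender_pid;
    uid_t                sender_euid;
    binder_size_t        data_size;     /* bytes in the data buffer (mData) */
    binder_size_t        offsets_size;  /* bytes of offsets to flat_binder_objects (mObjects) */
    union {
        struct {
            binder_uintptr_t buffer;    /* points at the Parcel's mData */
            binder_uintptr_t offsets;   /* points at the Parcel's mObjects */
        } ptr;
        __u8 buf[8];
    } data;
};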

status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    int32_t cmd;
    int32_t err;
    while (1) {
        if ((err=talkWithDriver()) < NO_ERROR) break; 
        ...
        if (mIn.dataAvail() == 0) continue;

        cmd = mIn.readInt32();
        switch (cmd) {
            case BR_TRANSACTION_COMPLETE: ...
            case BR_DEAD_REPLY: ...
            case BR_FAILED_REPLY: ...
            case BR_ACQUIRE_RESULT: ...
            case BR_REPLY: ...
                goto finish;

            default:
                err = executeCommand(cmd); 
                if (err != NO_ERROR) goto finish;
                break;
        }
    }
    ...
    return err;
}

status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    ...
    binder_write_read bwr;
    const bool needRead = mIn.dataPosition() >= mIn.dataSize();
    const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;

    bwr.write_size = outAvail;
    bwr.write_buffer = (uintptr_t)mOut.data();

    if (doReceive && needRead) {
        // Fill in the receive buffer info; if data comes back later it lands directly in mIn
        bwr.read_size = mIn.dataCapacity();
        bwr.read_buffer = (uintptr_t)mIn.data();
    } else {
        bwr.read_size = 0;
        bwr.read_buffer = 0;
    }
    // If both the read and write buffers are empty, return immediately
    if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;

    bwr.write_consumed = 0;
    bwr.read_consumed = 0;
    status_t err;
    do {
        // Talk to the Binder driver through repeated ioctl read/write calls
        if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
            err = NO_ERROR;
        ...
    } while (err == -EINTR); // If interrupted, try again
    ...
    return err;
}
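For completeness, the binder_write_read structure that talkWithDriver hands to ioctl(BINDER_WRITE_READ) is defined in the kernel UAPI header roughly as follows; the driver consumes commands from the write buffer (mOut) and fills replies into the read buffer (mIn):

struct binder_write_read {
    binder_size_t    write_size;      /* bytes available in write_buffer (from mOut) */
    binder_size_t    write_consumed;  /* bytes the driver actually consumed */
    binder_uintptr_t write_buffer;
    binder_size_t    read_size;       /* capacity of read_buffer (mIn) */
    binder_size_t    read_consumed;   /* bytes the driver filled in */
    binder_uintptr_t read_buffer;
};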

At this point the whole process of adding the service has been covered; in the end everything is handed to the binder driver's ioctl. Where the data goes from there and how the driver handles it internally will not be discussed here. The concrete data includes the interfaceToken (the name of the remote service), the handle (the remote service's handle) and the cookie (the address of the local service), carried by the two structs flat_binder_object and binder_write_read.
Video link: https://pan.baidu.com/s/1j_wgzITcgABVbThvO0VBPA
Video password: jj4b
