Android Source Code Reading --- The Binder Client Process

The binder client process

  1. Once the service manager process is up, the system can provide registration for the various services, and can also handle service lookups for the client processes that need them.
  2. Functionally, service manager is itself a service process: it provides service registration and service lookup. Relative to service manager, which only provides services, the other service processes and the service-requesting client processes are all clients, and they send their service requests to service manager.
  3. So how does a client process use the services that the service manager process provides? Reading the Service Manager process source shows the sequence (a raw sketch of it follows this list):
  4. Open the binder driver device.
  5. Use mmap to map a chunk of the process's address space for the driver; when a transaction is delivered, the kernel copies the sender's data straight into the receiver's mapped buffer, so the two processes effectively share that physical memory for the transfer.
  6. Send the request to service manager through the binder driver, i.e. write the data destined for the service manager process so that it lands in that mapped memory.
  7. Read back the result produced by service manager.
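
Stripped of all the framework layers, those steps boil down to three system calls against /dev/binder. Below is a minimal, self-contained sketch of that raw sequence (error handling trimmed; the UAPI header path varies by kernel version, the BINDER_VM_SIZE value matches the ProcessState code quoted later, and the zeroed ioctl is only a placeholder for the real traffic):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/android/binder.h>   /* UAPI header; path may differ on older kernels */

#define BINDER_VM_SIZE ((1 * 1024 * 1024) - (4096 * 2))  /* same value ProcessState uses */

int main() {
    /* 1. open the binder driver */
    int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
    if (fd < 0) return 1;

    /* 2. map a receive buffer; the driver delivers incoming transaction data into it */
    void* vm = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, fd, 0);
    if (vm == MAP_FAILED) { close(fd); return 1; }

    /* 3. exchange data with the driver; a binder_write_read describes both directions */
    struct binder_write_read bwr = {};  /* zero sizes make this call a harmless no-op */
    ioctl(fd, BINDER_WRITE_READ, &bwr);

    munmap(vm, BINDER_VM_SIZE);
    close(fd);
    return 0;
}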

1. Starting from getService

The process by which an application obtains a service through ServiceManager's getService is, in essence, that process requesting a service from the native service manager process. Let's see how it actually does that.
First, the source of frameworks/base/core/java/android/os/ServiceManager.java:

public final class ServiceManager {
    private static final String TAG = "ServiceManager";
    private static IServiceManager sServiceManager;
    private static HashMap<String, IBinder> sCache = new HashMap<String, IBinder>();/* cache of services already looked up, so repeated requests don't have to go back to the native service manager process */
    private static IServiceManager getIServiceManager() {
        if (sServiceManager != null) {
            return sServiceManager;
        }
        // Find the service manager
        sServiceManager = ServiceManagerNative.asInterface(BinderInternal.getContextObject());
        return sServiceManager;
    }
    /**
     * Returns a reference to a service with the given name.
     * @param name the name of the service to get
     * @return a reference to the service, or <code>null</code> if the service doesn't exist
     */
    public static IBinder getService(String name) {
/*
First, look the name up in the sCache cache.
If it is not found there, ask the native service manager process.
*/
        try {
            IBinder service = sCache.get(name); 
            if (service != null) {
                return service;
            } else {
                return getIServiceManager().getService(name);
            }
        } catch (RemoteException e) {
            Log.e(TAG, "error in getService", e);
        }
        return null;
    }
    /**
     * Place a new @a service called @a name into the service manager.
     * 
     * @param name the name of the new service
     * @param service the service object
     */
    public static void addService(String name, IBinder service) {
        try {
            getIServiceManager().addService(name, service, false);
        } catch (RemoteException e) {
            Log.e(TAG, "error in addService", e);
        }
    }
    /**
     * Place a new @a service called @a name into the service manager.
     * 
     * @param name the name of the new service
     * @param service the service object
     * @param allowIsolated set to true to allow isolated sandboxed processes
     * to access this service
     */
    public static void addService(String name, IBinder service, boolean allowIsolated) {
        try {
            getIServiceManager().addService(name, service, allowIsolated);
        } catch (RemoteException e) {
            Log.e(TAG, "error in addService", e);
        }
    }
    /**
     * Retrieve an existing service called @a name from the service manager. Non-blocking.
     */
    public static IBinder checkService(String name) {
        try {
            IBinder service = sCache.get(name);
            if (service != null) {
                return service;
            } else {
                return getIServiceManager().checkService(name);
            }
        } catch (RemoteException e) {
            Log.e(TAG, "error in checkService", e);
            return null;
        }
    }

    /**
     * Return a list of all currently running services.
     * @return an array of all currently running services, or <code>null</code> in case of an exception
     */
    public static String[] listServices() {
        try {
            return getIServiceManager().listServices();
        } catch (RemoteException e) {
            Log.e(TAG, "error in listServices", e);
            return null;
        }
    }

    /**
     * This is only intended to be called when the process is first being brought up and bound by the activity manager. There is only one thread in the process at that time, so no locking is done.
     * 
     * @param cache the cache of service references
     * @hide
     */
    public static void initServiceCache(Map<String, IBinder> cache) {
        if (sCache.size() != 0) {
            throw new IllegalStateException("setServiceCache may only be called once");
        }
        sCache.putAll(cache);
    }
}

The ServiceManager class is just a thin wrapper around a ServiceManagerProxy object; ServiceManager's getService, for instance, ultimately calls the ServiceManagerProxy object's getService method.
ServiceManager.getService is therefore equivalent to new ServiceManagerProxy(BinderInternal.getContextObject()).getService(name). Now look at the ServiceManagerProxy class's getService method:

/*
1. Wrap the requested service name in a Parcel.
2. Send the request to the service manager process through IBinder's transact method.
3. Read the returned result out of the Parcel object reply.
*/
    public IBinder getService(String name) throws RemoteException {
        Parcel data = Parcel.obtain();
        Parcel reply = Parcel.obtain();
        data.writeInterfaceToken(IServiceManager.descriptor);
        data.writeString(name);
        mRemote.transact(GET_SERVICE_TRANSACTION, data, reply, 0);/* mRemote is the BinderInternal.getContextObject() object; after transact returns, the result is in reply */
        IBinder binder = reply.readStrongBinder();/* pull the returned result out of reply */
        reply.recycle();
        data.recycle();
        return binder;
    }
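
For comparison, the native-layer proxy in frameworks/native/libs/binder/IServiceManager.cpp goes through the same motions with a C++ Parcel. Roughly (a sketch after BpServiceManager::checkService; treat the exact shape as approximate for this Android version):

    virtual sp<IBinder> checkService(const String16& name) const
    {
        Parcel data, reply;
        data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
        data.writeString16(name);   // same token + name layout the Java proxy writes
        remote()->transact(CHECK_SERVICE_TRANSACTION, data, &reply);
        return reply.readStrongBinder();
    }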

To understand how this IBinder object sends the request to the service manager process:

  1. First, work out what kind of object BinderInternal.getContextObject() actually is.
  2. Then look at what that object's transact method does.

1.1 The BinderInternal.getContextObject() object

BinderInternal.getContextObject() is declared as follows:

    /**
     * Return the global "context object" of the system. This is usually
     * an implementation of IServiceManager, which you can use to find
     * other services.
     */
    public static final native IBinder getContextObject();

The code shows that getContextObject is a native method; its implementation is in frameworks/base/core/jni/android_util_Binder.cpp:

static jobject android_os_BinderInternal_getContextObject(JNIEnv* env, jobject clazz)
{
    sp<IBinder> b = ProcessState::self()->getContextObject(NULL);/* create a BpBinder object */
    return javaObjectForIBinder(env, b); /* convert the C++-level BpBinder into a Java-level BinderProxy object */
} 

The whole method is just two statements:

  • first, create an IBinder-typed object;
  • then, convert that object into a Java-level IBinder-typed object.

1.1.1 The C++-level IBinder object

First, the IBinder object created by ProcessState::self()->getContextObject(NULL):

sp<ProcessState> ProcessState::self()
{
    Mutex::Autolock _l(gProcessMutex);
    if (gProcess != NULL) {/* if the global gProcess is non-NULL, return the previously created object; on first entry it is NULL, so we fall through */
        return gProcess;
    }
    gProcess = new ProcessState; /* new a ProcessState object */
    return gProcess;
}
ProcessState::ProcessState()
    : mDriverFD(open_driver())/* open the binder driver */
    , mVMStart(MAP_FAILED)
    , mThreadCountLock(PTHREAD_MUTEX_INITIALIZER)
    , mThreadCountDecrement(PTHREAD_COND_INITIALIZER)
    , mExecutingThreadsCount(0)
    , mMaxThreads(DEFAULT_MAX_BINDER_THREADS)
    , mStarvationStartTimeMs(0)
    , mManagesContexts(false)
    , mBinderContextCheckFunc(NULL)
    , mBinderContextUserData(NULL)
    , mThreadPoolStarted(false)
    , mThreadPoolSeq(1)
{
    if (mDriverFD >= 0) {/* if the binder driver opened successfully */
        // mmap the binder, providing a chunk of virtual address space to receive transactions.
        mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);/* map BINDER_VM_SIZE bytes of this process's address space for receiving transactions */
/* #define BINDER_VM_SIZE ((1*1024*1024) - (4096 *2)), i.e. 1016 KB of space */
        if (mVMStart == MAP_FAILED) {
            // *sigh*
            ALOGE("Using /dev/binder failed: unable to mmap transaction memory.\n");
            close(mDriverFD);
            mDriverFD = -1;
        }
    }
    LOG_ALWAYS_FATAL_IF(mDriverFD < 0, "Binder driver could not be opened. Terminating.");
}

sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& /*caller*/)/* the NULL argument passed in is never actually used */
{
    return getStrongProxyForHandle(0);
}
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)/* the argument is 0, which denotes the service manager */
{
    sp<IBinder> result;
    AutoMutex _l(mLock);
    handle_entry* e = lookupHandleLocked(handle); /* look up the entry for the target service's binder; under normal conditions the return value is never NULL */
    if (e != NULL) {
        // We need to create a new BpBinder if there isn't currently one, OR we are unable to acquire a weak reference on this current one. See comment in getWeakProxyForHandle() for more info about this.
        IBinder* b = e->binder;
        if (b == NULL || !e->refs->attemptIncWeak(this)) {/* on first entry, both b and refs are NULL */
            if (handle == 0) {
                // Special case for context manager...
                // The context manager is the only object for which we create a BpBinder proxy without already holding a reference.
                // Perform a dummy transaction to ensure the context manager is registered before we create the first local reference to it (which will occur when creating the BpBinder).
                // If a local reference is created for the BpBinder when the context manager is not present, the driver will fail to provide a reference to the context manager, but the driver API does not return status.
                //
                // Note that this is not race-free if the context manager dies while this code runs.
                //
                // TODO: add a driver API to wait for context manager, or stop special casing handle 0 for context manager and add a driver API to get a handle to the context manager with proper reference counting.
                Parcel data;
                status_t status = IPCThreadState::self()->transact(
                        0, IBinder::PING_TRANSACTION, data, NULL, 0);
                if (status == DEAD_OBJECT)
                   return NULL;
            }

            b = new BpBinder(handle);  /* create a C++-level BpBinder object */
            e->binder = b;
            if (b) e->refs = b->getWeakRefs(); /* also keep the BpBinder's weak-reference object */
            result = b;
        } else {
            // This little bit of nastyness is to allow us to add a primary reference to the remote proxy when this team doesn't have one but another team is sending the handle to us.
            result.force_set(b);
            e->refs->decWeak(this);
        }
    }
    return result;
}
ProcessState::handle_entry* ProcessState::lookupHandleLocked(int32_t handle)
{
    const size_t N=mHandleToObject.size(); /* mHandleToObject is a Vector recording the handle entries this process has created, indexed by handle */
    if (N <= (size_t)handle) {/* if the queried handle does not exist yet, create an empty entry for it */
        handle_entry e;
        e.binder = NULL;
        e.refs = NULL;
        status_t err = mHandleToObject.insertAt(e, N, handle+1-N);
        if (err < NO_ERROR) return NULL;
    }
    return &mHandleToObject.editItemAt(handle);
}

So at this point it is certain that ProcessState::self()->getContextObject(NULL) creates a C++-level BpBinder object (with handle 0).

1.1.2 The Java-level IBinder object

Next, see what javaObjectForIBinder(env, b) turns the resulting BpBinder into. Back in frameworks/base/core/jni/android_util_Binder.cpp, the implementation of javaObjectForIBinder:

static struct bindernative_offsets_t {
    jclass mClass;
    jmethodID mExecTransact;
    jfieldID mObject;
} gBinderOffsets; /* static global */

static struct binderproxy_offsets_t {
    jclass mClass;
    jmethodID mConstructor;
    jmethodID mSendDeathNotice;
    jfieldID mObject;
    jfieldID mSelf;
    jfieldID mOrgue;
} gBinderProxyOffsets; /* static global */
jobject javaObjectForIBinder(JNIEnv* env, const sp<IBinder>& val)
{
    if (val == NULL) return NULL;/* NULL check; here val is a BpBinder object */
    if (val->checkSubclass(&gBinderOffsets)) {/* checkSubclass is a virtual function declared in IBinder.h; the default implementation in Binder.cpp simply returns false, and BpBinder inherits that default */
        // One of our own!
        jobject object = static_cast<JavaBBinder*>(val.get())->object();
        LOGDEATH("objectForBinder %p: it's our own %p!\n", val.get(), object);
        return object;
    }

    // For the rest of the function we will hold this lock, to serialize looking/creation/destruction of Java proxies for native Binder proxies.
    AutoMutex _l(mProxyLock);

    // Someone else's... do we know about it?
    jobject object = (jobject)val->findObject(&gBinderProxyOffsets);
    if (object != NULL) {
        jobject res = jniGetReferent(env, object);
        if (res != NULL) {
            ALOGV("objectForBinder %p: found existing %p!\n", val.get(), res);
            return res;
        }
        LOGDEATH("Proxy object %p of IBinder %p no longer in working set!!!", object, val.get());
        android_atomic_dec(&gNumProxyRefs);
        val->detachObject(&gBinderProxyOffsets);
        env->DeleteGlobalRef(object);
    }

    object = env->NewObject(gBinderProxyOffsets.mClass, gBinderProxyOffsets.mConstructor);/* instantiate the Java class android/os/BinderProxy via JNI */
    if (object != NULL) {
        LOGDEATH("objectForBinder %p: created new proxy %p !\n", val.get(), object);
        // The proxy holds a reference to the native object.
        env->SetLongField(object, gBinderProxyOffsets.mObject, (jlong)val.get());/* store the BpBinder pointer into the BinderProxy object's mObject field */
        val->incStrong((void*)javaObjectForIBinder);

        // The native object needs to hold a weak reference back to the proxy, so we can retrieve the same proxy if it is still active.
        jobject refObject = env->NewGlobalRef(env->GetObjectField(object, gBinderProxyOffsets.mSelf));/* grab the mSelf field of the Java android/os/BinderProxy object */
        val->attachObject(&gBinderProxyOffsets, refObject,  jnienv_to_javavm(env), proxy_cleanup);

        // Also remember the death recipients registered on this proxy
        sp<DeathRecipientList> drl = new DeathRecipientList;
        drl->incStrong((void*)javaObjectForIBinder);
        env->SetLongField(object, gBinderProxyOffsets.mOrgue, reinterpret_cast<jlong>(drl.get()));/* store the list into the BinderProxy object's mOrgue field */

        // Note that a new object reference has been created.
        android_atomic_inc(&gNumProxyRefs);
        incRefsCreated(env);
    }
    return object;
}

So javaObjectForIBinder(env, b) converts the BpBinder into a Java-level android/os/BinderProxy object.

Returning to the starting point, in ServiceManager.java the getIServiceManager method produces a ServiceManagerProxy object whose constructor argument is a BinderProxy object. In other words, ServiceManagerNative.asInterface(BinderInternal.getContextObject()) is equivalent to new ServiceManagerProxy(new BinderProxy()).
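
For reference, the native layer wraps the very same handle-0 BpBinder: defaultServiceManager() in frameworks/native/libs/binder/IServiceManager.cpp reads roughly as follows on this era of Android (quoted from memory, so take the details as approximate), with interface_cast<IServiceManager> playing the role that ServiceManagerNative.asInterface plays in Java:

sp<IServiceManager> defaultServiceManager()
{
    if (gDefaultServiceManager != NULL) return gDefaultServiceManager;
    {
        AutoMutex _l(gDefaultServiceManagerLock);
        while (gDefaultServiceManager == NULL) {
            // wrap BpBinder(0) in a BpServiceManager, just as the Java side
            // wraps BinderProxy in a ServiceManagerProxy
            gDefaultServiceManager = interface_cast<IServiceManager>(
                    ProcessState::self()->getContextObject(NULL));
            if (gDefaultServiceManager == NULL)
                sleep(1);   // service manager not registered yet; retry
        }
    }
    return gDefaultServiceManager;
}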

1.2 The transact method

Since the ServiceManagerProxy object was constructed with a BinderProxy argument, i.e. mRemote = new BinderProxy(), the transact call inside getService is the BinderProxy class's transact method.

    public boolean transact(int code, Parcel data, Parcel reply, int flags) throws RemoteException {
        Binder.checkParcel(this, code, data, "Unreasonably large binder buffer");/* sanity-check the Parcel */
        if (Binder.isTracingEnabled()) { Binder.getTransactionTracker().addTrace(); }
        return transactNative(code, data, reply, flags);
    }
public native boolean transactNative(int code, Parcel data, Parcel reply,int flags) throws RemoteException;/* another native method: control drops into the C++ layer, into android_os_BinderProxy_transact in android_util_Binder.cpp */

Next, into android_util_Binder.cpp:

static jboolean android_os_BinderProxy_transact(JNIEnv* env, jobject obj, jint code, jobject dataObj, jobject replyObj, jint flags) // throws RemoteException
{
    if (dataObj == NULL) {/* the data handed to the binder driver must not be null */
        jniThrowNullPointerException(env, NULL);
        return JNI_FALSE;
    }

    Parcel* data = parcelForJavaObject(env, dataObj);/* convert the Java-level Parcel into a C++-level Parcel */
    if (data == NULL) {
        return JNI_FALSE;
    }
    Parcel* reply = parcelForJavaObject(env, replyObj);
    if (reply == NULL && replyObj != NULL) {
        return JNI_FALSE;
    }

    IBinder* target = (IBinder*)env->GetLongField(obj, gBinderProxyOffsets.mObject); /* read the BinderProxy object's mObject field; the BpBinder's address was stored there when the BinderProxy was created, so target is a BpBinder */
    if (target == NULL) {
        jniThrowException(env, "java/lang/IllegalStateException", "Binder has been finalized!");
        return JNI_FALSE;
    }
    ALOGV("Java code calling transact on %p in Java object %p with code %" PRId32 "\n", target, obj, code);

    bool time_binder_calls;
    int64_t start_millis;
    if (kEnableBinderSample) {
        // Only log the binder call duration for things on the Java-level main thread.
        // But if we don't
        time_binder_calls = should_time_binder_calls();

        if (time_binder_calls) {
            start_millis = uptimeMillis();
        }
    }

    status_t err = target->transact(code, *data, reply, flags);/*調用BpBinder的transact*/
    //if (reply) printf("Transact from Java code to %p received: ", target); reply->print();

    if (kEnableBinderSample) {
        if (time_binder_calls) {
            conditionally_log_binder_call(start_millis, target, code);
        }
    }
    if (err == NO_ERROR) {
        return JNI_TRUE;
    } else if (err == UNKNOWN_TRANSACTION) {
        return JNI_FALSE;
    }
    signalExceptionForError(env, obj, err, true /*canThrowRemoteException*/, data->dataSize());
    return JNI_FALSE;
}

Now, what BpBinder's transact does:

status_t BpBinder::transact( uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
/*
1. Obtain the calling thread's IPCThreadState object.
2. Call that IPCThreadState object's transact function.
*/
    // Once a binder has died, it will never come back to life.
    if (mAlive) {
        status_t status = IPCThreadState::self()->transact(mHandle, code, data, reply, flags); /* delegate to IPCThreadState's transact */
        if (status == DEAD_OBJECT) mAlive = 0;
        return status;
    }
    return DEAD_OBJECT;
}

Then into frameworks/native/libs/binder/IPCThreadState.cpp.

First, IPCThreadState::self() returns a per-thread singleton IPCThreadState object:

IPCThreadState::IPCThreadState()
    : mProcess(ProcessState::self()),
      mMyThreadId(gettid()),
      mStrictModePolicy(0),
      mLastTransactionBinderFlags(0)
{
    pthread_setspecific(gTLS, this);
    clearCaller();
    mIn.setDataCapacity(256);
    mOut.setDataCapacity(256);
}

IPCThreadState* IPCThreadState::self()
{
    if (gHaveTLS) {/* on first entry gHaveTLS is false */
restart:
        const pthread_key_t k = gTLS;
        IPCThreadState* st = (IPCThreadState*)pthread_getspecific(k);
        if (st) return st;
        return new IPCThreadState;/* create and return this thread's IPCThreadState object */
    }
    
    if (gShutdown) {
        ALOGW("Calling IPCThreadState::self() during shutdown is dangerous, expect a crash.\n");
        return NULL;
    }
    
    pthread_mutex_lock(&gTLSMutex);
    if (!gHaveTLS) {
        int key_create_value = pthread_key_create(&gTLS, threadDestructor);/* TLS, i.e. thread-local storage: each thread gets its own private value under this key, visible only to that thread */
        if (key_create_value != 0) {
            pthread_mutex_unlock(&gTLSMutex);
            ALOGW("IPCThreadState::self() unable to create TLS key, expect a crash: %s\n",
                    strerror(key_create_value));
            return NULL;
        }
        gHaveTLS = true;
    }
    pthread_mutex_unlock(&gTLSMutex);
    goto restart;
}
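
IPCThreadState::self() is therefore a per-thread singleton built on pthread thread-local storage. A standalone sketch of the same pattern (illustrative names; it uses pthread_once where the original uses a mutex plus goto, but the effect is the same):

#include <pthread.h>

class ThreadState {
public:
    // Return this thread's instance, creating it on first use.
    static ThreadState* self() {
        pthread_once(&gKeyOnce, createKey);        // create the TLS key exactly once
        void* st = pthread_getspecific(gKey);      // look up this thread's slot
        if (st) return static_cast<ThreadState*>(st);
        ThreadState* ts = new ThreadState;         // first call on this thread
        pthread_setspecific(gKey, ts);             // remember it in the slot
        return ts;
    }
private:
    ThreadState() {}
    static void createKey() { pthread_key_create(&gKey, destroy); }
    static void destroy(void* st) { delete static_cast<ThreadState*>(st); }  // runs at thread exit
    static pthread_key_t gKey;
    static pthread_once_t gKeyOnce;
};

pthread_key_t ThreadState::gKey;
pthread_once_t ThreadState::gKeyOnce = PTHREAD_ONCE_INIT;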

Next, what the transact method actually does:

status_t IPCThreadState::transact(int32_t handle,uint32_t code, const Parcel& data,Parcel* reply, uint32_t flags)
{
/*
1. Validate the data.
2. Package the data.
3. Fetch the result.
*/
    status_t err = data.errorCheck(); /* first validate the data to be handed to the binder driver */
    flags |= TF_ACCEPT_FDS;/* flags arrives as 0 */
    IF_LOG_TRANSACTIONS() {
        TextOutput::Bundle _b(alog);
        alog << "BC_TRANSACTION thr " << (void*)pthread_self() << " / hand "
            << handle << " / code " << TypeCode(code) << ": "
            << indent << data << dedent << endl;
    }
    
    if (err == NO_ERROR) {
        LOG_ONEWAY(">>>> SEND from pid %d uid %d %s", getpid(), getuid(), (flags & TF_ONE_WAY) == 0 ? "READ REPLY" : "ONE WAY");
        err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);/* package the data for the binder driver */
    }
    if (err != NO_ERROR) {/* error handling */
        if (reply) reply->setError(err);
        return (mLastError = err);
    }
    if ((flags & TF_ONE_WAY) == 0) {/* synchronous case: the caller wants a result back */
        if (reply) {
/* waitForResponse is declared in the header as:
status_t waitForResponse(Parcel *reply,status_t *acquireResult=NULL);
so when called with one argument, the second defaults to NULL
*/
            err = waitForResponse(reply);/* fetch the result returned by the binder driver into reply */
        } else {
            Parcel fakeReply;
            err = waitForResponse(&fakeReply);
        }
        IF_LOG_TRANSACTIONS() {
            TextOutput::Bundle _b(alog);
            alog << "BR_REPLY thr " << (void*)pthread_self() << " / hand "
                << handle << ": ";
            if (reply) alog << indent << *reply << dedent << endl;
            else alog << "(none requested)" << endl;
        }
    } else {/* one-way (asynchronous) case */
        err = waitForResponse(NULL, NULL);
    }
    return err;
}
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{/* pack the outgoing data into a binder_transaction_data struct, because service manager also parses a binder_transaction_data when it reads what the binder driver delivers */
    binder_transaction_data tr;
    tr.target.ptr = 0; /* Don't pass uninitialized stack data to a remote process */
    tr.target.handle = handle;
    tr.code = code;
    tr.flags = binderFlags;
    tr.cookie = 0;
    tr.sender_pid = 0;
    tr.sender_euid = 0;
    const status_t err = data.errorCheck();/* data was already checked earlier; it is checked once more here */
    if (err == NO_ERROR) {
        tr.data_size = data.ipcDataSize();
        tr.data.ptr.buffer = data.ipcData();
        tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t);
        tr.data.ptr.offsets = data.ipcObjects();
    } else if (statusBuffer) {
        tr.flags |= TF_STATUS_CODE;
        *statusBuffer = err;
        tr.data_size = sizeof(status_t);
        tr.data.ptr.buffer = reinterpret_cast<uintptr_t>(statusBuffer);
        tr.offsets_size = 0;
        tr.data.ptr.offsets = 0;
    } else {
        return (mLastError = err);
    }
    mOut.writeInt32(cmd);
    mOut.write(&tr, sizeof(tr));/* wrap the driver's BC_TRANSACTION command and the binder_transaction_data struct into the mOut Parcel */
    return NO_ERROR;
}
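
So after writeTransactionData, mOut holds a command word followed immediately by the fixed-size record: [BC_TRANSACTION][binder_transaction_data]. A self-contained sketch of that framing (SimpleTxnData is an invented stand-in for a subset of the real struct, shown only to make the byte layout concrete):

#include <cstdint>
#include <cstring>
#include <vector>

// Invented, simplified stand-in for the kernel's binder_transaction_data.
struct SimpleTxnData {
    uint32_t handle;       // target service handle (0 = service manager)
    uint32_t code;         // request code, e.g. GET_SERVICE_TRANSACTION
    uint64_t data_size;    // payload size in bytes
    uint64_t data_buffer;  // user-space address of the payload
};

// Mirror of what writeTransactionData appends to mOut: [cmd][record].
void appendTransaction(std::vector<uint8_t>& out, uint32_t cmd, const SimpleTxnData& tr) {
    const uint8_t* p = reinterpret_cast<const uint8_t*>(&cmd);
    out.insert(out.end(), p, p + sizeof(cmd));
    p = reinterpret_cast<const uint8_t*>(&tr);
    out.insert(out.end(), p, p + sizeof(tr));
}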

Now the key function, waitForResponse: it hands the Parcel data packaged above to the binder driver, i.e. this is where the client process actually starts communicating with the driver.

status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    uint32_t cmd;
    int32_t err;

    while (1) {
        if ((err=talkWithDriver()) < NO_ERROR) break;// send the data to the binder driver and collect the result, which lands in the Parcel member mIn
        err = mIn.errorCheck();/* check the returned result */
        if (err < NO_ERROR) break;
        if (mIn.dataAvail() == 0) continue;/* nothing to read yet: go around the loop again */
        cmd = (uint32_t)mIn.readInt32();
/* read the cmd out of the returned data;
the cases are:
BR_TRANSACTION_COMPLETE
BR_DEAD_REPLY
BR_FAILED_REPLY
BR_ACQUIRE_RESULT
BR_REPLY
default
*/
        switch (cmd) {
        case BR_TRANSACTION_COMPLETE:
            if (!reply && !acquireResult) goto finish;
            break;
        
        case BR_DEAD_REPLY:
            err = DEAD_OBJECT;
            goto finish;

        case BR_FAILED_REPLY:
            err = FAILED_TRANSACTION;
            goto finish;
        
        case BR_ACQUIRE_RESULT:
            {
                ALOG_ASSERT(acquireResult != NULL, "Unexpected brACQUIRE_RESULT");
                const int32_t result = mIn.readInt32();
                if (!acquireResult) continue;
                *acquireResult = result ? NO_ERROR : INVALID_OPERATION;
            }
            goto finish;
        
        case BR_REPLY:
            {
                binder_transaction_data tr;/* if the binder driver returns a result, the payload is carried in a binder_transaction_data struct */
                err = mIn.read(&tr, sizeof(tr));
                ALOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
                if (err != NO_ERROR) goto finish;
                if (reply) {
                    if ((tr.flags & TF_STATUS_CODE) == 0) {
                        reply->ipcSetDataReference(reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer), tr.data_size, reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets), tr.offsets_size/sizeof(binder_size_t), freeBuffer, this);/* lift the result out of the binder_transaction_data struct and into reply */
                    } else {
                        err = *reinterpret_cast<const status_t*>(tr.data.ptr.buffer); /* on error, extract the status code */
                        freeBuffer(NULL, reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer), tr.data_size, reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets), tr.offsets_size/sizeof(binder_size_t), this);/* release the space the binder_transaction_data occupied */
                    }
                } else {
                    freeBuffer(NULL,  reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer), tr.data_size, reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets), tr.offsets_size/sizeof(binder_size_t), this);
                    continue;/* continue with the next iteration */
                }
            }
            goto finish;/* leave the loop; waitForResponse is done */
        default:
            err = executeCommand(cmd);
            if (err != NO_ERROR) goto finish;
            break;
        }
    }
finish:
    if (err != NO_ERROR) {
        if (acquireResult) *acquireResult = err;
        if (reply) reply->setError(err);
        mLastError = err;
    }
    return err;
}

/* talkWithDriver is declared as:
status_t talkWithDriver(bool doReceive=true);
i.e. doReceive defaults to true
*/
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    if (mProcess->mDriverFD <= 0) {/* check the file descriptor of the /dev/binder device node */
        return -EBADF;
    }
    binder_write_read bwr;/* the same struct service manager uses; it describes both the data to write and where to read */
    
    // Is the read buffer empty?
    const bool needRead = mIn.dataPosition() >= mIn.dataSize();
    
    // We don't want to write anything if we are still reading
    // from data left in the input buffer and the caller
    // has requested to read the next data.
    const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;
    
    bwr.write_size = outAvail; /* stash the data destined for the binder driver in the binder_write_read struct */
    bwr.write_buffer = (uintptr_t)mOut.data();

    // This is what we'll read.
    if (doReceive && needRead) {/* if we expect the driver to return data, tell it where to put that data */
        bwr.read_size = mIn.dataCapacity();
        bwr.read_buffer = (uintptr_t)mIn.data();
    } else {
        bwr.read_size = 0;
        bwr.read_buffer = 0;
    }
    // Return immediately if there is nothing to do.
    if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR; /* nothing to write and nothing to read: return immediately */

    bwr.write_consumed = 0;
    bwr.read_consumed = 0;
    status_t err;
    do {
        if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)/* start the read/write against the binder driver; the ioctl is serviced by the driver's binder_ioctl */
            err = NO_ERROR;
        else
            err = -errno;
        if (mProcess->mDriverFD <= 0) {
            err = -EBADF;
        }
        IF_LOG_COMMANDS() {
            alog << "Finished read/write, write size = " << mOut.dataSize() << endl;
        }
    } while (err == -EINTR);/* unless interrupted, a single pass through the loop exits it */

    if (err >= NO_ERROR) {
        if (bwr.write_consumed > 0) {/* the binder driver consumed some of the data handed to it */
            if (bwr.write_consumed < mOut.dataSize())/* the driver did not consume everything it was given */
                mOut.remove(0, bwr.write_consumed);/* drop the part that was already consumed */
            else
                mOut.setDataSize(0);/* the driver consumed everything: reset mOut to empty */
        }
        if (bwr.read_consumed > 0) {/* the driver wrote reply data into the location this client process supplied */
            mIn.setDataSize(bwr.read_consumed);/* the amount the driver wrote becomes the size of the returned data */
            mIn.setDataPosition(0);
        }
        return NO_ERROR;
    }
    return err;
}

So the client process pushes the request from the Java layer, step by step, down into the C++ layer, wraps the request data in a form the binder driver can recognize, and in talkWithDriver finally exchanges data with the driver and receives the result. The call flow is: BinderProxy.transact → android_os_BinderProxy_transact → BpBinder::transact → IPCThreadState::transact → waitForResponse → talkWithDriver → ioctl.
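
Concretely, one talkWithDriver round trip fills in both directions of a binder_write_read and makes a single ioctl. A hedged sketch of that round trip (fd is assumed to be an open /dev/binder descriptor; out and in play the roles of mOut and mIn):

#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <vector>
#include <linux/android/binder.h>

int talk(int fd, std::vector<uint8_t>& out, std::vector<uint8_t>& in) {
    in.resize(256);                                // mIn's default capacity in the source
    binder_write_read bwr = {};
    bwr.write_size   = out.size();                 // commands handed to the driver
    bwr.write_buffer = (uintptr_t)out.data();
    bwr.read_size    = in.size();                  // room for the driver's replies
    bwr.read_buffer  = (uintptr_t)in.data();

    int ret;
    do {
        ret = ioctl(fd, BINDER_WRITE_READ, &bwr);  // blocks until there is something to return
    } while (ret < 0 && errno == EINTR);           // retry if interrupted by a signal
    if (ret < 0) return -errno;

    out.erase(out.begin(), out.begin() + static_cast<long>(bwr.write_consumed));  // drop consumed commands
    in.resize(bwr.read_consumed);                  // the driver reports how much it wrote back
    return 0;
}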

2. The Binder driver

So far we have only seen the client process call ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) to read from and write to the binder driver; what happens inside is still unclear, so it is worth looking at the implementation in bionic/libc/bionic/ioctl.cpp:

#include <sys/ioctl.h>
#include <stdarg.h>
extern "C" int __ioctl(int, int, void *);
int ioctl(int fd, int request, ...) {
  va_list ap;
  va_start(ap, request);
  void* arg = va_arg(ap, void*);
  va_end(ap);
  return __ioctl(fd, request, arg);/* system call */
}

The __ioctl system call eventually reaches binder_ioctl, the function the binder driver registered:

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
 int ret;
 struct binder_proc *proc = filp->private_data;/* created in binder_open; this records everything binder-related about the client process */
 struct binder_thread *thread;
 unsigned int size = _IOC_SIZE(cmd);
 void __user *ubuf = (void __user *)arg;

 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
   proc->pid, current->pid, cmd, arg);*/

 trace_binder_ioctl(cmd, arg);
/* wait_event_interruptible is actually a macro: if the condition binder_stop_on_user_error < 2 already holds, it returns 0 right away; otherwise the process loses the CPU and sleeps on binder_user_error_wait */
 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
 if (ret)
  goto err_unlocked;

 binder_lock(__func__);
 thread = binder_get_thread(proc);/* look up, or add, the node for this thread */
 if (thread == NULL) {
  ret = -ENOMEM;
  goto err;
 }
/* now dispatch the specific command */
 switch (cmd) {
 case BINDER_WRITE_READ:
  ret = binder_ioctl_write_read(filp, cmd, arg, thread);
  if (ret)
   goto err;
  break;
/* other cases omitted */
}
 ret = 0;
err:
 if (thread)
  thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
 binder_unlock(__func__);
 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
 if (ret && ret != -ERESTARTSYS)
  pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
 trace_binder_ioctl_done(ret);
 return ret;
}
static int binder_open(struct inode *nodp, struct file *filp)/* mainly creates a binder_proc variable and initializes it */
{
 struct binder_proc *proc;
 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
       current->group_leader->pid, current->pid);

 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
 if (proc == NULL)
  return -ENOMEM;
 get_task_struct(current);
 proc->tsk = current;
 INIT_LIST_HEAD(&proc->todo);
 init_waitqueue_head(&proc->wait);
 proc->default_priority = task_nice(current);

 binder_lock(__func__);

 binder_stats_created(BINDER_STAT_PROC);
 hlist_add_head(&proc->proc_node, &binder_procs);
 proc->pid = current->group_leader->pid;
 INIT_LIST_HEAD(&proc->delivered_death);
 filp->private_data = proc;

 binder_unlock(__func__);

 if (binder_debugfs_dir_entry_proc) {
  char strbuf[11];

  snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
  proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
   binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
 }
 return 0;
}

Next, what the BINDER_WRITE_READ command does:

static int binder_ioctl_write_read(struct file *filp, unsigned int cmd, unsigned long arg,   struct binder_thread *thread)
{
 int ret = 0;
 struct binder_proc *proc = filp->private_data; /* fetch the proc created in binder_open */
 unsigned int size = _IOC_SIZE(cmd);
 void __user *ubuf = (void __user *)arg;
 struct binder_write_read bwr;

 if (size != sizeof(struct binder_write_read)) {/* check the buffer size is exactly a binder_write_read */
  ret = -EINVAL;
  goto out;
 }
 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {/* copy the descriptor from user space into the variable bwr */
  ret = -EFAULT;
  goto out;
 }
 if (bwr.write_size > 0) {/* if bwr.write_size > 0, perform the write */
  ret = binder_thread_write(proc, thread,bwr.write_buffer, bwr.write_size,&bwr.write_consumed);
  trace_binder_write_done(ret);
  if (ret < 0) {
   bwr.read_consumed = 0;
   if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
    ret = -EFAULT;
   goto out;
  }
 }
 if (bwr.read_size > 0) {/* if bwr.read_size > 0, perform the read */
  ret = binder_thread_read(proc, thread, bwr.read_buffer, bwr.read_size,   &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
  trace_binder_read_done(ret);
  if (!list_empty(&proc->todo))
   wake_up_interruptible(&proc->wait);
  if (ret < 0) {
   if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
    ret = -EFAULT;
   goto out;
  }
 }
 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {/* copy the updated descriptor back from kernel space to user space, i.e. into the memory the arg parameter points at */
  ret = -EFAULT;
  goto out;
 }
out:
 return ret;
}

This function mainly:

  1. copies the client's request descriptor from user space into the variable bwr;
  2. if there is data to write, performs the write;
  3. if there is data to read, performs the read.

2.1 Writing data to the binder driver

/*  ret = binder_thread_write(proc, thread,bwr.write_buffer, bwr.write_size,&bwr.write_consumed); */
static int binder_thread_write(struct binder_proc *proc,struct binder_thread *thread,binder_uintptr_t binder_buffer, size_t size, binder_size_t *consumed)
{
 uint32_t cmd;
 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
 void __user *ptr = buffer + *consumed;/* start of the data still to process */
 void __user *end = buffer + size;/* end of the data to process */

 while (ptr < end && thread->return_error == BR_OK) {/* loop until the end of the data is reached */
  if (get_user(cmd, (uint32_t __user *)ptr))/* fetch the cmd */
   return -EFAULT;
  ptr += sizeof(uint32_t);/* advance past the cmd just taken */
  trace_binder_command(cmd);
  if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
   binder_stats.bc[_IOC_NR(cmd)]++;
   proc->stats.bc[_IOC_NR(cmd)]++;
   thread->stats.bc[_IOC_NR(cmd)]++;
  }
/* now handle the cmd; in our scenario the cmd just read is BC_TRANSACTION */
  switch (cmd) {
/* other cases omitted */
  case BC_TRANSACTION:
  case BC_REPLY: {
   struct binder_transaction_data tr;
   if (copy_from_user(&tr, ptr, sizeof(tr)))/* copy the binder_transaction_data struct out of user space */
    return -EFAULT;
   ptr += sizeof(tr); /* advance past the binder_transaction_data just copied */
   binder_transaction(proc, thread, &tr, cmd == BC_REPLY); /* carry out the actual command */
   break;
  }
/* other cases omitted */
   }
  *consumed = ptr - buffer;/* record how far the data has been processed */
 }
 return 0;
}

Now look at the implementation of binder_transaction:

/*
proc: the client process's binder state
thread: the client thread
tr: the data being sent to the server
reply: whether the command was BC_REPLY
*/
static void binder_transaction(struct binder_proc *proc,struct binder_thread *thread, struct binder_transaction_data *tr, int reply)
{
    struct binder_transaction *t;
    struct binder_work *tcomplete;
    binder_size_t *offp, *off_end;
    binder_size_t off_min;
    struct binder_proc *target_proc;
    struct binder_thread *target_thread = NULL;
    struct binder_node *target_node = NULL;
    struct list_head *target_list;
    wait_queue_head_t *target_wait;
    struct binder_transaction *in_reply_to = NULL;
    struct binder_transaction_log_entry *e;
    uint32_t return_error;
/* add a transaction log entry */
    e = binder_transaction_log_add(&binder_transaction_log);
    e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
    e->from_proc = proc->pid;
    e->from_thread = thread->pid;
    e->target_handle = tr->target.handle;
    e->data_size = tr->data_size;
    e->offsets_size = tr->offsets_size;
/* when sending the request to service manager, cmd = BC_TRANSACTION, so reply is false */
    if (reply) {
        in_reply_to = thread->transaction_stack;
        if (in_reply_to == NULL) {
            binder_user_error("%d:%d got reply transaction with no transaction stack\n",
                      proc->pid, thread->pid);
            return_error = BR_FAILED_REPLY;
            goto err_empty_call_stack;
        }
        binder_set_nice(in_reply_to->saved_priority);
        if (in_reply_to->to_thread != thread) {
            binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
                proc->pid, thread->pid, in_reply_to->debug_id,
                in_reply_to->to_proc ?
                in_reply_to->to_proc->pid : 0,
                in_reply_to->to_thread ?
                in_reply_to->to_thread->pid : 0);
            return_error = BR_FAILED_REPLY;
            in_reply_to = NULL;
            goto err_bad_call_stack;
        }
        thread->transaction_stack = in_reply_to->to_parent;
        target_thread = in_reply_to->from;
        if (target_thread == NULL) {
            return_error = BR_DEAD_REPLY;
            goto err_dead_binder;
        }
        if (target_thread->transaction_stack != in_reply_to) {
            binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
                proc->pid, thread->pid,
                target_thread->transaction_stack ?
                target_thread->transaction_stack->debug_id : 0,
                in_reply_to->debug_id);
            return_error = BR_FAILED_REPLY;
            in_reply_to = NULL;
            target_thread = NULL;
            goto err_dead_binder;
        }
        target_proc = target_thread->proc;
    } else {
        if (tr->target.handle) {/* handle is non-zero */
            struct binder_ref *ref;
            ref = binder_get_ref(proc, tr->target.handle, true);
            if (ref == NULL) {
                binder_user_error("%d:%d got transaction to invalid handle\n",proc->pid, thread->pid);
                return_error = BR_FAILED_REPLY;
                goto err_invalid_target_handle;
            }
            target_node = ref->node;
        } else {/* handle == 0, i.e. the target service is service manager itself */
            target_node = binder_context_mgr_node;/* when the target is service manager, skip the lookup and use the global binder_context_mgr_node directly */
            if (target_node == NULL) {
                return_error = BR_DEAD_REPLY;
                goto err_no_context_mgr_node;
            }
        }
        e->to_node = target_node->debug_id;
        target_proc = target_node->proc;/* from target_node, find the target process's binder state */
        if (target_proc == NULL) {
            return_error = BR_DEAD_REPLY;
            goto err_dead_binder;
        }
        if (security_binder_transaction(proc->tsk, target_proc->tsk) < 0) {
            return_error = BR_FAILED_REPLY;
            goto err_invalid_target_handle;
        }
        if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
            struct binder_transaction *tmp;

            tmp = thread->transaction_stack;
            if (tmp->to_thread != thread) {
                binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",proc->pid, thread->pid, tmp->debug_id, tmp->to_proc ? tmp->to_proc->pid : 0, tmp->to_thread ?tmp->to_thread->pid : 0);
                return_error = BR_FAILED_REPLY;
                goto err_bad_call_stack;
            }
            while (tmp) {/* walk the transaction stack */
                if (tmp->from && tmp->from->proc == target_proc)
                    target_thread = tmp->from;/* found the target thread */
                tmp = tmp->from_parent;
            }
        }
    }
/* pick the two queues: the target's todo list and its wait queue */
    if (target_thread) {
        e->to_thread = target_thread->pid;
        target_list = &target_thread->todo;
        target_wait = &target_thread->wait;
    } else {
        target_list = &target_proc->todo;
        target_wait = &target_proc->wait;
    }
    e->to_proc = target_proc->pid;

    /* TODO: reuse incoming transaction for reply */
    t = kzalloc(sizeof(*t), GFP_KERNEL); /* allocate the binder_transaction variable t */
    if (t == NULL) {
        return_error = BR_FAILED_REPLY;
        goto err_alloc_t_failed;
    }
    binder_stats_created(BINDER_STAT_TRANSACTION);

    tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); /* allocate the binder_work variable tcomplete */
    if (tcomplete == NULL) {
        return_error = BR_FAILED_REPLY;
        goto err_alloc_tcomplete_failed;
    }
    binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

    t->debug_id = ++binder_last_id;
    e->debug_id = t->debug_id;
/* debug output */
    if (reply)
        binder_debug(BINDER_DEBUG_TRANSACTION,
                 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n",
                 proc->pid, thread->pid, t->debug_id,
                 target_proc->pid, target_thread->pid,
                 (u64)tr->data.ptr.buffer,
                 (u64)tr->data.ptr.offsets,
                 (u64)tr->data_size, (u64)tr->offsets_size);
    else
        binder_debug(BINDER_DEBUG_TRANSACTION,
                 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n",
                 proc->pid, thread->pid, t->debug_id,
                 target_proc->pid, target_node->debug_id,
                 (u64)tr->data.ptr.buffer,
                 (u64)tr->data.ptr.offsets,
                 (u64)tr->data_size, (u64)tr->offsets_size);

/* fill in the binder_transaction variable t */
    if (!reply && !(tr->flags & TF_ONE_WAY))
        t->from = thread;/* the thread that initiated the transaction */
    else
        t->from = NULL;
    t->sender_euid = task_euid(proc->tsk);
    t->to_proc = target_proc;
    t->to_thread = target_thread;
    t->code = tr->code;/* the request code sent to the target */
    t->flags = tr->flags;
    t->priority = task_nice(current);

    trace_binder_transaction(reply, t, target_node);

    t->buffer = binder_alloc_buf(target_proc, tr->data_size,tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));/* allocate a binder_buffer the size of the request data from the target's mapped area; its physical pages are the same pages mapped into the requested service process */
    if (t->buffer == NULL) {
        return_error = BR_FAILED_REPLY;
        goto err_binder_alloc_buf_failed;
    }
    t->buffer->allow_user_free = 0;
    t->buffer->debug_id = t->debug_id;
    t->buffer->transaction = t;
    t->buffer->target_node = target_node;
    trace_binder_transaction_alloc_buf(t->buffer);
    if (target_node)
        binder_inc_node(target_node, 1, 0, NULL);

    offp = (binder_size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
/* copy the request data into t->buffer; note that the physical memory behind t->buffer is the same memory mapped into the service process, so in plain terms the client's data is copied straight into the service process's address space, where the service can read it directly */
    if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)tr->data.ptr.buffer, tr->data_size)) {
        binder_user_error("%d:%d got transaction with invalid data ptr\n", proc->pid, thread->pid);
        return_error = BR_FAILED_REPLY;
        goto err_copy_data_failed;
    }
    if (copy_from_user(offp, (const void __user *)(uintptr_t) tr->data.ptr.offsets, tr->offsets_size)) {
        binder_user_error("%d:%d got transaction with invalid offsets ptr\n", proc->pid, thread->pid);
        return_error = BR_FAILED_REPLY;
        goto err_copy_data_failed;
    }
    if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {/* alignment check on the offsets */
        binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n", proc->pid, thread->pid, (u64)tr->offsets_size);
        return_error = BR_FAILED_REPLY;
        goto err_bad_offset;
    }
    off_end = (void *)offp + tr->offsets_size;
    off_min = 0;
    for (; offp < off_end; offp++) {
        struct flat_binder_object *fp;

        if (*offp > t->buffer->data_size - sizeof(*fp) ||*offp < off_min ||t->buffer->data_size < sizeof(*fp) || !IS_ALIGNED(*offp, sizeof(u32))) {
            binder_user_error("%d:%d got transaction with invalid offset, %lld (min %lld, max %lld)\n",
                      proc->pid, thread->pid, (u64)*offp,
                      (u64)off_min,
                      (u64)(t->buffer->data_size -
                      sizeof(*fp)));
            return_error = BR_FAILED_REPLY;
            goto err_bad_offset;
        }
        fp = (struct flat_binder_object *)(t->buffer->data + *offp);
        off_min = *offp + sizeof(struct flat_binder_object);
        switch (fp->type) {
        case BINDER_TYPE_BINDER:
        case BINDER_TYPE_WEAK_BINDER: {
            struct binder_ref *ref;
            struct binder_node *node = binder_get_node(proc, fp->binder);

            if (node == NULL) {
                node = binder_new_node(proc, fp->binder, fp->cookie);
                if (node == NULL) {
                    return_error = BR_FAILED_REPLY;
                    goto err_binder_new_node_failed;
                }
                node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
                node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
            }
            if (fp->cookie != node->cookie) {
                binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
                    proc->pid, thread->pid,
                    (u64)fp->binder, node->debug_id,
                    (u64)fp->cookie, (u64)node->cookie);
                return_error = BR_FAILED_REPLY;
                goto err_binder_get_ref_for_node_failed;
            }
    if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
                return_error = BR_FAILED_REPLY;
                goto err_binder_get_ref_for_node_failed;
            }
            ref = binder_get_ref_for_node(target_proc, node);
            if (ref == NULL) {
                return_error = BR_FAILED_REPLY;
                goto err_binder_get_ref_for_node_failed;
            }
            if (fp->type == BINDER_TYPE_BINDER)
                fp->type = BINDER_TYPE_HANDLE;
            else
                fp->type = BINDER_TYPE_WEAK_HANDLE;
            fp->binder = 0;
            fp->handle = ref->desc;
            fp->cookie = 0;
            binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
                       &thread->todo);

            trace_binder_transaction_node_to_ref(t, node, ref);
            binder_debug(BINDER_DEBUG_TRANSACTION,
                     "        node %d u%016llx -> ref %d desc %d\n",
                     node->debug_id, (u64)node->ptr,
                     ref->debug_id, ref->desc);
        } break;
        case BINDER_TYPE_HANDLE:
        case BINDER_TYPE_WEAK_HANDLE: {
/*
1. Look up the binder_ref corresponding to this flat_binder_object.
2. Handle it differently depending on whether the target process is the one the node lives in.
*/
            struct binder_ref *ref = binder_get_ref(proc, fp->handle, fp->type == BINDER_TYPE_HANDLE);
            if (ref == NULL) {
                binder_user_error("%d:%d got transaction with invalid handle, %d\n",
                        proc->pid,
                        thread->pid, fp->handle);
                return_error = BR_FAILED_REPLY;
                goto err_binder_get_ref_failed;
            }
            if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
                return_error = BR_FAILED_REPLY;
                goto err_binder_get_ref_failed;
            }
            if (ref->node->proc == target_proc) {
                if (fp->type == BINDER_TYPE_HANDLE)
                    fp->type = BINDER_TYPE_BINDER;
                else
                    fp->type = BINDER_TYPE_WEAK_BINDER;
                fp->binder = ref->node->ptr;
                fp->cookie = ref->node->cookie;
                binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
                trace_binder_transaction_ref_to_node(t, ref);
                binder_debug(BINDER_DEBUG_TRANSACTION, "  ref %d desc %d -> node %d u%016llx\n",ref->debug_id, ref->desc, ref->node->debug_id,(u64)ref->node->ptr);
            } else {
                struct binder_ref *new_ref;
                new_ref = binder_get_ref_for_node(target_proc, ref->node);
                if (new_ref == NULL) {
                    return_error = BR_FAILED_REPLY;
                    goto err_binder_get_ref_for_node_failed;
                }
                fp->binder = 0;
                fp->handle = new_ref->desc;
                fp->cookie = 0;
                binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
                trace_binder_transaction_ref_to_ref(t, ref,new_ref);
                binder_debug(BINDER_DEBUG_TRANSACTION,   "        ref %d desc %d -> ref %d desc %d (node %d)\n", ref->debug_id, ref->desc, new_ref->debug_id,new_ref->desc, ref->node->debug_id);
            }
        } break;

        case BINDER_TYPE_FD: {
            int target_fd;
            struct file *file;

            if (reply) {
                if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
                    binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n",
                        proc->pid, thread->pid, fp->handle);
                    return_error = BR_FAILED_REPLY;
                    goto err_fd_not_allowed;
                }
            } else if (!target_node->accept_fds) {
                binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n",
                    proc->pid, thread->pid, fp->handle);
                return_error = BR_FAILED_REPLY;
                goto err_fd_not_allowed;
            }

            file = fget(fp->handle);
            if (file == NULL) {
                binder_user_error("%d:%d got transaction with invalid fd, %d\n",
                    proc->pid, thread->pid, fp->handle);
                return_error = BR_FAILED_REPLY;
                goto err_fget_failed;
            }
            if (security_binder_transfer_file(proc->tsk, target_proc->tsk, file) < 0) {
                fput(file);
                return_error = BR_FAILED_REPLY;
                goto err_get_unused_fd_failed;
            }
            target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
            if (target_fd < 0) {
                fput(file);
                return_error = BR_FAILED_REPLY;
                goto err_get_unused_fd_failed;
            }
            task_fd_install(target_proc, target_fd, file);
            trace_binder_transaction_fd(t, fp->handle, target_fd);
            binder_debug(BINDER_DEBUG_TRANSACTION,
                     "        fd %d -> %d\n", fp->handle, target_fd);
            /* TODO: fput? */
            fp->binder = 0;
            fp->handle = target_fd;
        } break;

        default:
            binder_user_error("%d:%d got transaction with invalid object type, %x\n",
                proc->pid, thread->pid, fp->type);
            return_error = BR_FAILED_REPLY;
            goto err_bad_object_type;
        }
    }
    if (reply) {/* in our case reply is still false */
        BUG_ON(t->buffer->async_transaction != 0);
        binder_pop_transaction(target_thread, in_reply_to);
    } else if (!(t->flags & TF_ONE_WAY)) {
        BUG_ON(t->buffer->async_transaction != 0);
        t->need_reply = 1;/* record this transaction on the thread's stack */
        t->from_parent = thread->transaction_stack;
        thread->transaction_stack = t;
    } else {
        BUG_ON(target_node == NULL);
        BUG_ON(t->buffer->async_transaction != 1);
        if (target_node->has_async_transaction) {
            target_list = &target_node->async_todo;
            target_wait = NULL;
        } else
            target_node->has_async_transaction = 1;
    }
    t->work.type = BINDER_WORK_TRANSACTION;
    list_add_tail(&t->work.entry, target_list);/* enqueue on target_list, i.e. the target process's todo queue */
    tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
    list_add_tail(&tcomplete->entry, &thread->todo);/* enqueue on the client thread's own todo queue: there is still an unfinished piece of work */
    if (target_wait) {
        if (reply || !(t->flags & TF_ONE_WAY)) {
            preempt_disable();
            wake_up_interruptible_sync(target_wait);/* wake up the service process */
            preempt_enable_no_resched();
        } else {
            wake_up_interruptible(target_wait);
        }
    }
    return;

err_get_unused_fd_failed:
err_fget_failed:
err_fd_not_allowed:
err_binder_get_ref_for_node_failed:
err_binder_get_ref_failed:
err_binder_new_node_failed:
err_bad_object_type:
err_bad_offset:
err_copy_data_failed:
    trace_binder_transaction_failed_buffer_release(t->buffer);
    binder_transaction_buffer_release(target_proc, t->buffer, offp);
    t->buffer->transaction = NULL;
    binder_free_buf(target_proc, t->buffer);
err_binder_alloc_buf_failed:
    kfree(tcomplete);
    binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
    kfree(t);
    binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
    binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
             "%d:%d transaction failed %d, size %lld-%lld\n",
             proc->pid, thread->pid, return_error,
             (u64)tr->data_size, (u64)tr->offsets_size);
    {
        struct binder_transaction_log_entry *fe;

        fe = binder_transaction_log_add(&binder_transaction_log_failed);
        *fe = *e;
    }

    BUG_ON(thread->return_error != BR_OK);
    if (in_reply_to) {
        thread->return_error = BR_TRANSACTION_COMPLETE;
        binder_send_failed_reply(in_reply_to, return_error);
    } else
        thread->return_error = return_error;
}
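
The tail of binder_transaction is a plain producer/consumer handoff: queue the work on the target's todo list, queue a completion on the caller's own list, then wake the target. A user-space analogue of that handoff (purely illustrative; the driver uses kernel list_head and wait queues instead):

#include <condition_variable>
#include <list>
#include <mutex>

struct Work { int type; };          // stand-in for struct binder_work

struct Target {
    std::mutex lock;
    std::condition_variable wait;   // plays the role of target_wait
    std::list<Work> todo;           // plays the role of target_list
};

// Mirrors: list_add_tail(&t->work.entry, target_list); wake_up_interruptible(target_wait);
void submit(Target& target, Work w) {
    {
        std::lock_guard<std::mutex> g(target.lock);
        target.todo.push_back(w);
    }
    target.wait.notify_one();
}

// Mirrors the sleep in binder_thread_read: block until the todo list is non-empty.
Work take(Target& target) {
    std::unique_lock<std::mutex> g(target.lock);
    target.wait.wait(g, [&] { return !target.todo.empty(); });
    Work w = target.todo.front();
    target.todo.pop_front();
    return w;
}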

2.2 Reading data from the binder driver

static int binder_thread_read(struct binder_proc *proc,struct binder_thread *thread,binder_uintptr_t binder_buffer, size_t size,binder_size_t *consumed, int non_block)
{
 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;/* the buffer the results are written into */
 void __user *ptr = buffer + *consumed;/* current position in the buffer */
 void __user *end = buffer + size;/* end of the buffer */

 int ret = 0;
 int wait_for_proc_work;

 if (*consumed == 0) {
  if (put_user(BR_NOOP, (uint32_t __user *)ptr))/* if this is the start of the read, write a BR_NOOP first */
   return -EFAULT;
  ptr += sizeof(uint32_t);
 }

retry:
 wait_for_proc_work = thread->transaction_stack == NULL && list_empty(&thread->todo);

 if (thread->return_error != BR_OK && ptr < end) {
  if (thread->return_error2 != BR_OK) {
   if (put_user(thread->return_error2, (uint32_t __user *)ptr))
    return -EFAULT;
   ptr += sizeof(uint32_t);
   binder_stat_br(proc, thread, thread->return_error2);
   if (ptr == end)
    goto done;
   thread->return_error2 = BR_OK;
  }
  if (put_user(thread->return_error, (uint32_t __user *)ptr))
   return -EFAULT;
  ptr += sizeof(uint32_t);
  binder_stat_br(proc, thread, thread->return_error);
  thread->return_error = BR_OK;
  goto done;
 }


 thread->looper |= BINDER_LOOPER_STATE_WAITING;
 if (wait_for_proc_work)
  proc->ready_threads++;

 binder_unlock(__func__);

 trace_binder_wait_for_work(wait_for_proc_work,
       !!thread->transaction_stack,
       !list_empty(&thread->todo));
 if (wait_for_proc_work) {
  if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
     BINDER_LOOPER_STATE_ENTERED))) {
   binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
    proc->pid, thread->pid, thread->looper);
   wait_event_interruptible(binder_user_error_wait,
       binder_stop_on_user_error < 2);
  }
  binder_set_nice(proc->default_priority);
  if (non_block) {
   if (!binder_has_proc_work(proc, thread))
    ret = -EAGAIN;
  } else
   ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
 } else {
  if (non_block) {
   if (!binder_has_thread_work(thread))
    ret = -EAGAIN;
  } else
   ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));/* normally the thread goes to sleep here, until the service process has written the reply into this process's memory and woken it up; execution then resumes from this point */
 }

 binder_lock(__func__);

 if (wait_for_proc_work)
  proc->ready_threads--;
 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

 if (ret)
  return ret;

 while (1) {
  uint32_t cmd;
  struct binder_transaction_data tr;
  struct binder_work *w;
  struct binder_transaction *t = NULL;

  if (!list_empty(&thread->todo)) {/* at this point thread->todo is not empty */
   w = list_first_entry(&thread->todo, struct binder_work,entry);
  } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
   w = list_first_entry(&proc->todo, struct binder_work,
          entry);
  } else {
   /* no data added */
   if (ptr - buffer == 4 &&
       !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
    goto retry;
   break;
  }

  if (end - ptr < sizeof(tr) + 4)/* space check */
   break;

  switch (w->type) {/* at this point w->type == BINDER_WORK_TRANSACTION_COMPLETE */
  case BINDER_WORK_TRANSACTION: {
   t = container_of(w, struct binder_transaction, work);
  } break;
  case BINDER_WORK_TRANSACTION_COMPLETE: {
   cmd = BR_TRANSACTION_COMPLETE;
   if (put_user(cmd, (uint32_t __user *)ptr))/* write the BR_TRANSACTION_COMPLETE cmd into the read buffer */
    return -EFAULT;
   ptr += sizeof(uint32_t);/* advance the pointer by one uint32_t */

   binder_stat_br(proc, thread, cmd);
   binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE, "%d:%d BR_TRANSACTION_COMPLETE\n",proc->pid, thread->pid);

   list_del(&w->entry);/* remove the entry just handled */
   kfree(w);
   binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
  } break;/* at this point the read buffer holds two cmds: BR_NOOP and BR_TRANSACTION_COMPLETE */
}
 /* other cases omitted */
  BUG_ON(t->buffer == NULL);
  if (t->buffer->target_node) {
   struct binder_node *target_node = t->buffer->target_node;
   tr.target.ptr = target_node->ptr;
   tr.cookie = target_node->cookie;
   t->saved_priority = task_nice(current);
   if (t->priority < target_node->min_priority &&
       !(t->flags & TF_ONE_WAY))
    binder_set_nice(t->priority);
   else if (!(t->flags & TF_ONE_WAY) ||t->saved_priority > target_node->min_priority)
    binder_set_nice(target_node->min_priority);
   cmd = BR_TRANSACTION;
  } else {
   tr.target.ptr = 0;
   tr.cookie = 0;
   cmd = BR_REPLY;
  }
  tr.code = t->code;
  tr.flags = t->flags;
  tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
  if (t->from) {
   struct task_struct *sender = t->from->proc->tsk;

   tr.sender_pid = task_tgid_nr_ns(sender,
       task_active_pid_ns(current));
  } else {
   tr.sender_pid = 0;
  }

  tr.data_size = t->buffer->data_size;/* payload size */
  tr.offsets_size = t->buffer->offsets_size;
  tr.data.ptr.buffer = (binder_uintptr_t)( (uintptr_t)t->buffer->data + proc->user_buffer_offset);/* user-space address where the payload lives */
  tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size,sizeof(void *));

  if (put_user(cmd, (uint32_t __user *)ptr))
   return -EFAULT;
  ptr += sizeof(uint32_t);
  if (copy_to_user(ptr, &tr, sizeof(tr)))/* copy the tr data to where ptr points */
   return -EFAULT;
  ptr += sizeof(tr);/* advance the pointer past one tr */

  trace_binder_transaction_received(t);
  binder_stat_br(proc, thread, cmd);

  list_del(&t->work.entry);/* unlink t */
  t->buffer->allow_user_free = 1;
  if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
   t->to_parent = thread->transaction_stack;
   t->to_thread = thread;
   thread->transaction_stack = t;
  } else {
   t->buffer->transaction = NULL;
   kfree(t);
   binder_stats_deleted(BINDER_STAT_TRANSACTION);

}/* and with that, one full pass of the binder_ioctl loop is complete */
 return 0;
}
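
Back in user space, waitForResponse then walks the buffer the driver has just filled: a BR_NOOP, a BR_TRANSACTION_COMPLETE, and, once the reply arrives, a BR_REPLY followed by a binder_transaction_data. A simplified sketch of that parse (assumes in holds the bytes the driver wrote, as filled in by the talkWithDriver round trip sketched earlier):

#include <cstdint>
#include <cstring>
#include <vector>
#include <linux/android/binder.h>

void parseReturnBuffer(const std::vector<uint8_t>& in) {
    size_t pos = 0;
    while (pos + sizeof(uint32_t) <= in.size()) {
        uint32_t cmd;
        memcpy(&cmd, in.data() + pos, sizeof(cmd));
        pos += sizeof(cmd);
        switch (cmd) {
        case BR_NOOP:                  // filler the driver writes at the start of a read
            break;
        case BR_TRANSACTION_COMPLETE:  // our BC_TRANSACTION was accepted by the driver
            break;
        case BR_REPLY: {               // followed by a binder_transaction_data record
            binder_transaction_data tr;
            if (pos + sizeof(tr) > in.size()) return;
            memcpy(&tr, in.data() + pos, sizeof(tr));
            pos += sizeof(tr);
            // tr.data.ptr.buffer / tr.data_size now describe the reply payload
            return;
        }
        default:
            return;                    // other BR_* codes are not handled in this sketch
        }
    }
}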

Reference: Binder 通信筆記 (Java); 《深入理解Android內核設計思想》 (Understanding the Android Kernel Design).
