Binder on the JNI Side of the Framework Layer
Contents

1. Where Binder JNI sits in the system
1.1 Summary
2. Code analysis
2.1 BBinder creation
2.2 BpBinder is created when a service is looked up
2.3 JNI implementation
2.4 android_os_BinderProxy_transact in the JNI layer
2.5 BpBinder::transact implementation
2.6 waitForResponse handling
2.7 talkWithDriver handling
3. Binder server-side analysis
3.1 getAndExecuteCommand: the server-side entry from the driver
3.2 executeCommand runs the callback
3.3 BBinder handles the callback
3.4 Back into android_util_Binder.cpp
3.5 Calling back into the Java-layer Binder
4. How to find the JNI counterpart of a Java class
5. Closing remarks
1. Where Binder JNI sits in the system
1.1 Summary
1) The part circled in red in the diagram above is the Binder JNI, and more generally where the Android system's JNI layer sits. It is the bridge that links the Java layer with the native layer, playing a connecting role much like the system's syscall interface between user space and the kernel.
2) When studying Binder, the following corresponding concepts need attention:
| Java layer | Native layer | Notes |
| --- | --- | --- |
| Binder.java | Binder.cpp --> JavaBBinder | BinderProxy is declared in the same Binder.java file as Binder |
| BinderProxy.java | BpBinder.cpp --> BpBinder | The BpBinder is created by ProcessState::getStrongProxyForHandle() (ProcessState.cpp) |
| mObject | gBinderOffsets.mObject / gBinderProxyOffsets.mObject | In BinderProxy, mObject is essentially the native BpBinder pointer |
| mOwner | | |
| mDescriptor | | |
| init | static void android_os_Binder_init(JNIEnv* env, jobject obj) | { "init", "()V", (void*)android_os_Binder_init }, |
| execTransact | gBinderOffsets.mExecTransact | |
2. Code analysis
Analysis of the Java-layer Binder (/Volumes/aosp/android-8.1.0_r52/frameworks/base/core/java/android/os/Binder.java, trimmed):
```java
public class Binder implements IBinder {
    private long mObject;

    public Binder() {
        init();
    }

    public @Nullable IInterface queryLocalInterface(@NonNull String descriptor) {
        if (mDescriptor.equals(descriptor)) {
            return mOwner;
        }
        return null;
    }

    protected boolean onTransact(int code, @NonNull Parcel data, @Nullable Parcel reply,
            int flags) throws RemoteException {
        if (code == INTERFACE_TRANSACTION) {
            reply.writeString(getInterfaceDescriptor());
            return true;
        } else if (code == DUMP_TRANSACTION) {
            ParcelFileDescriptor fd = data.readFileDescriptor();
            String[] args = data.readStringArray();
            if (fd != null) {
                try {
                    dump(fd.getFileDescriptor(), args);
                } finally {
                    IoUtils.closeQuietly(fd);
                }
            }
            // Write the StrictMode header.
            return true;
        }
        return false;
    }

    public final boolean transact(int code, @NonNull Parcel data, @Nullable Parcel reply,
            int flags) throws RemoteException {
        boolean r = onTransact(code, data, reply, flags);
        if (reply != null) {
            reply.setDataPosition(0);
        }
        return r;
    }

    public void linkToDeath(@NonNull DeathRecipient recipient, int flags) {
    }

    /**
     * Local implementation is a no-op.
     */
    public boolean unlinkToDeath(@NonNull DeathRecipient recipient, int flags) {
        return true;
    }

    private native final void init();
    private native final void destroyBinder();

    // Entry point from android_util_Binder.cpp's onTransact
    private boolean execTransact(int code, long dataObj, long replyObj, int flags) {
        // ...
        res = onTransact(code, data, reply, flags);
        // ...
        return res;
    }
}

final class BinderProxy implements IBinder {
    public native boolean pingBinder();
    public native boolean isBinderAlive();

    public IInterface queryLocalInterface(String descriptor) {
        return null;
    }

    public boolean transact(int code, Parcel data, Parcel reply, int flags)
            throws RemoteException {
        try {
            return transactNative(code, data, reply, flags);
        } finally {
        }
    }

    public native String getInterfaceDescriptor() throws RemoteException;
    public native boolean transactNative(int code, Parcel data, Parcel reply, int flags)
            throws RemoteException;
    public native void linkToDeath(DeathRecipient recipient, int flags)
            throws RemoteException;
    public native boolean unlinkToDeath(DeathRecipient recipient, int flags);

    BinderProxy() {
        mSelf = new WeakReference(this);
    }

    private native final void destroy();

    final private WeakReference mSelf;
    private long mObject;
    private long mOrgue;
}
```
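Before moving on, here is a minimal sketch (not AOSP code) of how AIDL-generated Stub.asInterface() typically exploits the difference between the two classes above: queryLocalInterface() on a local Binder returns the in-process implementation, while a BinderProxy always returns null, forcing the caller onto a proxy that goes through transact(). The class name and descriptor string are hypothetical.

```java
import android.os.IBinder;
import android.os.IInterface;

public final class DemoInterfaceLookup {
    // Hypothetical interface descriptor, used only for this sketch.
    static final String DESCRIPTOR = "com.example.IDemo";

    public static IInterface asInterface(IBinder obj) {
        if (obj == null) return null;
        // Same process: Binder.queryLocalInterface() matches mDescriptor
        // and returns mOwner, so no IPC is needed.
        IInterface local = obj.queryLocalInterface(DESCRIPTOR);
        if (local != null) {
            return local;
        }
        // Cross process: obj is a BinderProxy and queryLocalInterface() returns null,
        // so real AIDL-generated code would wrap obj in a Proxy that calls transact().
        return null;
    }
}
```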
2.1 BBinder creation
The Java-layer Binder code is actually quite simple; there are really only two key objects. The client side holds a BinderProxy, while the server side holds a Binder object. Note in particular that the native-side counterpart (the JavaBBinderHolder, from which the JavaBBinder/BBinder is obtained) is set up by the native init() call made in the Java Binder constructor.
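As a concrete illustration of the service side, the sketch below subclasses Binder: constructing it runs the Binder() constructor shown earlier, which calls native init() and creates the native counterpart, and onTransact() is where execTransact() will later dispatch. HelloBinder, DESCRIPTOR and CODE_HELLO are made up for this article; this is a hedged sketch, not AOSP code.

```java
import android.os.Binder;
import android.os.IBinder;
import android.os.Parcel;
import android.os.RemoteException;

public class HelloBinder extends Binder {
    // Hypothetical protocol: one transaction code and one descriptor string.
    public static final String DESCRIPTOR = "com.example.IHello";
    public static final int CODE_HELLO = IBinder.FIRST_CALL_TRANSACTION;

    public HelloBinder() {
        // super() -> Binder() -> native init(): the native-side holder is created here.
    }

    @Override
    protected boolean onTransact(int code, Parcel data, Parcel reply, int flags)
            throws RemoteException {
        if (code == CODE_HELLO) {
            data.enforceInterface(DESCRIPTOR);
            String name = data.readString();
            if (reply != null) {
                reply.writeNoException();
                reply.writeString("hello, " + name);
            }
            return true;
        }
        return super.onTransact(code, data, reply, flags);
    }
}
```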
2.2 BpBinder is created when a service is looked up
The Binder JNI is registered from AndroidRuntime::startReg(JNIEnv* env):
```cpp
REG_JNI(register_android_view_KeyCharacterMap),
REG_JNI(register_android_os_Process),
REG_JNI(register_android_os_SystemProperties),
REG_JNI(register_android_os_Binder),
```
2.3 JNI implementation
/Volumes/aosp/android-8.1.0_r52/frameworks/base/core/jni/android_util_Binder.cpp
```cpp
int register_android_os_Binder(JNIEnv* env)
{
    if (int_register_android_os_Binder(env) < 0)
        return -1;
    if (int_register_android_os_BinderInternal(env) < 0)
        return -1;
    if (int_register_android_os_BinderProxy(env) < 0)
        return -1;

    jclass clazz = FindClassOrDie(env, "android/util/Log");
    gLogOffsets.mClass = MakeGlobalRefOrDie(env, clazz);
    gLogOffsets.mLogE = GetStaticMethodIDOrDie(env, clazz, "e",
            "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/Throwable;)I");

    clazz = FindClassOrDie(env, "android/os/ParcelFileDescriptor");
    gParcelFileDescriptorOffsets.mClass = MakeGlobalRefOrDie(env, clazz);
    gParcelFileDescriptorOffsets.mConstructor = GetMethodIDOrDie(env, clazz, "<init>",
            "(Ljava/io/FileDescriptor;)V");

    clazz = FindClassOrDie(env, "android/os/StrictMode");
    gStrictModeCallbackOffsets.mClass = MakeGlobalRefOrDie(env, clazz);
    gStrictModeCallbackOffsets.mCallback = GetStaticMethodIDOrDie(env, clazz,
            "onBinderStrictModePolicyChange", "(I)V");

    clazz = FindClassOrDie(env, "java/lang/Thread");
    gThreadDispatchOffsets.mClass = MakeGlobalRefOrDie(env, clazz);
    gThreadDispatchOffsets.mDispatchUncaughtException = GetMethodIDOrDie(env, clazz,
            "dispatchUncaughtException", "(Ljava/lang/Throwable;)V");
    gThreadDispatchOffsets.mCurrentThread = GetStaticMethodIDOrDie(env, clazz, "currentThread",
            "()Ljava/lang/Thread;");

    return 0;
}
```
2.4 android_os_BinderProxy_transact in the JNI layer
```cpp
static jboolean android_os_BinderProxy_transact(JNIEnv* env, jobject obj,
        jint code, jobject dataObj, jobject replyObj, jint flags) // throws RemoteException
{
    if (dataObj == NULL) {
        jniThrowNullPointerException(env, NULL);
        return JNI_FALSE;
    }

    Parcel* data = parcelForJavaObject(env, dataObj);
    if (data == NULL) {
        return JNI_FALSE;
    }
    Parcel* reply = parcelForJavaObject(env, replyObj);
    if (reply == NULL && replyObj != NULL) {
        return JNI_FALSE;
    }

    IBinder* target = (IBinder*)env->GetLongField(obj, gBinderProxyOffsets.mObject);
    if (target == NULL) {
        jniThrowException(env, "java/lang/IllegalStateException", "Binder has been finalized!");
        return JNI_FALSE;
    }

    ALOGV("Java code calling transact on %p in Java object %p with code %" PRId32 "\n",
            target, obj, code);

    bool time_binder_calls;
    int64_t start_millis;
    if (kEnableBinderSample) {
        // Only log the binder call duration for things on the Java-level main thread.
        // But if we don't
        time_binder_calls = should_time_binder_calls();

        if (time_binder_calls) {
            start_millis = uptimeMillis();
        }
    }

    //printf("Transact from Java code to %p sending: ", target); data->print();
    status_t err = target->transact(code, *data, reply, flags);
    //if (reply) printf("Transact from Java code to %p received: ", target); reply->print();
    // ...
```
At its core, this code does two things:
1) Find the target:
IBinder* target = (IBinder*)env->GetLongField(obj, gBinderProxyOffsets.mObject);
The target is in fact the BpBinder.
2) Call target->transact().
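To see the client path end to end, here is a hedged sketch of a raw transact() call: on an IBinder that is really a BinderProxy, transact() goes to transactNative(), i.e. android_os_BinderProxy_transact() above, which fetches the BpBinder from mObject and calls target->transact(). The DESCRIPTOR/CODE_HELLO protocol matches the hypothetical HelloBinder sketched in 2.1; how the remote IBinder was obtained (onServiceConnected(), ServiceManager, etc.) is left to the caller.

```java
import android.os.IBinder;
import android.os.Parcel;
import android.os.RemoteException;

public final class HelloClient {
    static final String DESCRIPTOR = "com.example.IHello";       // hypothetical
    static final int CODE_HELLO = IBinder.FIRST_CALL_TRANSACTION;

    public static String sayHello(IBinder remote, String name) throws RemoteException {
        Parcel data = Parcel.obtain();
        Parcel reply = Parcel.obtain();
        try {
            data.writeInterfaceToken(DESCRIPTOR);
            data.writeString(name);
            // For a remote binder: BinderProxy.transact() -> transactNative()
            // -> android_os_BinderProxy_transact() -> BpBinder::transact().
            remote.transact(CODE_HELLO, data, reply, 0);
            reply.readException();
            return reply.readString();
        } finally {
            reply.recycle();
            data.recycle();
        }
    }
}
```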
2.5 BpBinder::transact implementation
/Volumes/aosp/android-8.1.0_r52/frameworks/native/libs/binder/BpBinder.cpp
```cpp
status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    // Once a binder has died, it will never come back to life.
    if (mAlive) {
        status_t status = IPCThreadState::self()->transact(
            mHandle, code, data, reply, flags);
        if (status == DEAD_OBJECT) mAlive = 0;
        return status;
    }

    return DEAD_OBJECT;
}
```
The core of this, too, is two things:
1) Obtain the calling thread's IPCThreadState instance.
/Volumes/aosp/android-8.1.0_r52/frameworks/native/libs/binder/IPCThreadState.cpp
As the code below shows, each thread that uses Binder gets its own IPCThreadState instance through thread-local storage (TLS); it is created the first time that thread sends data through Binder. On the other end, the Binder driver wakes up (or asks the server process to spawn) a binder thread to handle the transaction. Pay special attention to mHandle: it is through this handle that the Binder driver locates the corresponding ref --> node --> proc.
```cpp
IPCThreadState* IPCThreadState::self()
{
    if (gHaveTLS) {
restart:
        const pthread_key_t k = gTLS;
        IPCThreadState* st = (IPCThreadState*)pthread_getspecific(k);
        if (st) return st;
        return new IPCThreadState;
    }

    if (gShutdown) {
        ALOGW("Calling IPCThreadState::self() during shutdown is dangerous, expect a crash.\n");
        return NULL;
    }

    pthread_mutex_lock(&gTLSMutex);
    if (!gHaveTLS) {
        int key_create_value = pthread_key_create(&gTLS, threadDestructor);
        if (key_create_value != 0) {
            pthread_mutex_unlock(&gTLSMutex);
            ALOGW("IPCThreadState::self() unable to create TLS key, expect a crash: %s\n",
                    strerror(key_create_value));
            return NULL;
        }
        gHaveTLS = true;
    }
    pthread_mutex_unlock(&gTLSMutex);
    goto restart;
}
```
2) Use IPCThreadState to send the data to the Binder driver.
/Volumes/aosp/android-8.1.0_r52/frameworks/native/libs/binder/IPCThreadState.cpp
```cpp
status_t IPCThreadState::transact(int32_t handle,
                                  uint32_t code, const Parcel& data,
                                  Parcel* reply, uint32_t flags)
{
    // ...
    if (err == NO_ERROR) {
        LOG_ONEWAY(">>>> SEND from pid %d uid %d %s", getpid(), getuid(),
            (flags & TF_ONE_WAY) == 0 ? "READ REPLY" : "ONE WAY");
        err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
    }

    if (err != NO_ERROR) {
        if (reply) reply->setError(err);
        return (mLastError = err);
    }

    if ((flags & TF_ONE_WAY) == 0) {
        #if 0
        if (code == 4) { // relayout
            ALOGI(">>>>>> CALLING transaction 4");
        } else {
            ALOGI(">>>>>> CALLING transaction %d", code);
        }
        #endif
        if (reply) {
            err = waitForResponse(reply);
        } else {
            Parcel fakeReply;
            err = waitForResponse(&fakeReply);
        }
        // ...
    }

    return err;
}
```
1) writeTransactionData assembles the data (the packing step we mentioned), packing it into a binder_transaction_data object:
```cpp
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
    int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
    binder_transaction_data tr;

    tr.target.ptr = 0; /* Don't pass uninitialized stack data to a remote process */
    tr.target.handle = handle;
    tr.code = code;
    tr.flags = binderFlags;
    tr.cookie = 0;
    tr.sender_pid = 0;
    tr.sender_euid = 0;
    // ...

    mOut.writeInt32(cmd);
    mOut.write(&tr, sizeof(tr));

    return NO_ERROR;
}
```
2) Synchronous vs. one-way handling: for a synchronous call ((flags & TF_ONE_WAY) == 0) the calling thread waits right here for the result; if the caller did not supply a reply Parcel, a local fakeReply is still waited on. For a one-way call no reply data is expected, and the call returns once the driver has accepted the transaction.
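The two cases can also be seen from the Java side. The sketch below (same hypothetical CODE_HELLO protocol as before, not AOSP code) contrasts a blocking call with a FLAG_ONEWAY call: the former ends up waiting in waitForResponse() for the server's reply, the latter returns without reply data.

```java
import android.os.IBinder;
import android.os.Parcel;
import android.os.RemoteException;

public final class CallModes {
    static final String DESCRIPTOR = "com.example.IHello";       // hypothetical
    static final int CODE_HELLO = IBinder.FIRST_CALL_TRANSACTION;

    // Synchronous: TF_ONE_WAY is not set, so the calling thread blocks until the reply arrives.
    public static String callBlocking(IBinder remote, String name) throws RemoteException {
        Parcel data = Parcel.obtain();
        Parcel reply = Parcel.obtain();
        try {
            data.writeInterfaceToken(DESCRIPTOR);
            data.writeString(name);
            remote.transact(CODE_HELLO, data, reply, 0);
            return reply.readString();
        } finally {
            reply.recycle();
            data.recycle();
        }
    }

    // One-way: TF_ONE_WAY is set, no reply data comes back, the call returns quickly.
    public static void callOneway(IBinder remote, String name) throws RemoteException {
        Parcel data = Parcel.obtain();
        try {
            data.writeInterfaceToken(DESCRIPTOR);
            data.writeString(name);
            remote.transact(CODE_HELLO, data, null, IBinder.FLAG_ONEWAY);
        } finally {
            data.recycle();
        }
    }
}
```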
2.6 waitForResponse handling
```cpp
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    uint32_t cmd;
    int32_t err;

    while (1) {
        if ((err=talkWithDriver()) < NO_ERROR) break;
        err = mIn.errorCheck();
        if (err < NO_ERROR) break;
        if (mIn.dataAvail() == 0) continue;

        cmd = (uint32_t)mIn.readInt32();

        IF_LOG_COMMANDS() {
            alog << "Processing waitForResponse Command: "
                << getReturnString(cmd) << endl;
        }

        switch (cmd) {
        case BR_TRANSACTION_COMPLETE:
            if (!reply && !acquireResult) goto finish;
            break;

        case BR_DEAD_REPLY:
            err = DEAD_OBJECT;
            goto finish;

        case BR_FAILED_REPLY:
            err = FAILED_TRANSACTION;
            goto finish;
        // ...
```
At this point the thread loops, talking to the Binder driver and waiting for its result.
2.7 talkWithDriver handling
```cpp
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    if (mProcess->mDriverFD <= 0) {
        return -EBADF;
    }

    binder_write_read bwr;

    // Is the read buffer empty?
    const bool needRead = mIn.dataPosition() >= mIn.dataSize();

    // We don't want to write anything if we are still reading
    // from data left in the input buffer and the caller
    // has requested to read the next data.
    const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;

    bwr.write_size = outAvail;
    bwr.write_buffer = (uintptr_t)mOut.data();

    // This is what we'll read.
    if (doReceive && needRead) {
        bwr.read_size = mIn.dataCapacity();
        bwr.read_buffer = (uintptr_t)mIn.data();
    } else {
        bwr.read_size = 0;
        bwr.read_buffer = 0;
    }

    IF_LOG_COMMANDS() {
        TextOutput::Bundle _b(alog);
        if (outAvail != 0) {
            alog << "Sending commands to driver: " << indent;
            const void* cmds = (const void*)bwr.write_buffer;
            const void* end = ((const uint8_t*)cmds)+bwr.write_size;
            alog << HexDump(cmds, bwr.write_size) << endl;
            while (cmds < end) cmds = printCommand(alog, cmds);
            alog << dedent;
        }
        alog << "Size of receive buffer: " << bwr.read_size
            << ", needRead: " << needRead << ", doReceive: " << doReceive << endl;
    }

    // Return immediately if there is nothing to do.
    if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;

    bwr.write_consumed = 0;
    bwr.read_consumed = 0;
    status_t err;
    do {
        IF_LOG_COMMANDS() {
            alog << "About to read/write, write size = " << mOut.dataSize() << endl;
        }
#if defined(__ANDROID__)
        if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
            err = NO_ERROR;
        else
            err = -errno;
#else
        err = INVALID_OPERATION;
#endif
        if (mProcess->mDriverFD <= 0) {
            err = -EBADF;
        }
        IF_LOG_COMMANDS() {
            alog << "Finished read/write, write size = " << mOut.dataSize() << endl;
        }
    } while (err == -EINTR);
    // ...
```
talkWithDriver hands the client's data to the Binder driver through the BINDER_WRITE_READ ioctl, then waits for the cmd and data the driver sends back.
3. Binder server-side analysis
Part 2 above analyzed the proxy side of Binder; next we analyze the server side.
The server side is driven by the cmd and data that the Binder driver returns to it.
3.1 getAndExecuteCommand: the server-side entry from the driver
/Volumes/aosp/android-8.1.0_r52/frameworks/native/libs/binder/IPCThreadState.cpp
```cpp
status_t IPCThreadState::getAndExecuteCommand()
{
    status_t result;
    int32_t cmd;

    result = talkWithDriver();
    if (result >= NO_ERROR) {
        size_t IN = mIn.dataAvail();
        if (IN < sizeof(int32_t)) return result;
        cmd = mIn.readInt32();
        IF_LOG_COMMANDS() {
            alog << "Processing top-level Command: "
                 << getReturnString(cmd) << endl;
        }

        pthread_mutex_lock(&mProcess->mThreadCountLock);
        mProcess->mExecutingThreadsCount++;
        if (mProcess->mExecutingThreadsCount >= mProcess->mMaxThreads &&
                mProcess->mStarvationStartTimeMs == 0) {
            mProcess->mStarvationStartTimeMs = uptimeMillis();
        }
        pthread_mutex_unlock(&mProcess->mThreadCountLock);

        result = executeCommand(cmd);

        pthread_mutex_lock(&mProcess->mThreadCountLock);
        mProcess->mExecutingThreadsCount--;
        if (mProcess->mExecutingThreadsCount < mProcess->mMaxThreads &&
                mProcess->mStarvationStartTimeMs != 0) {
            int64_t starvationTimeMs = uptimeMillis() - mProcess->mStarvationStartTimeMs;
            if (starvationTimeMs > 100) {
                ALOGE("binder thread pool (%zu threads) starved for %" PRId64 " ms",
                      mProcess->mMaxThreads, starvationTimeMs);
            }
            mProcess->mStarvationStartTimeMs = 0;
        }
        pthread_cond_broadcast(&mProcess->mThreadCountDecrement);
        pthread_mutex_unlock(&mProcess->mThreadCountLock);
    }

    return result;
}
```
For the details of how the Binder driver calls back into this path, see the article "android系统_Binder驱动原理" on CSDN.
3.2 executeCommand runs the callback
```cpp
status_t IPCThreadState::executeCommand(int32_t cmd)
{
    BBinder* obj;
    RefBase::weakref_type* refs;
    status_t result = NO_ERROR;

    switch ((uint32_t)cmd) {
    case BR_TRANSACTION:
        {
            binder_transaction_data tr;
            result = mIn.read(&tr, sizeof(tr));
            ALOG_ASSERT(result == NO_ERROR,
                "Not enough command data for brTRANSACTION");
            if (result != NO_ERROR) break;

            Parcel buffer;
            buffer.ipcSetDataReference(
                reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                tr.data_size,
                reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                tr.offsets_size/sizeof(binder_size_t), freeBuffer, this);

            const pid_t origPid = mCallingPid;
            const uid_t origUid = mCallingUid;
            const int32_t origStrictModePolicy = mStrictModePolicy;
            const int32_t origTransactionBinderFlags = mLastTransactionBinderFlags;

            mCallingPid = tr.sender_pid;
            mCallingUid = tr.sender_euid;
            mLastTransactionBinderFlags = tr.flags;

            //ALOGI(">>>> TRANSACT from pid %d uid %d\n", mCallingPid, mCallingUid);

            Parcel reply;
            status_t error;
            IF_LOG_TRANSACTIONS() {
                TextOutput::Bundle _b(alog);
                alog << "BR_TRANSACTION thr " << (void*)pthread_self()
                    << " / obj " << tr.target.ptr << " / code "
                    << TypeCode(tr.code) << ": " << indent << buffer
                    << dedent << endl
                    << "Data addr = "
                    << reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer)
                    << ", offsets addr="
                    << reinterpret_cast<const size_t*>(tr.data.ptr.offsets) << endl;
            }

            if (tr.target.ptr) {
                // We only have a weak reference on the target object, so we must first try to
                // safely acquire a strong reference before doing anything else with it.
                if (reinterpret_cast<RefBase::weakref_type*>(
                        tr.target.ptr)->attemptIncStrong(this)) {
                    error = reinterpret_cast<BBinder*>(tr.cookie)->transact(tr.code, buffer,
                            &reply, tr.flags);
                    reinterpret_cast<BBinder*>(tr.cookie)->decStrong(this);
                } else {
                    error = UNKNOWN_TRANSACTION;
                }
            } else {
                error = the_context_object->transact(tr.code, buffer, &reply, tr.flags);
            }

            //ALOGI("<<<< TRANSACT from pid %d restore pid %d uid %d\n",
            //     mCallingPid, origPid, origUid);

            if ((tr.flags & TF_ONE_WAY) == 0) {
                LOG_ONEWAY("Sending reply to %d!", mCallingPid);
                if (error < NO_ERROR) reply.setError(error);
                sendReply(reply, 0);
            } else {
                LOG_ONEWAY("NOT sending reply to %d!", mCallingPid);
            }

            mCallingPid = origPid;
            mCallingUid = origUid;
            mStrictModePolicy = origStrictModePolicy;
            mLastTransactionBinderFlags = origTransactionBinderFlags;

            IF_LOG_TRANSACTIONS() {
                TextOutput::Bundle _b(alog);
                alog << "BC_REPLY thr " << (void*)pthread_self() << " / obj "
                    << tr.target.ptr << ": " << indent << reply << dedent << endl;
            }
        }
        break;
    // ...
```
The core here is again two things:
1) BR_TRANSACTION: the lines below are the real meat.
```cpp
if (tr.target.ptr) {
    if (reinterpret_cast<RefBase::weakref_type*>(tr.target.ptr)->attemptIncStrong(this)) {
        error = reinterpret_cast<BBinder*>(tr.cookie)->transact(tr.code, buffer,
                &reply, tr.flags);
        reinterpret_cast<BBinder*>(tr.cookie)->decStrong(this);
    } else {
        error = UNKNOWN_TRANSACTION;
    }
} else {
    error = the_context_object->transact(tr.code, buffer, &reply, tr.flags);
}

if ((tr.flags & TF_ONE_WAY) == 0) {
    LOG_ONEWAY("Sending reply to %d!", mCallingPid);
    if (error < NO_ERROR) reply.setError(error);
    sendReply(reply, 0);
} else {
    LOG_ONEWAY("NOT sending reply to %d!", mCallingPid);
}
```
The driver only hands us a weak reference to the target, so the code first tries to promote it to a strong reference with attemptIncStrong(); if that succeeds, the BBinder stored in tr.cookie is used, its transact() is called, and the strong reference is released afterwards. Otherwise UNKNOWN_TRANSACTION is returned.
2) Synchronous handling:
(tr.flags & TF_ONE_WAY) == 0 means the call is synchronous, so the result is sent back with sendReply():
```cpp
status_t IPCThreadState::sendReply(const Parcel& reply, uint32_t flags)
{
    status_t err;
    status_t statusBuffer;
    err = writeTransactionData(BC_REPLY, flags, -1, 0, reply, &statusBuffer);
    if (err < NO_ERROR) return err;

    return waitForResponse(NULL, NULL);
}
```
3.3 BBinder handles the callback
```cpp
status_t BBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    data.setDataPosition(0);

    status_t err = NO_ERROR;
    switch (code) {
        case PING_TRANSACTION:
            reply->writeInt32(pingBinder());
            break;
        default:
            err = onTransact(code, data, reply, flags);
            break;
    }

    if (reply != NULL) {
        reply->setDataPosition(0);
    }

    return err;
}
```
This step is key: we come back into android_util_Binder.cpp once more. Because class JavaBBinder : public BBinder, i.e. JavaBBinder derives from BBinder, the onTransact() invoked here is JavaBBinder::onTransact().
3.4 Back into android_util_Binder.cpp
```cpp
virtual status_t onTransact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags = 0)
{
    JNIEnv* env = javavm_to_jnienv(mVM);
    // ...
    IPCThreadState* thread_state = IPCThreadState::self();
    const int32_t strict_policy_before = thread_state->getStrictModePolicy();

    //printf("Transact from %p to Java code sending: ", this);
    //data.print();
    //printf("\n");
    jboolean res = env->CallBooleanMethod(mObject, gBinderOffsets.mExecTransact,
        code, reinterpret_cast<jlong>(&data), reinterpret_cast<jlong>(reply), flags);
    // ...

    return res != JNI_FALSE ? NO_ERROR : UNKNOWN_TRANSACTION;
}
```
Through JNI (using the cached method ID gBinderOffsets.mExecTransact), this calls the execTransact() method of the Java-layer Binder object:

```cpp
jboolean res = env->CallBooleanMethod(mObject, gBinderOffsets.mExecTransact,
    code, reinterpret_cast<jlong>(&data), reinterpret_cast<jlong>(reply), flags);
```
3.5 Calling back into the Java-layer Binder
/Volumes/aosp/android-8.1.0_r52/frameworks/base/core/java/android/os/Binder.java
```java
private boolean execTransact(int code, long dataObj, long replyObj, int flags) {
    // ...
    res = onTransact(code, data, reply, flags);
    // ...
    return res;
}
```
At this point the whole flow finally makes sense: the onTransact() call next lands in the Service that the application code itself implements.
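For completeness, a minimal sketch of where the call lands in application code: a bound Service that hands out the hypothetical HelloBinder from section 2.1. When a client's transact() arrives, execTransact() dispatches to HelloBinder.onTransact() on one of this process's binder threads. (The service would still need to be declared in the manifest.)

```java
import android.app.Service;
import android.content.Intent;
import android.os.IBinder;

public class HelloService extends Service {
    // HelloBinder is the hypothetical Binder subclass sketched in section 2.1.
    private final HelloBinder mBinder = new HelloBinder();

    @Override
    public IBinder onBind(Intent intent) {
        // The IBinder returned here is what the client's BinderProxy ultimately talks to.
        return mBinder;
    }
}
```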
4. How to find the JNI counterpart of a Java class
A practical tip: everything under /Volumes/aosp/android-8.1.0_r52/frameworks/base/core/jni/ is the JNI layer for Java framework classes. Files that start with android_ and end with .cpp are the JNI counterparts of Java classes, following the naming convention android_<module>_<JavaClassName>.cpp. For example, android_os_Parcel.cpp belongs to the os module and corresponds to Parcel.java.
5. Closing remarks
Given the author's limited level, mistakes or omissions in this article are hard to avoid. Criticism, corrections, and discussion in the comments are all very welcome. Thanks!