Here we use the media service as an example.
#frameworks/av/media/mediaserver/main_mediaserver.cpp
#define LOG_TAG "mediaserver"
//#define LOG_NDEBUG 0
#include <binder/IPCThreadState.h>
#include <binder/ProcessState.h>
#include <binder/IServiceManager.h>
#include <hidl/HidlTransportSupport.h>
#include <utils/Log.h>
#include "RegisterExtensions.h"
// from LOCAL_C_INCLUDES
#include "MediaPlayerService.h"
#include "ResourceManagerService.h"
using namespace android;
int main(int argc __unused, char **argv __unused)
{
signal(SIGPIPE, SIG_IGN);
sp<ProcessState> proc(ProcessState::self()); // obtain the ProcessState
sp<IServiceManager> sm(defaultServiceManager()); // obtain the BpServiceManager
ALOGI("ServiceManager: %p", sm.get());
MediaPlayerService::instantiate(); // register MediaPlayerService
ResourceManagerService::instantiate(); // register ResourceManagerService
registerExtensions();
::android::hardware::configureRpcThreadpool(16, false);
ProcessState::self()->startThreadPool(); // start the thread pool
IPCThreadState::self()->joinThreadPool(); // register the current thread into the thread pool
::android::hardware::joinRpcThreadpool();
}
This is the boilerplate that most service registrations follow (a minimal sketch comes right after this list):

1. Obtain the ProcessState
2. Obtain the BpServiceManager
3. Register the current service
4. Start the thread pool
5. Register the current thread into the thread pool
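As a minimal sketch, a hypothetical service following the same five steps could look like the code below. MyService and the name "my.service" are made up for illustration; a real service would extend a generated Bn class rather than bare BBinder.

// Hypothetical minimal service following the five steps above (sketch only).
#include <binder/Binder.h>
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
#include <binder/ProcessState.h>
#include <utils/String16.h>

using namespace android;

class MyService : public BBinder {}; // stand-in; real services extend BnXxx

int main()
{
    sp<ProcessState> proc(ProcessState::self());                   // 1. obtain the ProcessState
    sp<IServiceManager> sm(defaultServiceManager());               // 2. obtain the BpServiceManager
    sm->addService(String16("my.service"), sp<MyService>::make()); // 3. register the current service
    ProcessState::self()->startThreadPool();                       // 4. start the thread pool
    IPCThreadState::self()->joinThreadPool();                      // 5. register this thread into the pool
    return 0;
}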
Obtaining the BpServiceManager

Now let's walk through this process in detail. ProcessState::self() was covered in the previous article: it creates a singleton via init, and on creation opens the binder driver and calls mmap to allocate the transaction buffer, which gives the process its binder capability. Next up is defaultServiceManager().
#frameworks/native/libs/binder/IServiceManager.cpp
using AidlServiceManager = android::os::IServiceManager;
sp<IServiceManager> defaultServiceManager()
{
std::call_once(gSmOnce, []() {
..........
sp<AidlServiceManager> sm = nullptr;
while (sm == nullptr) {
sm = interface_cast<AidlServiceManager>(ProcessState::self()->getContextObject(nullptr));
if (sm == nullptr) {
ALOGE("Waiting 1s on context object on %s.", ProcessState::self()->getDriverName().c_str());
sleep(1);
}
}
gDefaultServiceManager = sp<ServiceManagerShim>::make(sm);
});
return gDefaultServiceManager;
}
As you can see, it calls ProcessState::self()->getContextObject(nullptr) to obtain an object, then uses interface_cast to turn it into an AidlServiceManager, i.e. an android::os::IServiceManager.
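As a side note, this is the standard pattern for any binder interface; here is an illustrative client-side example (not quoted from a specific AOSP file) using the media player service:

// Illustrative: look up a service and cast its IBinder proxy to a typed interface.
sp<IBinder> binder = defaultServiceManager()->getService(String16("media.player"));
sp<IMediaPlayerService> service = interface_cast<IMediaPlayerService>(binder); // yields a BpMediaPlayerService wrapping the BpBinder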
#frameworks/native/libs/binder/ProcessState.cpp
sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& /*caller*/)
{
sp<IBinder> context = getStrongProxyForHandle(0);
if (context) {
// The root object is special since we get it directly from the driver, it is never
// written by Parcel::writeStrongBinder.
internal::Stability::markCompilationUnit(context.get());
} else {
ALOGW("Not able to get context object on %s.", mDriverName.c_str());
}
return context;
}
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
sp<IBinder> result;
AutoMutex _l(mLock);
if (handle == 0 && the_context_object != nullptr) return the_context_object; // the_context_object acts as a cache for the handle-0 context object
handle_entry* e = lookupHandleLocked(handle);
if (e != nullptr) {
IBinder* b = e->binder;
if (b == nullptr || !e->refs->attemptIncWeak(this)) {
if (handle == 0) {
IPCThreadState* ipc = IPCThreadState::self();
CallRestriction originalCallRestriction = ipc->getCallRestriction();
ipc->setCallRestriction(CallRestriction::NONE);
Parcel data;
status_t status = ipc->transact(
0, IBinder::PING_TRANSACTION, data, nullptr, 0);
ipc->setCallRestriction(originalCallRestriction); // the PING_TRANSACTION above checks whether the context object is valid yet
if (status == DEAD_OBJECT)
return nullptr;
}
sp<BpBinder> b = BpBinder::PrivateAccessor::create(handle); // this calls BpBinder::create(handle); handle is 0 here, so it creates a BpBinder wrapping handle 0
e->binder = b.get();
if (b) e->refs = b->getWeakRefs();
result = b;
} else {
result.force_set(b);
e->refs->decWeak(this);
}
}
return result;
}
class PrivateAccessor {
private:
friend class BpBinder;
friend class ::android::Parcel;
friend class ::android::ProcessState;
friend class ::android::RpcSession;
friend class ::android::RpcState;
explicit PrivateAccessor(const BpBinder* binder) : mBinder(binder) {}
static sp<BpBinder> create(int32_t handle) { return BpBinder::create(handle); }
static sp<BpBinder> create(const sp<RpcSession>& session, uint64_t address) {
return BpBinder::create(session, address);
}
// valid if !isRpcBinder
int32_t binderHandle() const { return mBinder->binderHandle(); }
// valid if isRpcBinder
uint64_t rpcAddress() const { return mBinder->rpcAddress(); }
const sp<RpcSession>& rpcSession() const { return mBinder->rpcSession(); }
const BpBinder* mBinder;
};
From this we can see that ProcessState::self()->getContextObject(nullptr) returns a BpBinder whose handle is 0. Next, interface_cast:
#frameworks/native/libs/binder/include/binder/IInterface.h
template<typename INTERFACE>
inline sp<INTERFACE> interface_cast(const sp<IBinder>& obj)
{
return INTERFACE::asInterface(obj);
}
interface_cast shows up all over the framework; it is just a template function that calls asInterface. Since the call here is interface_cast<AidlServiceManager>, the asInterface being invoked is the one on android::os::IServiceManager. There is one important point to note: in Android 12, servicemanager no longer talks to the binder driver through a hand-written proxy; its interface is generated from AIDL, so this IServiceManager is not the file under frameworks/ but the AIDL-generated one. On my machine, the generated files end up under **/out/soong/.intermediates/frameworks/native/libs/binder/libbinder/android_vendor.31_arm_armv7-a-neon_shared/gen/aidl/android/os**.
#IServiceManager.h
class IServiceManager : public ::android::IInterface {
public:
DECLARE_META_INTERFACE(ServiceManager)
........
}
#IServiceManager.cpp
namespace os {
DO_NOT_DIRECTLY_USE_ME_IMPLEMENT_META_INTERFACE(ServiceManager, "android.os.IServiceManager")
}
#define DECLARE_META_INTERFACE(INTERFACE) \
public: \
static const ::android::String16 descriptor; \
static ::android::sp<I##INTERFACE> asInterface(const ::android::sp<::android::IBinder>& obj); \
virtual const ::android::String16& getInterfaceDescriptor() const; \
I##INTERFACE(); \
virtual ~I##INTERFACE(); \
static bool setDefaultImpl(::android::sp<I##INTERFACE> impl); \
static const ::android::sp<I##INTERFACE>& getDefaultImpl(); \
\
private: \
static ::android::sp<I##INTERFACE> default_impl; \
#define DO_NOT_DIRECTLY_USE_ME_IMPLEMENT_META_INTERFACE(INTERFACE, NAME) \
const ::android::StaticString16 I##INTERFACE##_descriptor_static_str16( \
__IINTF_CONCAT(u, NAME)); \
const ::android::String16 I##INTERFACE::descriptor(I##INTERFACE##_descriptor_static_str16); \
DO_NOT_DIRECTLY_USE_ME_IMPLEMENT_META_INTERFACE0(I##INTERFACE, I##INTERFACE, Bp##INTERFACE)
// Macro to be used by both IMPLEMENT_META_INTERFACE and IMPLEMENT_META_NESTED_INTERFACE
#define DO_NOT_DIRECTLY_USE_ME_IMPLEMENT_META_INTERFACE0(ITYPE, INAME, BPTYPE) \
const ::android::String16& ITYPE::getInterfaceDescriptor() const { return ITYPE::descriptor; } \
::android::sp<ITYPE> ITYPE::asInterface(const ::android::sp<::android::IBinder>& obj) { \
::android::sp<ITYPE> intr; \
if (obj != nullptr) { \
intr = ::android::sp<ITYPE>::cast(obj->queryLocalInterface(ITYPE::descriptor)); \
if (intr == nullptr) { \
intr = ::android::sp<BPTYPE>::make(obj); \
} \
} \
return intr; \
} \
::android::sp<ITYPE> ITYPE::default_impl; \
bool ITYPE::setDefaultImpl(::android::sp<ITYPE> impl) { \
/* Only one user of this interface can use this function */ \
/* at a time. This is a heuristic to detect if two different */ \
/* users in the same process use this function. */ \
assert(!ITYPE::default_impl); \
if (impl) { \
ITYPE::default_impl = std::move(impl); \
return true; \
} \
return false; \
} \
const ::android::sp<ITYPE>& ITYPE::getDefaultImpl() { return ITYPE::default_impl; } \
ITYPE::INAME() {} \
ITYPE::~INAME() {}
You won't find asInterface written out directly; it is generated by the DECLARE_META_INTERFACE and DO_NOT_DIRECTLY_USE_ME_IMPLEMENT_META_INTERFACE macros from IInterface.h. DECLARE_META_INTERFACE in the header is essentially the declarations, and DO_NOT_DIRECTLY_USE_ME_IMPLEMENT_META_INTERFACE in the .cpp file provides the implementations; other services are implemented the same way. Substituting the arguments (ServiceManager, "android.os.IServiceManager") into the macros shows that asInterface boils down to ::android::sp<BpServiceManager>::make(BpBinder(0)), producing a BpServiceManager instance. So we finally have the BpServiceManager; back in defaultServiceManager() it is wrapped once more into a ServiceManagerShim object. Compared with the old binder framework this adds an intermediate layer: the new IServiceManager is implemented via AIDL, and BpServiceManager gains a wrapper class, ServiceManagerShim; functionally there is little difference.
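To make the substitution concrete, here is a hand-expanded sketch of what the macro produces for asInterface (simplified from DO_NOT_DIRECTLY_USE_ME_IMPLEMENT_META_INTERFACE0 with ITYPE=IServiceManager, BPTYPE=BpServiceManager; not the literal generated code):

// Hand-expanded sketch of IServiceManager::asInterface.
::android::sp<IServiceManager> IServiceManager::asInterface(
        const ::android::sp<::android::IBinder>& obj) {
    ::android::sp<IServiceManager> intr;
    if (obj != nullptr) {
        // BpBinder(0) has no local (in-process) implementation, so this returns null...
        intr = ::android::sp<IServiceManager>::cast(
                obj->queryLocalInterface(IServiceManager::descriptor));
        if (intr == nullptr) {
            // ...and a BpServiceManager proxy wrapping BpBinder(0) is created instead.
            intr = ::android::sp<BpServiceManager>::make(obj);
        }
    }
    return intr;
}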
Registering the current service

void MediaPlayerService::instantiate() {
defaultServiceManager()->addService(
String16("media.player"), new MediaPlayerService());
}
//ServiceManagerShim
status_t ServiceManagerShim::addService(const String16& name, const sp<IBinder>& service,
bool allowIsolated, int dumpsysPriority)
{
Status status = mTheRealServiceManager->addService(
String8(name).c_str(), service, allowIsolated, dumpsysPriority);
return status.exceptionCode();
}
So addService simply goes through ServiceManagerShim, and every ServiceManagerShim method delegates to the corresponding BpServiceManager method.
//BpServiceManager.addService
::android::binder::Status BpServiceManager::addService(const ::std::string& name, const ::android::sp<::android::IBinder>& service, bool allowIsolated, int32_t dumpPriority) {
::android::Parcel _aidl_data;
_aidl_data.markForBinder(remoteStrong());
::android::Parcel _aidl_reply;
::android::status_t _aidl_ret_status = ::android::OK;
::android::binder::Status _aidl_status;
_aidl_ret_status = _aidl_data.writeInterfaceToken(getInterfaceDescriptor());
_aidl_ret_status = _aidl_data.writeUtf8AsUtf16(name);
_aidl_ret_status = _aidl_data.writeStrongBinder(service);
_aidl_ret_status = _aidl_data.writeBool(allowIsolated);
_aidl_ret_status = _aidl_data.writeInt32(dumpPriority);
_aidl_ret_status = remote()->transact(BnServiceManager::TRANSACTION_addService, _aidl_data, &_aidl_reply, 0);
.........
}
BpServiceManager first writes all the transaction data into the Parcel-typed _aidl_data variable, then calls transact() on BpBinder(0) to send a message to binder. BpBinder in turn delegates to IPCThreadState::self()->transact().
//BpBinder.cpp
status_t BpBinder::transact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
........
status = IPCThreadState::self()->transact(binderHandle(), code, data, reply, flags);
........
}
//IPCThreadState.cpp
status_t IPCThreadState::transact(int32_t handle,
uint32_t code, const Parcel& data,
Parcel* reply, uint32_t flags)
{
......
status_t err;
flags |= TF_ACCEPT_FDS;
err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, nullptr); // stage the data
if (err != NO_ERROR) {
if (reply) reply->setError(err);
return (mLastError = err);
}
if ((flags & TF_ONE_WAY) == 0) {
if (reply) {
err = waitForResponse(reply);
} else {
Parcel fakeReply;
err = waitForResponse(&fakeReply);
}
} else {
err = waitForResponse(nullptr, nullptr);
}
......
return err;
}
transact does exactly two things: writeTransactionData stages the data, and waitForResponse drives the actual send and waits for the reply to come back. It also checks TF_ONE_WAY: for a one-way call, waitForResponse is invoked with null, so no reply Parcel is filled in.
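For comparison, this is roughly how a caller opts into the one-way path. A hedged sketch: the interface token and the transaction code are placeholders, not a real AOSP call, and `binder` stands for some proxy you already hold.

// Sketch: a oneway transaction on a proxy binder.
Parcel data;
data.writeInterfaceToken(String16("some.hypothetical.Interface")); // placeholder token
status_t err = binder->transact(IBinder::FIRST_CALL_TRANSACTION,   // placeholder code
                                data, nullptr /* no reply expected */,
                                IBinder::FLAG_ONEWAY);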
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
binder_transaction_data tr;
tr.target.ptr = 0; /* Don't pass uninitialized stack data to a remote process */
tr.target.handle = handle;
tr.code = code;
tr.flags = binderFlags;
tr.cookie = 0;
tr.sender_pid = 0;
tr.sender_euid = 0;
const status_t err = data.errorCheck();
if (err == NO_ERROR) {
tr.data_size = data.ipcDataSize();
tr.data.ptr.buffer = data.ipcData();
tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t);
tr.data.ptr.offsets = data.ipcObjects();
} else if (statusBuffer) {
tr.flags |= TF_STATUS_CODE;
*statusBuffer = err;
tr.data_size = sizeof(status_t);
tr.data.ptr.buffer = reinterpret_cast<uintptr_t>(statusBuffer);
tr.offsets_size = 0;
tr.data.ptr.offsets = 0;
} else {
return (mLastError = err);
}
mOut.writeInt32(cmd);
mOut.write(&tr, sizeof(tr));
return NO_ERROR;
}
writeTransactionData doesn't actually transmit anything either; it only appends the BC_TRANSACTION command and the binder_transaction_data to mOut. (BC_* commands travel from the process into the driver; the BR_* commands seen next travel back from the driver.)
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
uint32_t cmd;
int32_t err;
while (1) {
if ((err=talkWithDriver()) < NO_ERROR) break;
err = mIn.errorCheck();
if (err < NO_ERROR) break;
if (mIn.dataAvail() == 0) continue;
cmd = (uint32_t)mIn.readInt32();
switch (cmd) {
......
case BR_ACQUIRE_RESULT:
.......
goto finish;
case BR_REPLY:
......
goto finish;
default:
err = executeCommand(cmd);
if (err != NO_ERROR) goto finish;
break;
}
}
return err;
}
waitForResponse is where the real interaction starts: it first exchanges data with the driver via talkWithDriver, then dispatches on the cmd that comes back. For a typical synchronous call the driver first delivers BR_TRANSACTION_COMPLETE and, once the target has answered, BR_REPLY carrying the reply data.
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
if (mProcess->mDriverFD < 0) {
return -EBADF;
}
binder_write_read bwr; // the struct exchanged with the driver
const bool needRead = mIn.dataPosition() >= mIn.dataSize();
const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;
bwr.write_size = outAvail;
bwr.write_buffer = (uintptr_t)mOut.data();
if (doReceive && needRead) {
bwr.read_size = mIn.dataCapacity();
bwr.read_buffer = (uintptr_t)mIn.data();
} else {
bwr.read_size = 0;
bwr.read_buffer = 0;
}
if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;
bwr.write_consumed = 0;
bwr.read_consumed = 0;
status_t err;
do {
#if defined(__ANDROID__)
if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
err = NO_ERROR;
else
err = -errno;
#else
err = INVALID_OPERATION;
#endif
} while (err == -EINTR);
........
return err;
}
What talkWithDriver does is also straightforward: it packs the data into a binder_write_read struct and exchanges it with the driver through ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr). On the driver side:

//binder driver
case BC_TRANSACTION_SG:
case BC_REPLY_SG: {
struct binder_transaction_data_sg tr;
if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
binder_transaction(proc, thread, &tr.transaction_data,
cmd == BC_REPLY_SG, tr.buffers_size);
break;
}
//binder_transaction()
if (tr->target.handle) {
} else {
mutex_lock(&context->context_mgr_node_lock);
target_node = context->binder_context_mgr_node;
mutex_unlock(&context->context_mgr_node_lock);
}
In binder_transaction, if the handle is 0 the target_node is taken straight from binder_context_mgr_node, the binder_node that was saved when servicemanager registered itself; through this node the command reaches servicemanager. From the previous article we know that servicemanager, once set up, listens for client requests through BinderCallback, and BinderCallback ultimately calls IPCThreadState::getAndExecuteCommand -> IPCThreadState::executeCommand.
status_t IPCThreadState::executeCommand(int32_t cmd)
{
.......
case BR_TRANSACTION_SEC_CTX:
case BR_TRANSACTION:
error = the_context_object->transact(tr.code, buffer, &reply, tr.flags); // BBinder's transact calls onTransact
return result;
}
servicemanager's BBinder side is implemented by inheriting os::BnServiceManager; the generated implementation lives in IServiceManager.cpp.
class BnServiceManager : public ::android::BnInterface<IServiceManager>
//IServiceManager.cpp
::android::status_t BnServiceManager::onTransact(uint32_t _aidl_code, const ::android::Parcel& _aidl_data, ::android::Parcel* _aidl_reply, uint32_t _aidl_flags) {
::android::status_t _aidl_ret_status = ::android::OK;
switch (_aidl_code) {
.........
case BnServiceManager::TRANSACTION_addService:
{
::std::string in_name;
::android::sp<::android::IBinder> in_service;
bool in_allowIsolated;
int32_t in_dumpPriority;
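// (the generated code reads in_name, in_service, in_allowIsolated and
// in_dumpPriority out of _aidl_data here; those read calls are elided in this excerpt)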
::android::binder::Status _aidl_status(addService(in_name, in_service, in_allowIsolated, in_dumpPriority));
_aidl_ret_status = _aidl_status.writeToParcel(_aidl_reply);
if (((_aidl_ret_status) != (::android::OK))) {
break;
}
if (!_aidl_status.isOk()) {
break;
}
}
break;
}
remote()->transact(BnServiceManager::TRANSACTION_addService, _aidl_data, &_aidl_reply, 0) thus lands, via IPC, back in ServiceManager.cpp.
//frameworks/native/cmds/servicemanager/ServiceManager.cpp
Status ServiceManager::addService(const std::string& name, const sp<IBinder>& binder, bool allowIsolated, int32_t dumpPriority) {
// implicitly unlinked when the binder is removed
if (binder->remoteBinder() != nullptr &&
binder->linkToDeath(sp<ServiceManager>::fromExisting(this)) != OK) {
LOG(ERROR) << "Could not linkToDeath when adding " << name;
return Status::fromExceptionCode(Status::EX_ILLEGAL_STATE, "linkToDeath failure");
}
// Overwrite the old service if it exists
mNameToService[name] = Service {
.binder = binder,
.allowIsolated = allowIsolated,
.dumpPriority = dumpPriority,
.debugPid = ctx.debugPid,
};
auto it = mNameToRegistrationCallback.find(name);
if (it != mNameToRegistrationCallback.end()) {
for (const sp<IServiceCallback>& cb : it->second) {
mNameToService[name].guaranteeClient = true;
// permission checked in registerForNotifications
cb->onRegistration(name, binder);
}
}
return Status::ok();
}
The service is stored into mNameToService under its name together with its binder, and registration is complete.
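Once registered, any client can retrieve the service under the same name. An illustrative sketch (in the shim, getService waits briefly for a service that has not appeared yet, while checkService returns immediately):

// Illustrative client-side lookup of the service registered above.
sp<IServiceManager> sm = defaultServiceManager();
sp<IBinder> binder = sm->getService(String16("media.player"));   // may briefly wait for the service
sp<IBinder> maybe  = sm->checkService(String16("media.player")); // returns null immediately if absent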
ProcessState::self()->startThreadPool() & joinThreadPool()

void ProcessState::startThreadPool()
{
AutoMutex _l(mLock);
if (!mThreadPoolStarted) { // guarantee this only runs once
if (mMaxThreads == 0) {
ALOGW("Extra binder thread started, but 0 threads requested. Do not use "
"*startThreadPool when zero threads are requested.");
}
mThreadPoolStarted = true;
spawnPooledThread(true);
}
}
void ProcessState::spawnPooledThread(bool isMain)
{
if (mThreadPoolStarted) {
String8 name = makeBinderThreadName(); // build the binder thread's name
ALOGV("Spawning new pooled thread, name=%s\n", name.string());
sp<Thread> t = sp<PoolThread>::make(isMain); // create a PoolThread (isMain is true here)
t->run(name.string());
pthread_mutex_lock(&mThreadCountLock);
mKernelStartedThreads++;
pthread_mutex_unlock(&mThreadCountLock);
}
}
String8 ProcessState::makeBinderThreadName() {
int32_t s = android_atomic_add(1, &mThreadPoolSeq);
pid_t pid = getpid();
std::string_view driverName = mDriverName.c_str();
android::base::ConsumePrefix(&driverName, "/dev/");
String8 name;
name.appendFormat("%.*s:%d_%X", static_cast<int>(driverName.length()), driverName.data(), pid,
s);
return name;
}
startThreadPool makes sure this runs only once, then calls spawnPooledThread, which obtains the binder thread name via makeBinderThreadName, then creates a PoolThread and runs it.
class PoolThread : public Thread
{
public:
explicit PoolThread(bool isMain)
: mIsMain(isMain)
{
}
protected:
virtual bool threadLoop() // run() ends up executing this method
{
IPCThreadState::self()->joinThreadPool(mIsMain); // which again calls joinThreadPool()
return false;
}
const bool mIsMain;
};
void IPCThreadState::joinThreadPool(bool isMain)
{
LOG_THREADPOOL("**** THREAD %p (PID %d) IS JOINING THE THREAD POOL\n", (void*)pthread_self(), getpid());
pthread_mutex_lock(&mProcess->mThreadCountLock);
mProcess->mCurrentThreads++;
pthread_mutex_unlock(&mProcess->mThreadCountLock);
mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER); // queue the command into mOut: isMain=true registers a main thread, which never times out; false means a thread spawned at the driver's request
mIsLooper = true;
status_t result;
do {
processPendingDerefs();
// now get the next command to be processed, waiting if necessary
result = getAndExecuteCommand(); // as described earlier, this drives talkWithDriver to interact with the driver
if (result < NO_ERROR && result != TIMED_OUT && result != -ECONNREFUSED && result != -EBADF) {
LOG_ALWAYS_FATAL("getAndExecuteCommand(fd=%d) returned unexpected error %d, aborting",
mProcess->mDriverFD, result);
}
// Let this thread exit the thread pool if it is no longer
// needed and it is not the main process thread.
if(result == TIMED_OUT && !isMain) { // a non-main thread exits when it times out
break;
}
} while (result != -ECONNREFUSED && result != -EBADF);
LOG_THREADPOOL("**** THREAD %p (PID %d) IS LEAVING THE THREAD POOL err=%d\n",
(void*)pthread_self(), getpid(), result);
mOut.writeInt32(BC_EXIT_LOOPER); // once out of the loop, tell the driver this thread is done
mIsLooper = false;
talkWithDriver(false);
pthread_mutex_lock(&mProcess->mThreadCountLock);
LOG_ALWAYS_FATAL_IF(mProcess->mCurrentThreads == 0,
"Threadpool thread count = 0. Thread cannot exist and exit in empty "
"threadpool\n"
"Misconfiguration. Increase threadpool max threads configuration\n");
mProcess->mCurrentThreads--;
pthread_mutex_unlock(&mProcess->mThreadCountLock);
}
Next we drop into the driver layer again:
case BC_REGISTER_LOOPER:
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BC_REGISTER_LOOPER\n",
proc->pid, thread->pid);
binder_inner_proc_lock(proc);
if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
thread->looper |= BINDER_LOOPER_STATE_INVALID;
binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
proc->pid, thread->pid);
} else if (proc->requested_threads == 0) {
thread->looper |= BINDER_LOOPER_STATE_INVALID;
binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
proc->pid, thread->pid);
} else {
proc->requested_threads--; // one fewer outstanding spawn request for this process
proc->requested_threads_started++; // one more driver-requested thread running
}
thread->looper |= BINDER_LOOPER_STATE_REGISTERED; // mark this thread's looper state as BINDER_LOOPER_STATE_REGISTERED
binder_inner_proc_unlock(proc);
break;
case BC_ENTER_LOOPER:
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BC_ENTER_LOOPER\n",
proc->pid, thread->pid);
if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
thread->looper |= BINDER_LOOPER_STATE_INVALID;
binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
proc->pid, thread->pid);
}
thread->looper |= BINDER_LOOPER_STATE_ENTERED; // mark this thread's looper state as BINDER_LOOPER_STATE_ENTERED
break;
If the command is BC_ENTER_LOOPER, only the thread's looper flag changes; the thread-pool counters are untouched, so threads registered via joinThreadPool(true) do not count against the pool's thread quota. joinThreadPool's parameter defaults to true, so mediaserver registers two such threads into the pool (the one from startThreadPool plus the main thread itself), and at this point mediaserver's startup is finished.
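A related knob worth noting: the max_threads limit the driver checks below is set from user space. A hedged sketch of a typical configuration (the value 4 is arbitrary):

// Sketch: setThreadPoolMaxThreadCount issues the BINDER_SET_MAX_THREADS ioctl,
// which sets proc->max_threads in the driver. Only driver-requested threads
// (BC_REGISTER_LOOPER) count against it; BC_ENTER_LOOPER threads do not.
ProcessState::self()->setThreadPoolMaxThreadCount(4); // arbitrary example value
ProcessState::self()->startThreadPool();
IPCThreadState::self()->joinThreadPool();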
How the binder driver spawns threads

After the thread pool is created, the main thread keeps talking to the binder driver through talkWithDriver; when work arrives, it is handled in binder_thread_read.
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed, int non_block)
{
........
retry:
binder_inner_proc_lock(proc);
wait_for_proc_work = binder_available_for_proc_work_ilocked(thread); // check whether this thread's todo queue has pending work; if not, it will wait to handle work from the process's todo queue
binder_inner_proc_unlock(proc);
thread->looper |= BINDER_LOOPER_STATE_WAITING;
if (non_block) {
if (!binder_has_work(thread, wait_for_proc_work))
ret = -EAGAIN;
} else {
ret = binder_wait_for_work(thread, wait_for_proc_work); // no data: block until woken when work arrives
}
thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
if (ret)
return ret;
while (1) {
uint32_t cmd;
struct binder_transaction_data_secctx tr;
struct binder_transaction_data *trd = &tr.transaction_data;
struct binder_work *w = NULL;
struct list_head *list = NULL;
struct binder_transaction *t = NULL;
struct binder_thread *t_from;
size_t trsize = sizeof(*trd);
binder_inner_proc_lock(proc);
if (!binder_worklist_empty_ilocked(&thread->todo))
list = &thread->todo; // drain the thread's queue first
else if (!binder_worklist_empty_ilocked(&proc->todo) &&
wait_for_proc_work)
list = &proc->todo; // otherwise drain the process queue
else {
binder_inner_proc_unlock(proc);
/* no data added */
if (ptr - buffer == 4 && !thread->looper_need_return)
goto retry; // no data yet: go around again
break;
}
if (end - ptr < sizeof(tr) + 4) {
binder_inner_proc_unlock(proc);
break;
}
w = binder_dequeue_work_head_ilocked(list); // take the work item off the list
switch (w->type) { // dispatch on the work type
........
case BINDER_WORK_TRANSACTION: {
binder_inner_proc_unlock(proc);
t = container_of(w, struct binder_transaction, work);
} break;
case BINDER_WORK_DEAD_BINDER:
case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
struct binder_ref_death *death;
uint32_t cmd;
binder_uintptr_t cookie;
.......
death = container_of(w, struct binder_ref_death, work);
if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
else
cmd = BR_DEAD_BINDER;
.......
if (cmd == BR_DEAD_BINDER)
goto done; /* DEAD_BINDER notifications can cause transactions */
} break;
}
if (!t) // only BINDER_WORK_TRANSACTION sets t, so only it continues below
continue;
.......
done:
*consumed = ptr - buffer;
binder_inner_proc_lock(proc);
if (proc->requested_threads == 0 &&
list_empty(&thread->proc->waiting_threads) &&
proc->requested_threads_started < proc->max_threads &&
(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
/*spawn a new thread if we leave this out */) { // decide whether to request a new thread
proc->requested_threads++;
binder_inner_proc_unlock(proc);
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BR_SPAWN_LOOPER\n",
proc->pid, thread->pid);
if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer)) // send BR_SPAWN_LOOPER back to user space
return -EFAULT;
binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
} else
binder_inner_proc_unlock(proc);
return 0;
}
The new thread is requested at the done label, and done is reached only for BINDER_WORK_TRANSACTION and when death notifications are delivered. Spawning a thread there requires all of the following:

- the process's requested_threads is 0, i.e. no spawn request is already outstanding;
- the list of available waiting_threads is empty;
- requested_threads_started, the number of driver-requested threads already running, is below max_threads; note that threads that entered the pool via BC_ENTER_LOOPER are not counted here;
- the current thread has BINDER_LOOPER_STATE_REGISTERED or BINDER_LOOPER_STATE_ENTERED set, i.e. it joined via joinThreadPool (it is not obvious when this condition would ever fail).

When all of these hold, the driver increments proc->requested_threads and sends BR_SPAWN_LOOPER back to user space.
status_t IPCThreadState::executeCommand(int32_t cmd)
{
BBinder* obj;
RefBase::weakref_type* refs;
status_t result = NO_ERROR;
switch ((uint32_t)cmd) {
........
case BR_SPAWN_LOOPER:
mProcess->spawnPooledThread(false);
break;
}
return result;
}
This brings us back to spawnPooledThread, now with isMain=false, creating an ordinary binder thread. With that, binder's thread management forms a closed loop.