Author: 李尚 (Li Shang)
The main work binder_transaction is responsible for: locating the target process, building the binder_transaction and binder_work, converting between Binder entities and handles (flat_binder_object), and queuing the work on the target's todo list:
static void binder_transaction(struct binder_proc *proc,
                               struct binder_thread *thread,
                               struct binder_transaction_data *tr, int reply)
{
    struct binder_transaction *t;
    struct binder_work *tcomplete;
    size_t *offp, *off_end;
    struct binder_proc *target_proc;
    struct binder_thread *target_thread = NULL;
    struct binder_node *target_node = NULL;
    struct list_head *target_list;
    wait_queue_head_t *target_wait;
    struct binder_transaction *in_reply_to = NULL;
    ...
    /* Key point 1: find the target process (and, for a reply, the target thread) */
    if (reply) {
        in_reply_to = thread->transaction_stack;
        thread->transaction_stack = in_reply_to->to_parent;
        target_thread = in_reply_to->from;
        target_proc = target_thread->proc;
    } else {
        if (tr->target.handle) {
            struct binder_ref *ref;
            ref = binder_get_ref(proc, tr->target.handle);
            target_node = ref->node;
        } else {
            target_node = binder_context_mgr_node;
        }
        ...
    }
    ...
    /* Key point 2: create the binder_transaction and binder_work */
    t = kzalloc(sizeof(*t), GFP_KERNEL);
    ...
    tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
    ...
    /* allocate the transaction buffer inside the *target* process's mapped area */
    t->buffer = binder_alloc_buf(target_proc, tr->data_size,
                                 tr->offsets_size,
                                 !reply && (t->flags & TF_ONE_WAY));
    ...
    /* the single copy: user-space data goes straight into the target's buffer */
    offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
    if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
        ...
    }
    if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
        ...
    }
    /* Key point 3: convert between Binder entities and handles (flat_binder_object) */
    off_end = (void *)offp + tr->offsets_size;
    for (; offp < off_end; offp++) {
        struct flat_binder_object *fp;
        fp = (struct flat_binder_object *)(t->buffer->data + *offp);
        switch (fp->type) {
        case BINDER_TYPE_BINDER:
        case BINDER_TYPE_WEAK_BINDER: {
            struct binder_ref *ref;
            struct binder_node *node = binder_get_node(proc, fp->binder);
            if (node == NULL) {
                node = binder_new_node(proc, fp->binder, fp->cookie);
                ...
            }
            ...
            ref = binder_get_ref_for_node(target_proc, node);
            if (fp->type == BINDER_TYPE_BINDER)
                fp->type = BINDER_TYPE_HANDLE;
            else
                fp->type = BINDER_TYPE_WEAK_HANDLE;
            fp->handle = ref->desc;
            ...
        } break;
        case BINDER_TYPE_HANDLE:
        case BINDER_TYPE_WEAK_HANDLE: {
            struct binder_ref *ref = binder_get_ref(proc, fp->handle);
            ...
            if (ref->node->proc == target_proc) {
                if (fp->type == BINDER_TYPE_HANDLE)
                    fp->type = BINDER_TYPE_BINDER;
                else
                    fp->type = BINDER_TYPE_WEAK_BINDER;
                fp->binder = ref->node->ptr;
                fp->cookie = ref->node->cookie;
            } else {
                struct binder_ref *new_ref;
                new_ref = binder_get_ref_for_node(target_proc, ref->node);
                fp->handle = new_ref->desc;
            }
        } break;
        ...
        }
    }
    ...
    /* Key point 4: queue the binder_work on the target list and wake the target */
    t->work.type = BINDER_WORK_TRANSACTION;
    list_add_tail(&t->work.entry, target_list);
    tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
    list_add_tail(&tcomplete->entry, &thread->todo);
    if (target_wait)
        wake_up_interruptible(target_wait);
    return;
    ...
}
Key point 1 finds the target process; key point 2 creates the binder_transaction and binder_work; key point 3 converts between Binder entities and handles; key point 4 queues the binder_work on the target's todo list and wakes the corresponding wait queue. A few things are worth noting about the conversion in key point 3: a BINDER_TYPE_(WEAK_)BINDER object leaving its owning process is rewritten into a handle (a binder_ref in the target process is looked up or created, and its desc becomes the handle), while a BINDER_TYPE_(WEAK_)HANDLE object is rewritten back into the real Binder entity only if the node it refers to lives in the target process; otherwise a new reference is created for the target and the handle value is replaced with that reference's descriptor.
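To see the key-point-3 rules at a glance, here is a minimal, hypothetical sketch of the translation decision. It is not the driver's code: binder_object_kind, FlatObject, Node and translate_for_target are invented stand-ins, and reference counting and error handling are omitted.

#include <cstdint>

enum class binder_object_kind { Binder, WeakBinder, Handle, WeakHandle };

struct Node { int owner_pid; };        // stand-in for binder_node
struct FlatObject {                    // stand-in for flat_binder_object
    binder_object_kind kind;
    uintptr_t binder;                  // entity form: pointer valid only in the owner
    uint32_t handle;                   // handle form: ref descriptor in the receiver
};

// Rewrites fp in place the way key point 3 does when copying it into target_pid.
// new_desc is the descriptor of the binder_ref looked up/created in the target.
void translate_for_target(FlatObject& fp, const Node& node,
                          int target_pid, uint32_t new_desc) {
    switch (fp.kind) {
    case binder_object_kind::Binder:
    case binder_object_kind::WeakBinder:
        // The sender owns the entity, so the target can only see it as a handle.
        fp.kind = (fp.kind == binder_object_kind::Binder)
                      ? binder_object_kind::Handle
                      : binder_object_kind::WeakHandle;
        fp.handle = new_desc;
        break;
    case binder_object_kind::Handle:
    case binder_object_kind::WeakHandle:
        if (node.owner_pid == target_pid) {
            // The handle points back into the target: restore the real entity.
            fp.kind = (fp.kind == binder_object_kind::Handle)
                          ? binder_object_kind::Binder
                          : binder_object_kind::WeakBinder;
            fp.binder = 0;  // in the real driver: node->ptr (plus node->cookie)
        } else {
            // Entity lives in a third process: give the target its own reference.
            fp.handle = new_desc;
        }
        break;
    }
}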
With that, the data structures touched on the write path are complete. Now briefly look at the read path on the side that gets woken up. Reading starts in binder_thread_read, where the thread has been blocked in kernel space. Taking a delivered BC_TRANSACTION as an example, binder_thread_read prepends a BR_XXX code according to the situation, marking data that flows from the driver up to user space:
enum BinderDriverReturnProtocol {
BR_ERROR = _IOR_BAD('r', 0, int),
BR_OK = _IO('r', 1),
BR_TRANSACTION = _IOR_BAD('r', 2, struct binder_transaction_data),
BR_REPLY = _IOR_BAD('r', 3, struct binder_transaction_data),
BR_ACQUIRE_RESULT = _IOR_BAD('r', 4, int),
BR_DEAD_REPLY = _IO('r', 5),
BR_TRANSACTION_COMPLETE = _IO('r', 6),
BR_INCREFS = _IOR_BAD('r', 7, struct binder_ptr_cookie),
BR_ACQUIRE = _IOR_BAD('r', 8, struct binder_ptr_cookie),
BR_RELEASE = _IOR_BAD('r', 9, struct binder_ptr_cookie),
BR_DECREFS = _IOR_BAD('r', 10, struct binder_ptr_cookie),
BR_ATTEMPT_ACQUIRE = _IOR_BAD('r', 11, struct binder_pri_ptr_cookie),
BR_NOOP = _IO('r', 12),
BR_SPAWN_LOOPER = _IO('r', 13),
BR_FINISHED = _IO('r', 14),
BR_DEAD_BINDER = _IOR_BAD('r', 15, void *),
BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR_BAD('r', 16, void *),
BR_FAILED_REPLY = _IO('r', 17),
};
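As a side note on how these codes are consumed: user space treats the buffer returned by BINDER_WRITE_READ as a stream of BR_ commands, each followed by its own payload. Below is a rough, simplified sketch of that parsing loop, assuming the UAPI header <linux/android/binder.h> is available; parse_return_buffer is an invented name, and the real loops are IPCThreadState::waitForResponse and executeCommand, shown further below.

#include <cstdint>
#include <cstring>
#include <linux/android/binder.h>   // BR_* codes and binder_transaction_data

// Walks a driver read buffer command by command; illustrative only.
void parse_return_buffer(const uint8_t* data, size_t size) {
    const uint8_t* ptr = data;
    const uint8_t* end = data + size;
    while (ptr + sizeof(uint32_t) <= end) {
        uint32_t cmd;
        std::memcpy(&cmd, ptr, sizeof(cmd));
        ptr += sizeof(cmd);
        switch (cmd) {
        case BR_NOOP:                   // no payload, keep scanning
        case BR_TRANSACTION_COMPLETE:   // our BC_TRANSACTION was accepted
            break;
        case BR_TRANSACTION:            // an incoming request to serve
        case BR_REPLY: {                // the answer to a request we sent
            if (ptr + sizeof(binder_transaction_data) > end) return;
            binder_transaction_data tr;
            std::memcpy(&tr, ptr, sizeof(tr));
            ptr += sizeof(tr);
            // dispatch on tr.code / tr.data here ...
            break;
        }
        default:
            // the remaining BR_ codes carry their own payloads (see the enum above)
            return;
        }
    }
}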
After that, the reading thread builds a binder_transaction_data from the binder_transaction and hands it to user space via copy_to_user:
static int
binder_thread_read(struct binder_proc *proc, struct binder_thread *thread,
                   void __user *buffer, int size, signed long *consumed,
                   int non_block)
{
    void __user *ptr = buffer + *consumed;
    ...
    while (1) {
        uint32_t cmd;
        struct binder_transaction_data tr;
        struct binder_work *w;
        struct binder_transaction *t = NULL;
        if (!list_empty(&thread->todo))
            w = list_first_entry(&thread->todo, struct binder_work, entry);
        else if (!list_empty(&proc->todo) && wait_for_proc_work)
            w = list_first_entry(&proc->todo, struct binder_work, entry);
        else {
            /* no data added */
            if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
                goto retry;
            break;
        }
        ...
        switch (w->type) {
        case BINDER_WORK_TRANSACTION:
            t = container_of(w, struct binder_transaction, work);
            break;
        ...
        }
        ...
        /* a request carries a target node (BR_TRANSACTION); a reply does not (BR_REPLY) */
        if (t->buffer->target_node) {
            struct binder_node *target_node = t->buffer->target_node;
            tr.target.ptr = target_node->ptr;
            tr.cookie = target_node->cookie;   /* the local BBinder pointer */
            ...
            cmd = BR_TRANSACTION;
        } else {
            tr.target.ptr = NULL;
            tr.cookie = NULL;
            cmd = BR_REPLY;
        }
        /* data sizes */
        tr.data_size = t->buffer->data_size;
        tr.offsets_size = t->buffer->offsets_size;
        /* kernel buffer address plus the per-process user-space mapping offset */
        tr.data.ptr.buffer = (void *)t->buffer->data + proc->user_buffer_offset;
        tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *));
        /* write the return command */
        if (put_user(cmd, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
        /* copy the binder_transaction_data struct to user space */
        if (copy_to_user(ptr, &tr, sizeof(tr)))
            return -EFAULT;
        ptr += sizeof(tr);
        ...
    }
    ...
}
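The proc->user_buffer_offset addition above deserves a note: the receiver's mmap of the binder device gives the transaction buffer two virtual addresses for the same physical pages, one in the kernel and one in the receiver's user space, and the driver stores their constant difference per process. A tiny worked example with made-up addresses:

#include <cstdint>

int main() {
    // Invented addresses, used only to show the arithmetic.
    uint64_t kernel_data = 0xffffffc012345000ULL;  // t->buffer->data as the kernel sees it
    uint64_t user_base   = 0x0000007f9a345000ULL;  // the same pages in the receiver's mmap
    uint64_t user_buffer_offset = user_base - kernel_data;  // stored per binder_proc

    // What binder_thread_read hands to user space in tr.data.ptr.buffer:
    uint64_t user_data = kernel_data + user_buffer_offset;
    return user_data == user_base ? 0 : 1;  // always 0: the receiver reads it directly
}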
The upper layer, which has been waiting inside ioctl, is then woken up. Suppose the woken side is the server: it normally has a request to execute. It first maps the received data into a Parcel object with Parcel's ipcSetDataReference, and then handles the concrete request through BBinder's transact:
status_t IPCThreadState::executeCommand(int32_t cmd)
{
    status_t result = NO_ERROR;
    ...
    switch (cmd) {
    ...
    // A BR_TRANSACTION means a request has arrived and must be handled here.
    case BR_TRANSACTION:
        {
            binder_transaction_data tr;
            result = mIn.read(&tr, sizeof(tr));
            ...
            Parcel buffer;
            buffer.ipcSetDataReference(
                reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                tr.data_size,
                reinterpret_cast<const size_t*>(tr.data.ptr.offsets),
                tr.offsets_size/sizeof(size_t), freeBuffer, this);
            ...
            Parcel reply;
            ...
            // A non-NULL target.ptr means the request is addressed to a local
            // Binder entity rather than the context manager.
            if (tr.target.ptr) {
                // tr.cookie is the local BBinder pointer that was stored in the
                // binder_node when this entity was first passed through the driver.
                sp<BBinder> b((BBinder*)tr.cookie);
                const status_t error = b->transact(tr.code, buffer, &reply, tr.flags);
                if (error < NO_ERROR) reply.setError(error);
            }
            ...
        }
        break;
    ...
    }
    ...
    return result;
}
The b->transact(tr.code, buffer, &reply, tr.flags) call here mirrors the transact(mHandle, code, data, reply, flags) the client issued at the very beginning, and it dispatches into the corresponding service logic.
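To illustrate where that dispatch lands, here is a minimal, hypothetical native service. EchoService and ECHO_TRANSACTION are invented names; a real service would usually derive from a BnInterface generated from AIDL, but the mechanism is the same: b->transact() ends up in onTransact().

#include <binder/Binder.h>
#include <binder/Parcel.h>
#include <utils/String8.h>

using namespace android;

class EchoService : public BBinder {
public:
    static constexpr uint32_t ECHO_TRANSACTION = IBinder::FIRST_CALL_TRANSACTION;

protected:
    // Called (via BBinder::transact) with the Parcel that was mapped onto the
    // received kernel buffer by ipcSetDataReference.
    status_t onTransact(uint32_t code, const Parcel& data,
                        Parcel* reply, uint32_t flags) override {
        switch (code) {
        case ECHO_TRANSACTION: {
            String8 msg = data.readString8();
            if (reply) reply->writeString8(msg);  // travels back as BC_REPLY/BR_REPLY
            return NO_ERROR;
        }
        default:
            return BBinder::onTransact(code, data, reply, flags);
        }
    }
};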
In Binder communication, data is written from the sending process's user space directly into the target process's kernel buffer, and that buffer is mapped straight into the target's user space. It therefore cannot be released until user space has finished with it; in other words, the release of this kernel memory is controlled from user space. The kernel function that frees the buffer is binder_free_buf (the other bookkeeping structures can simply be freed right away), and the command that triggers it is BC_FREE_BUFFER. The usual user-space entry point is IPCThreadState::freeBuffer:
void IPCThreadState::freeBuffer(Parcel* parcel, const uint8_t* data, size_t dataSize,
                                const size_t* objects, size_t objectsSize,
                                void* cookie)
{
    if (parcel != NULL) parcel->closeFileDescriptors();
    IPCThreadState* state = self();
    // Queue BC_FREE_BUFFER plus the buffer address in mOut; it reaches the
    // driver on the next talkWithDriver() round trip.
    state->mOut.writeInt32(BC_FREE_BUFFER);
    state->mOut.writeInt32((int32_t)data);
}
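For reference, this is roughly what the command looks like at the raw protocol level once mOut reaches the driver. A hypothetical sketch, assuming an already-open /dev/binder descriptor and the UAPI header <linux/android/binder.h> (exact types and header paths vary across kernel and Android versions):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>   // binder_write_read, BC_FREE_BUFFER

// Returns the data buffer received with a BR_TRANSACTION/BR_REPLY back to the
// driver so binder_free_buf() can recycle it. Illustrative only.
static int free_binder_buffer(int binder_fd, binder_uintptr_t buffer_ptr)
{
    struct __attribute__((packed)) {
        uint32_t cmd;
        binder_uintptr_t ptr;
    } out = { BC_FREE_BUFFER, buffer_ptr };

    struct binder_write_read bwr = {};
    bwr.write_buffer = (binder_uintptr_t)&out;
    bwr.write_size = sizeof(out);
    return ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
}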
So when does this function actually get called? When we analyzed data delivery earlier, one step mapped the data described by binder_transaction_data into a Parcel, and that step is the key:
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    int32_t cmd;
    int32_t err;
    while (1) {
        ...
        case BR_REPLY:
            {
                binder_transaction_data tr;
                // No payload is copied here: only the small binder_transaction_data
                // struct is read; its pointers refer to the mmapped kernel buffer.
                err = mIn.read(&tr, sizeof(tr));
                ALOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
                if (err != NO_ERROR) goto finish;
                if (reply) {
                    if ((tr.flags & TF_STATUS_CODE) == 0) {
                        // Map the reply data into the Parcel and register freeBuffer
                        // as its release function; this ties buffer use to its release.
                        reply->ipcSetDataReference(
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(size_t),
                            freeBuffer, this);
                    }
                    ...
                }
            }
            goto finish;
        ...
    }
    ...
}
Parcel's ipcSetDataReference not only maps the data into the Parcel object, it also registers the cleanup function for that data:
void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize,
        const size_t* objects, size_t objectsCount,
        release_func relFunc, void* relCookie);
Look at the release_func relFunc parameter in the declaration: it designates the memory release function, and IPCThreadState::freeBuffer is what gets passed in here. In the native layer, once a Parcel has been used and reaches the end of its lifetime, its destructor runs; the destructor calls freeDataNoInit(), which indirectly invokes the release function registered above:
Parcel::~Parcel()
{
freeDataNoInit();
}
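Here is a simplified sketch of that ownership pattern, assuming nothing about Parcel's real internals; MappedBlob and its members are invented names used only to show the shape of "map the data and remember how to release it".

#include <cstdint>
#include <cstddef>

using release_func = void (*)(const uint8_t* data, size_t dataSize, void* cookie);

class MappedBlob {
public:
    void setDataReference(const uint8_t* data, size_t size,
                          release_func relFunc, void* relCookie) {
        release();              // drop any previously mapped buffer first
        mData = data;           // not copied: points into the mmapped kernel buffer
        mSize = size;
        mRelFunc = relFunc;     // e.g. something like IPCThreadState::freeBuffer
        mRelCookie = relCookie;
    }

    ~MappedBlob() { release(); }  // the destructor is what triggers the free

private:
    void release() {
        if (mRelFunc) mRelFunc(mData, mSize, mRelCookie);
        mData = nullptr; mSize = 0; mRelFunc = nullptr; mRelCookie = nullptr;
    }

    const uint8_t* mData = nullptr;
    size_t mSize = 0;
    release_func mRelFunc = nullptr;
    void* mRelCookie = nullptr;
};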
This is the entry point for releasing the data. Once the request enters kernel space, binder_free_buf runs: it frees the memory allocated for this transaction and updates the binder_proc's binder_buffer bookkeeping, re-marking which memory blocks are in use and which are free.
static void binder_free_buf(struct binder_proc *proc,
                            struct binder_buffer *buffer)
{
    size_t size, buffer_size;

    buffer_size = binder_buffer_size(proc, buffer);
    size = ALIGN(buffer->data_size, sizeof(void *)) +
           ALIGN(buffer->offsets_size, sizeof(void *));
    binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
                 "binder: %d: binder_free_buf %p size %zd buffer"
                 "_size %zd\n", proc->pid, buffer, size, buffer_size);

    if (buffer->async_transaction) {
        /* give the space back to the async budget */
        proc->free_async_space += size + sizeof(struct binder_buffer);
        binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
                     "binder: %d: binder_free_buf size %zd "
                     "async free %zd\n", proc->pid, size,
                     proc->free_async_space);
    }

    /* release the physical pages backing this buffer */
    binder_update_page_range(proc, 0,
                             (void *)PAGE_ALIGN((uintptr_t)buffer->data),
                             (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
                             NULL);
    /* move the buffer from the allocated tree to the free tree */
    rb_erase(&buffer->rb_node, &proc->allocated_buffers);
    buffer->free = 1;
    /* merge with the next buffer if it is also free */
    if (!list_is_last(&buffer->entry, &proc->buffers)) {
        struct binder_buffer *next = list_entry(buffer->entry.next,
                                                struct binder_buffer, entry);
        if (next->free) {
            rb_erase(&next->rb_node, &proc->free_buffers);
            binder_delete_free_buffer(proc, next);
        }
    }
    /* merge with the previous buffer if it is free */
    if (proc->buffers.next != &buffer->entry) {
        struct binder_buffer *prev = list_entry(buffer->entry.prev,
                                                struct binder_buffer, entry);
        if (prev->free) {
            binder_delete_free_buffer(proc, buffer);
            rb_erase(&prev->rb_node, &proc->free_buffers);
            buffer = prev;
        }
    }
    binder_insert_free_buffer(proc, buffer);
}
The Java layer is similar: the memory is released by calling Parcel's freeData() through JNI. In user space, every time a BR_TRANSACTION or BR_REPLY is handled, freeBuffer is eventually used to send the request that releases the corresponding kernel memory.
A simple Binder C/S communication model
Many articles define four roles in the Binder framework: Server, Client, ServiceManager, and the Binder driver. That framing can be misleading: it suggests every Binder service must be registered with ServiceManager before it can be used, which is not the case. For example, services started through bindService in ordinary app development, and some custom AIDL remote calls, do not necessarily go through ServiceManager registration at all. My understanding is that ServiceManager mainly manages system services such as AMS, WMS and PKMS, whereas the Binder services an app starts via bindService are actually managed by ActivityManagerService in SystemServer. This part focuses on some quirks of Java-layer Binder communication in Android apps:
This article comes from the NetEase Practitioner Community (网易实践者社区) and is published with the authorization of the author, 李尚.