The Binder Driver
Published: 2019-06-24



Part One: A First Look at Binder
In user space, Binder maintains a thread pool for each process; the pool handles incoming IPC as well as the process's own local messages. Binder communication is synchronous.
The Service Manager daemon manages the system's services. It listens for incoming requests: when a program sends one it responds, otherwise it keeps waiting.
Every service must register with the Service Manager, and a client requesting a service creates an IBinder interface for the IPC, so the Binder mechanism is based on a client/server (C/S) architecture.
For local IPC a handle is a local object, i.e. an address inside the local process, whereas for RPC a handle is a reference to a remote object, an abstract 32-bit token. The sender always treats the Binder object it sends as a handle to a remote object, while the receiving process always treats the Binder object it receives as a local object address. The Binder driver is responsible for mapping correctly between these two kinds of names.

Part Two: Principles and Implementation of the Binder Driver
Binder uses AIDL (Android Interface Definition Language) to describe inter-process communication interfaces. Binder is a special character device whose node is /dev/binder. The main implementation lives in:
kernel/drivers/staging/binder.h
kernel/drivers/staging/binder.c
The key entry points are:
binder_ioctl: exchanges data with user space; the cmd argument distinguishes the different requests, and BINDER_WRITE_READ means a read/write of Binder data.
binder_thread_write: serves the write half. It uses get_user/copy_from_user to fetch commands from user space, and calls binder_transaction to forward a request or return a result. When a request is received, binder_transaction finds the target process from the object handle and places the work on the target process's queue, where it waits to be read. The parsing of returned data is done in binder_parse().
binder_thread_read: serves the read half. It uses put_user/copy_to_user to hand results back to user space.
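As a rough illustration of how user space talks to this character device before any of the structures below come into play, here is a minimal hedged sketch that opens /dev/binder and queries the protocol version with the BINDER_VERSION ioctl. The header path varies by kernel version (in the staging era the binder header was often copied locally), so treat the include as an assumption rather than a fixed location.

/* Minimal sketch: open /dev/binder and query the driver's protocol version. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/android/binder.h>   /* assumption: location of the binder UAPI header varies */

int main(void)
{
    struct binder_version vers;
    int fd = open("/dev/binder", O_RDWR);

    if (fd < 0) {
        perror("open /dev/binder");
        return 1;
    }
    if (ioctl(fd, BINDER_VERSION, &vers) < 0) {
        perror("ioctl BINDER_VERSION");
        close(fd);
        return 1;
    }
    printf("binder protocol version: %d\n", (int)vers.protocol_version);
    close(fd);
    return 0;
}

The later sketches in this article assume the same includes and an already-open binder file descriptor.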

1) binder_work
struct binder_work {
    struct list_head entry;      // doubly linked list node; queues binder_work items
    enum {                       // state of this piece of work
        BINDER_WORK_TRANSACTION = 1,
        BINDER_WORK_TRANSACTION_COMPLETE,
        BINDER_WORK_NODE,
        BINDER_WORK_DEAD_BINDER,
        BINDER_WORK_DEAD_BINDER_AND_CLEAR,
        BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
    } type;
};

2) Binder types
#define B_PACK_CHARS(c1, c2, c3, c4) \
    ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
#define B_TYPE_LARGE 0x85

enum {    // Binder object types
    // address of a local object
    BINDER_TYPE_BINDER      = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
    BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
    // reference (handle) to a remote object
    BINDER_TYPE_HANDLE      = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
    BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
    // file descriptor; the driver resolves the fd to its file
    BINDER_TYPE_FD          = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
};
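For clarity, expanding the macro by hand for BINDER_TYPE_BINDER gives the constant below; this is a worked example, not code from the driver.

/* B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE)
 *   = ('s' << 24) | ('b' << 16) | ('*' << 8) | 0x85
 *   = 0x73000000  | 0x00620000  | 0x00002a00 | 0x85
 *   = 0x73622a85
 */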

3) The Binder object — the unit of data passed between processes
struct flat_binder_object {
    unsigned long type;     // one of the Binder types above
    unsigned long flags;    // synchronous or asynchronous, etc.
    union {
        void        *binder;    // local object: address in the sending process
        signed long  handle;    // remote object: handle to the target
    };
    void *cookie;
};

enum transaction_flags {
    TF_ONE_WAY     = 0x01,    // one-way (asynchronous) call, no reply expected
    TF_ROOT_OBJECT = 0x04,    // contents are the component's root object
    TF_STATUS_CODE = 0x08,    // contents are a 32-bit status code
    TF_ACCEPT_FDS  = 0x10,    // allow file descriptors in the reply
};

4) The payload actually carried by a Binder transaction
struct binder_transaction_data {
    union {
        size_t  handle;     // target of a command transaction: handle of the remote object
        void   *ptr;        // target of a return transaction: local object pointer
    } target;
    void         *cookie;
    unsigned int  code;     // transaction command code
    unsigned int  flags;    // transaction_flags above
    pid_t         sender_pid;
    uid_t         sender_euid;
    size_t        data_size;      // number of bytes of data
    size_t        offsets_size;   // number of bytes of object offsets
    union {
        struct {
            const void *buffer;
            const void *offsets;
        } ptr;              // data for the object referred to by target.ptr
        uint8_t buf[8];     // data for the object referred to by target.handle
    } data;
};
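As a hedged illustration of how a client might fill this structure before handing it to the driver: the field names follow the struct above, handle 0 conventionally refers to the context manager (servicemanager), and txn_code, request_buf and request_len are hypothetical placeholders supplied by the caller.

#include <string.h>

/* Hypothetical helper: prepare a synchronous transaction aimed at handle 0. */
static void fill_transaction(struct binder_transaction_data *tr,
                             unsigned int txn_code,
                             const void *request_buf, size_t request_len)
{
    memset(tr, 0, sizeof(*tr));
    tr->target.handle    = 0;            /* handle 0 = context manager */
    tr->code             = txn_code;     /* RPC code understood by the target */
    tr->flags            = 0;            /* synchronous; TF_ONE_WAY would make it async */
    tr->data_size        = request_len;  /* bytes of marshalled arguments */
    tr->offsets_size     = 0;            /* no embedded flat_binder_objects here */
    tr->data.ptr.buffer  = request_buf;  /* user-space buffer holding the arguments */
    tr->data.ptr.offsets = NULL;
}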

5) binder_write_read

struct binder_write_read {
    signed long   write_size;       // size of the write buffer
    signed long   write_consumed;   // bytes of it consumed by the driver
    unsigned long write_buffer;     // Binder commands for the thread to execute
    signed long   read_size;        // size of the read buffer
    signed long   read_consumed;    // bytes of it filled by the driver
    unsigned long read_buffer;      // results returned after execution
};
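Putting the two structures together, a hedged sketch of one BINDER_WRITE_READ round trip might look as follows. BC_TRANSACTION and the struct layouts come from the driver headers; the packed write buffer layout mirrors what servicemanager's client code does, and fill_transaction is the hypothetical helper shown earlier.

/* Sketch: send one BC_TRANSACTION and let the driver fill read_buf with BR_* replies. */
static int send_transaction(int fd, const struct binder_transaction_data *tr,
                            void *read_buf, size_t read_size)
{
    struct {
        uint32_t cmd;                         /* command word: BC_TRANSACTION */
        struct binder_transaction_data tr;    /* followed immediately by its payload */
    } __attribute__((packed)) writebuf;
    struct binder_write_read bwr;

    writebuf.cmd = BC_TRANSACTION;
    writebuf.tr  = *tr;

    memset(&bwr, 0, sizeof(bwr));
    bwr.write_buffer = (unsigned long)&writebuf;
    bwr.write_size   = sizeof(writebuf);
    bwr.read_buffer  = (unsigned long)read_buf;
    bwr.read_size    = read_size;

    /* a single ioctl both queues the command and waits for the reply stream */
    return ioctl(fd, BINDER_WRITE_READ, &bwr);
}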

// BINDER_WRITE_READ: return (read-side) command protocol
enum BinderDriverReturnProtocol {
    BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),  // a transaction for the reader to process
    BR_REPLY       = _IOR('r', 3, struct binder_transaction_data),  // the reply to a previously sent transaction
    ……
};

// BINDER_WRITE_READ: write-side command protocol
enum BinderDriverCommandProtocol {
    BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
    BC_REPLY       = _IOW('c', 1, struct binder_transaction_data),
    ……
};
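The read buffer returned by the driver is a stream of BR_* command words, each optionally followed by a payload. A minimal, hedged parsing loop, handling only the two commands listed above plus BR_NOOP and BR_TRANSACTION_COMPLETE (which the driver also emits), could look like this; real code such as binder_parse() in servicemanager handles the full protocol.

/* Sketch: walk the BR_* command stream the driver placed in read_buf. */
static void parse_replies(const uint32_t *read_buf, size_t consumed)
{
    const unsigned char *p   = (const unsigned char *)read_buf;
    const unsigned char *end = p + consumed;

    while (p + sizeof(uint32_t) <= end) {
        uint32_t cmd = *(const uint32_t *)p;
        p += sizeof(uint32_t);

        switch (cmd) {
        case BR_NOOP:                     /* nothing to do */
        case BR_TRANSACTION_COMPLETE:     /* our BC_TRANSACTION was accepted */
            break;
        case BR_TRANSACTION:              /* incoming request for us to serve */
        case BR_REPLY: {                  /* reply to a request we sent */
            const struct binder_transaction_data *tr =
                    (const struct binder_transaction_data *)p;
            p += sizeof(*tr);
            /* tr->data.ptr.buffer / tr->data_size now describe the payload */
            (void)tr;
            break;
        }
        default:
            return;                       /* unknown command: stop parsing */
        }
    }
}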

6) binder_thread — per-thread bookkeeping
struct binder_thread {
    struct binder_proc *proc;           // the process this thread belongs to
    struct rb_node rb_node;             // node in the process's thread red-black tree
    int pid;
    int looper;                         // thread state (looper flags below)
    struct binder_transaction *transaction_stack;  // transactions this thread is sending/receiving
    struct list_head todo;              // work queued for this thread
    uint32_t return_error;              // errors to report back to user space
    uint32_t return_error2;
    wait_queue_head_t wait;
    struct binder_stats stats;
};

struct binder_stats {
    int br[_IOC_NR(BR_FAILED_REPLY) + 1];      // counts of BINDER_WRITE_READ return (BR_*) commands
    int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];  // counts of BINDER_WRITE_READ write (BC_*) commands
    int obj_created[BINDER_STAT_COUNT];        // per-type object creation counts
    int obj_deleted[BINDER_STAT_COUNT];        // per-type object deletion counts
};

enum {
    BINDER_LOOPER_STATE_REGISTERED  = 0x01,
    BINDER_LOOPER_STATE_ENTERED     = 0x02,
    BINDER_LOOPER_STATE_EXITED      = 0x04,
    BINDER_LOOPER_STATE_INVALID     = 0x08,
    BINDER_LOOPER_STATE_WAITING     = 0x10,
    BINDER_LOOPER_STATE_NEED_RETURN = 0x20
};
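A thread moves into BINDER_LOOPER_STATE_ENTERED by writing the BC_ENTER_LOOPER command before it starts reading transactions, which is what servicemanager does before its main loop; a hedged user-space sketch:

/* Sketch: tell the driver this thread will loop reading transactions. */
static int enter_looper(int fd)
{
    uint32_t cmd = BC_ENTER_LOOPER;
    struct binder_write_read bwr;

    memset(&bwr, 0, sizeof(bwr));
    bwr.write_buffer = (unsigned long)&cmd;   /* a lone command word, no payload */
    bwr.write_size   = sizeof(cmd);

    return ioctl(fd, BINDER_WRITE_READ, &bwr);
}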

7) binder_transaction — relays requests and replies; records both the sending and the receiving side.
struct binder_transaction {
    int debug_id;
    struct binder_work work;
    struct binder_thread *from;              // thread that issued the transaction (sender)
    struct binder_transaction *from_parent;  // transaction below this one on the sender's stack
    struct binder_proc *to_proc;             // target process
    struct binder_thread *to_thread;         // target thread
    struct binder_transaction *to_parent;    // transaction below this one on the target's stack
    unsigned need_reply:1;
    struct binder_buffer *buffer;
    unsigned int code;
    unsigned int flags;
    long priority;
    long saved_priority;
    uid_t sender_euid;   // Linux distinguishes the UID (who created the process) from the effective UID (EUID, used for access checks on files and resources)
};

struct binder_buffer {
    struct list_head entry;
    struct rb_node rb_node;
    unsigned free:1;
    unsigned allow_user_free:1;
    unsigned async_transaction:1;
    unsigned debug_id:29;
    struct binder_transaction *transaction;  // the transaction this buffer is relaying
    struct binder_node *target_node;
    size_t data_size;
    size_t offsets_size;
    uint8_t data[0];    // the payload itself
};

Part Three: Driver Analysis
Analysis usually starts from initialization.
1) binder_init
static int __init binder_init(void)
{
    int ret;

    binder_deferred_workqueue = create_singlethread_workqueue("binder");  // workqueue for deferred work
    if (!binder_deferred_workqueue)
        return -ENOMEM;

    binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);   // root debugfs directory
    if (binder_debugfs_dir_entry_root)
        binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
                                            binder_debugfs_dir_entry_root);

    ret = misc_register(&binder_miscdev);   // register the misc device ("misc" = not yet classified)

    if (binder_debugfs_dir_entry_root) {    // create the individual debugfs files
        debugfs_create_file("state",                    // binder/state
                            S_IRUGO,
                            binder_debugfs_dir_entry_root,
                            NULL,
                            &binder_state_fops);
        debugfs_create_file("stats",                    // binder/stats
                            S_IRUGO,
                            binder_debugfs_dir_entry_root,
                            NULL,
                            &binder_stats_fops);
        debugfs_create_file("transactions",             // binder/transactions
                            S_IRUGO,
                            binder_debugfs_dir_entry_root,
                            NULL,
                            &binder_transactions_fops);
        debugfs_create_file("transaction_log",          // binder/transaction_log
                            S_IRUGO,
                            binder_debugfs_dir_entry_root,
                            &binder_transaction_log,
                            &binder_transaction_log_fops);
        debugfs_create_file("failed_transaction_log",   // binder/failed_transaction_log
                            S_IRUGO,
                            binder_debugfs_dir_entry_root,
                            &binder_transaction_log_failed,
                            &binder_transaction_log_fops);
    }
    return ret;
}
__init and __exit mark functions, __initdata and __exitdata mark data. From their definitions, marked functions and data are simply placed into dedicated (code or data) sections. A function marked __init is only used during initialization: once the module has been loaded, the loader discards the init function so its memory can be reclaimed. A function marked __exit is only called when the module is unloaded; if the module is built directly into the kernel, the function is never called, and if the kernel is built without the module, it is simply discarded. __init is not part of standard C: in the kernel it places the function into the .init.text section (defined in include/linux/init.h, roughly as #define __init __attribute__((__section__(".init.text")))), and the space occupied by that section is reclaimed after boot; sections are a linking-level concept.
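To make the section mechanics concrete, a minimal hypothetical module skeleton using these markers would look like this; it illustrates __init/__exit only and is not part of the Binder driver.

#include <linux/init.h>
#include <linux/module.h>

/* Placed in .init.text: discarded after initialization completes. */
static int __init demo_init(void)
{
    pr_info("demo: loaded\n");
    return 0;
}

/* Placed in .exit.text: only used when the code is unloaded as a module. */
static void __exit demo_exit(void)
{
    pr_info("demo: unloaded\n");
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");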

binder_init is registered through device_initcall rather than the usual module_init/module_exit pair, because the Binder driver is compiled directly into the kernel image and does not support dynamic loading; it could be changed to module_init if that were desired. The device node itself is created by the init process: misc_register only registers the misc device, and when init handles the resulting uevent (handle_device_fd(device_fd) calling handle_device_event(&uevent) on the netlink uevent) it creates the binder node under /dev.

static struct miscdevice binder_miscdev = {
    .minor = MISC_DYNAMIC_MINOR,   // minor number assigned dynamically
    .name = "binder",              // device name
    .fops = &binder_fops           // file_operations
};

static const struct file_operations binder_fops = {
    .owner = THIS_MODULE,
    .poll = binder_poll,
    .unlocked_ioctl = binder_ioctl,
    .mmap = binder_mmap,
    .open = binder_open,
    .flush = binder_flush,
    .release = binder_release,
};

2) binder_open
static int binder_open(struct inode *nodp, struct file *filp)
{
    struct binder_proc *proc;   // per-process state: process/thread ids, Binder status, queues, ...

    binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
                 current->group_leader->pid, current->pid);

    proc = kzalloc(sizeof(*proc), GFP_KERNEL);   // kzalloc = kmalloc + memset(0)
    if (proc == NULL)
        return -ENOMEM;
    get_task_struct(current);                    // take a reference on the current task
    proc->tsk = current;                         // and remember it
    INIT_LIST_HEAD(&proc->todo);                 // initialize the binder_proc work queue
    init_waitqueue_head(&proc->wait);
    proc->default_priority = task_nice(current);
    mutex_lock(&binder_lock);
    binder_stats_created(BINDER_STAT_PROC);      // bump the BINDER_STAT_PROC counter
    hlist_add_head(&proc->proc_node, &binder_procs);   // add to the global hash list
    proc->pid = current->group_leader->pid;      // record the thread-group pid
    INIT_LIST_HEAD(&proc->delivered_death);
    filp->private_data = proc;                   // stash proc in the file's private data
    mutex_unlock(&binder_lock);
    if (binder_debugfs_dir_entry_proc) {         // create binder/proc/$pid
        char strbuf[11];
        snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
        proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
                 binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
    }
    return 0;
}
Steps:
- Allocate a binder_proc structure to hold this process's Binder state.
- Take a reference on the current process/thread and store it in binder_proc's tsk field.
- Initialize the binder_proc queues: the todo list head with INIT_LIST_HEAD, the wait queue with init_waitqueue_head, and the default priority from the current task's nice value.
- Increment the BINDER_STAT_PROC object counter and add the new binder_proc to the global binder_procs hash list with hlist_add_head, so any process can reach the other processes' binder_proc objects.
- Store the thread group's pid in proc->pid (thread_group points at the task_struct of the group's first thread), and store the binder_proc pointer in filp->private_data.
- Create the file binder/proc/$pid, which exposes the state of this binder_proc. The file is named after the pid of the thread group (the group leader), not the pid of the calling thread; its file operations are binder_proc_fops, and the newly created binder_proc is passed as the file's private data.
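The snippets above reference several binder_proc fields without showing the structure; a partial, hedged sketch limited to the fields used in this article is given below. The actual definition in binder.c contains further members (buffer lists, node and ref trees, max_threads, and so on).

/* Partial sketch of struct binder_proc -- not the complete definition. */
struct binder_proc {
    struct hlist_node proc_node;     // entry in the global binder_procs list
    struct rb_root threads;          // red-black tree of this process's binder_threads
    int pid;                         // thread-group pid (group leader's pid)
    struct task_struct *tsk;         // the task that opened /dev/binder
    struct list_head todo;           // process-wide work queue
    wait_queue_head_t wait;          // threads sleep here waiting for work
    long default_priority;           // nice value recorded at open time
    void *buffer;                    // start of the mmap'ed kernel buffer
    size_t buffer_size;              // size of that buffer
    ptrdiff_t user_buffer_offset;    // user-space address minus kernel address
    struct list_head delivered_death;
    struct dentry *debugfs_entry;    // binder/proc/$pid
};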

3) binder_release
static int binder_release(struct inode *nodp, struct file *filp)
{
    struct binder_proc *proc = filp->private_data;   // recover the binder_proc, then remove binder/proc/$pid

    debugfs_remove(proc->debugfs_entry);
    binder_defer_work(proc, BINDER_DEFERRED_RELEASE);   // defer releasing the binder_proc data and buffers

    return 0;
}

4) binder_flush — called whenever a copy of the device's file descriptor is closed
static int binder_flush(struct file *filp, fl_owner_t id)
{
    struct binder_proc *proc = filp->private_data;

    binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

    return 0;
}

5) binder_poll — the kernel-side hook behind non-blocking I/O. With plain blocking I/O, if the driver has no data ready when the user program reads, the process is put to sleep until data becomes available; poll lets user space ask whether data is ready without blocking.

static unsigned int binder_poll(struct file *filp, struct poll_table_struct *wait)
{
    struct binder_proc *proc = filp->private_data;
    struct binder_thread *thread = NULL;
    int wait_for_proc_work;

    mutex_lock(&binder_lock);
    thread = binder_get_thread(proc);   // look up (or create) the calling thread's state
    wait_for_proc_work = thread->transaction_stack == NULL &&
        list_empty(&thread->todo) && thread->return_error == BR_OK;
    mutex_unlock(&binder_lock);

    if (wait_for_proc_work) {           // wait on the process-wide work queue
        if (binder_has_proc_work(proc, thread))
            return POLLIN;
        poll_wait(filp, &proc->wait, wait);
        if (binder_has_proc_work(proc, thread))
            return POLLIN;
    } else {                            // wait on the thread's own work queue
        if (binder_has_thread_work(thread))
            return POLLIN;
        poll_wait(filp, &thread->wait, wait);
        if (binder_has_thread_work(thread))
            return POLLIN;
    }
    return 0;
}

static int binder_has_proc_work(struct binder_proc *proc, struct binder_thread *thread)
{
    return !list_empty(&proc->todo) ||
        (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}

static int binder_has_thread_work(struct binder_thread *thread)
{
    return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
        (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}
Whether to wait on the process or on the thread is decided from the thread's todo list, its looper state and its pending return error; the actual waiting is then implemented by calling poll_wait.
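From user space this simply means the binder fd can be multiplexed with poll()/select(); a hedged sketch:

#include <poll.h>

/* Sketch: wait up to five seconds for the driver to signal incoming work,
 * after which a BINDER_WRITE_READ read will not block. */
static int wait_for_binder_work(int fd)
{
    struct pollfd pfd = { .fd = fd, .events = POLLIN };
    int n = poll(&pfd, 1, 5000);

    if (n > 0 && (pfd.revents & POLLIN))
        return 1;   /* binder_poll reported pending work */
    return 0;       /* timeout or error */
}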

static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
    struct binder_thread *thread = NULL;
    struct rb_node *parent = NULL;
    struct rb_node **p = &proc->threads.rb_node;   // root of the process's thread tree

    while (*p) {
        parent = *p;
        thread = rb_entry(parent, struct binder_thread, rb_node);   // thread to compare against
        if (current->pid < thread->pid)
            p = &(*p)->rb_left;
        else if (current->pid > thread->pid)
            p = &(*p)->rb_right;
        else
            break;
    }
    if (*p == NULL) {   // not found: create it
        thread = kzalloc(sizeof(*thread), GFP_KERNEL);
        if (thread == NULL)
            return NULL;
        binder_stats_created(BINDER_STAT_THREAD);
        thread->proc = proc;
        thread->pid = current->pid;
        init_waitqueue_head(&thread->wait);   // initialize the wait queue
        INIT_LIST_HEAD(&thread->todo);        // initialize the todo list head
        rb_link_node(&thread->rb_node, parent, p);
        rb_insert_color(&thread->rb_node, &proc->threads);
        thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;   // set the looper state
        thread->return_error = BR_OK;
        thread->return_error2 = BR_OK;
    }
    return thread;
}

6) binder_mmap
mmap maps device memory (kernel space) into user space so that a user program can access kernel-allocated memory through user-space addresses.
binder_mmap maps at most 4 MB and refuses to map a region with write permission.
binder_mmap first reserves a region in the kernel's virtual address space, then allocates physical pages and maps them into that region.
Because the mapping is set up in mmap, each process/thread can perform this mapping only once (the driver rejects a second attempt on the same binder_proc).
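On the user-space side this corresponds to mapping the binder fd read-only right after opening it, much like libbinder's ProcessState does; a hedged sketch, where the 1 MB size is an illustrative choice within the 4 MB limit:

#include <sys/mman.h>

/* Sketch: reserve the shared transaction buffer for this process.
 * PROT_READ only -- binder_mmap rejects writable mappings. */
#define BINDER_MMAP_SIZE (1024 * 1024)

static void *map_binder(int fd)
{
    void *base = mmap(NULL, BINDER_MMAP_SIZE, PROT_READ,
                      MAP_PRIVATE | MAP_NORESERVE, fd, 0);
    if (base == MAP_FAILED)
        return NULL;
    return base;   /* payloads delivered by BR_TRANSACTION/BR_REPLY live in this region */
}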

static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
    int ret;
    struct vm_struct *area;
    struct binder_proc *proc = filp->private_data;
    const char *failure_string;
    struct binder_buffer *buffer;

    if ((vma->vm_end - vma->vm_start) > SZ_4M)   // cap the mapping at 4 MB
        vma->vm_end = vma->vm_start + SZ_4M;

    binder_debug(BINDER_DEBUG_OPEN_CLOSE,
                 "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
                 proc->pid, vma->vm_start, vma->vm_end,
                 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
                 (unsigned long)pgprot_val(vma->vm_page_prot));

    if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {   // reject forbidden (writable) mappings
        ret = -EPERM;
        failure_string = "bad vm_flags";
        goto err_bad_arg;
    }
    vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;

    if (proc->buffer) {   // already mapped once?
        ret = -EBUSY;
        failure_string = "already mapped";
        goto err_already_mapped;
    }

    area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);   // reserve kernel virtual space
    if (area == NULL) {
        ret = -ENOMEM;
        failure_string = "get_vm_area";
        goto err_get_vm_area_failed;
    }
    proc->buffer = area->addr;
    proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;

#ifdef CONFIG_CPU_CACHE_VIPT
    if (cache_is_vipt_aliasing()) {
        while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
            printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p bad alignment\n",
                   proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
            vma->vm_start += PAGE_SIZE;
        }
    }
#endif

    proc->pages = kzalloc(sizeof(proc->pages[0]) *
                          ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
                          GFP_KERNEL);   // allocate the page-pointer array
    if (proc->pages == NULL) {
        ret = -ENOMEM;
        failure_string = "alloc page array";
        goto err_alloc_pages_failed;
    }
    proc->buffer_size = vma->vm_end - vma->vm_start;

    vma->vm_ops = &binder_vm_ops;
    vma->vm_private_data = proc;

    // now actually allocate and map the first physical page
    if (binder_update_page_range(proc, 1, proc->buffer,
                                 proc->buffer + PAGE_SIZE, vma)) {
        ret = -ENOMEM;
        failure_string = "alloc small buf";
        goto err_alloc_small_buf_failed;
    }
    buffer = proc->buffer;
    INIT_LIST_HEAD(&proc->buffers);
    list_add(&buffer->entry, &proc->buffers);
    buffer->free = 1;
    binder_insert_free_buffer(proc, buffer);
    proc->free_async_space = proc->buffer_size / 2;
    barrier();
    proc->files = get_files_struct(current);
    proc->vma = vma;

    return 0;

err_alloc_small_buf_failed:
    kfree(proc->pages);
    proc->pages = NULL;
err_alloc_pages_failed:
    vfree(proc->buffer);
    proc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
err_bad_arg:
    printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n",
           proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
    return ret;
}

binder_update_page_range works in three steps:
1: alloc_page allocates a physical page;
2: map_vm_area maps the page into the reserved kernel area;
3: vm_insert_page inserts the page into the user VMA.
Summary of the binder_mmap sequence:
1: check the mapping constraints: size (at most 4 MB), flags, and whether a mapping already exists;
2: reserve a kernel virtual address range and record its address in the process's buffer field;
3: allocate the physical page array and record it;
4: insert the buffer into the process's buffer list;
5: call binder_update_page_range to wire the physical pages into the virtual range;
6: call binder_insert_free_buffer to insert this free buffer into the process's free-buffer tree.

7) binder_ioctl
Binder's main ioctl commands are:
#define BINDER_WRITE_READ         _IOWR('b', 1, struct binder_write_read)   // read/write transaction data
#define BINDER_SET_IDLE_TIMEOUT   _IOW('b', 3, int64_t)
#define BINDER_SET_MAX_THREADS    _IOW('b', 5, size_t)
#define BINDER_SET_IDLE_PRIORITY  _IOW('b', 6, int)
#define BINDER_SET_CONTEXT_MGR    _IOW('b', 7, int)
#define BINDER_THREAD_EXIT        _IOW('b', 8, int)
#define BINDER_VERSION            _IOWR('b', 9, struct binder_version)
BINDER_SET_CONTEXT_MGR is used by service_manager to install itself as the context manager; every handle of 0 then refers to this master node. A user program only passes the command it wants executed down to the driver, and the driver carries it out; the user program does not need to know how it is done. To guarantee that user programs and a given device driver agree on each command, Linux defines a command-code layout which packs the command into a single integer; the result is compact but not human-readable. The _IOW family of macros is defined in include/asm-generic/ioctl.h.
(1) BINDER_VERSION
    case BINDER_VERSION:
        if (size != sizeof(struct binder_version)) {
            ret = -EINVAL;
            goto err;
        }
        if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
                     &((struct binder_version *)ubuf)->protocol_version)) {
            ret = -EINVAL;
            goto err;
        }
(2) BINDER_SET_MAX_THREADS
    case BINDER_SET_MAX_THREADS:
        if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
            ret = -EINVAL;
            goto err;
        }
(3) BINDER_THREAD_EXIT
    case BINDER_THREAD_EXIT:
        binder_debug(BINDER_DEBUG_THREADS, "binder: %d:%d exit\n",
                     proc->pid, thread->pid);
        binder_free_thread(proc, thread);
        thread = NULL;
(4) BINDER_SET_CONTEXT_MGR
A process/thread that is successfully installed as binder_context_mgr_node is called the Context Manager: the driver's global binder_context_mgr_uid is set to the caller's uid, and a new binder_node is created and stored in the global binder_context_mgr_node. Only the process/thread that created binder_context_mgr_node has permission to set it again.
    case BINDER_SET_CONTEXT_MGR:
        if (binder_context_mgr_node != NULL) {
            printk(KERN_ERR "binder: BINDER_SET_CONTEXT_MGR already set\n");
            ret = -EBUSY;
            goto err;
        }
        if (binder_context_mgr_uid != -1) {   // check that the caller is allowed to issue this command
            if (binder_context_mgr_uid != current->cred->euid) {
                printk(KERN_ERR "binder: BINDER_SET_"
                       "CONTEXT_MGR bad uid %d != %d\n",
                       current->cred->euid, binder_context_mgr_uid);
                ret = -EPERM;
                goto err;
            }
        } else   // record the caller's euid, then create the binder_node
            binder_context_mgr_uid = current->cred->euid;
        binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
        if (binder_context_mgr_node == NULL) {
            ret = -ENOMEM;
            goto err;
        }
        // initialize the node's reference counts
        binder_context_mgr_node->local_weak_refs++;
        binder_context_mgr_node->local_strong_refs++;
        binder_context_mgr_node->has_strong_ref = 1;
        binder_context_mgr_node->has_weak_ref = 1;
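For reference, the user-space side of (4) is a single ioctl issued by servicemanager right after it opens and maps the device; a hedged sketch:

/* Sketch: what the context manager (servicemanager) does at startup. */
static int become_context_manager(int fd)
{
    /* the argument is unused by this version of the driver, so pass 0 */
    return ioctl(fd, BINDER_SET_CONTEXT_MGR, 0) >= 0;
}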

static struct binder_node *binder_new_node(struct binder_proc *proc,
                                           void __user *ptr,
                                           void __user *cookie)
{
    struct rb_node **p = &proc->nodes.rb_node;
    struct rb_node *parent = NULL;
    struct binder_node *node;

    while (*p) {   // walk down to the insertion point
        parent = *p;
        node = rb_entry(parent, struct binder_node, rb_node);
        if (ptr < node->ptr)
            p = &(*p)->rb_left;
        else if (ptr > node->ptr)
            p = &(*p)->rb_right;
        else
            return NULL;
    }

    // allocate and insert the new binder_node
    node = kzalloc(sizeof(*node), GFP_KERNEL);
    if (node == NULL)
        return NULL;
    binder_stats_created(BINDER_STAT_NODE);
    rb_link_node(&node->rb_node, parent, p);   // link the node into the tree
    rb_insert_color(&node->rb_node, &proc->nodes);
    node->debug_id = ++binder_last_id;         // initialize its fields
    node->proc = proc;
    node->ptr = ptr;
    node->cookie = cookie;
    node->work.type = BINDER_WORK_NODE;
    INIT_LIST_HEAD(&node->work.entry);         // initialize the list heads
    INIT_LIST_HEAD(&node->async_todo);
    binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                 "binder: %d:%d node %d u%p c%p created\n",
                 proc->pid, current->pid, node->debug_id,
                 node->ptr, node->cookie);
    return node;
}
The nodes member of binder_proc is the root of a red-black tree of binder_nodes. The function first walks the tree to find the leaf position where the new node belongs, then allocates the binder_node and inserts it there.
(5) BINDER_WRITE_READ
    case BINDER_WRITE_READ: {
        struct binder_write_read bwr;
        if (size != sizeof(struct binder_write_read)) {   // sanity-check the argument size
            ret = -EINVAL;
            goto err;
        }
        if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {    // copy the binder_write_read from user space
            ret = -EFAULT;
            goto err;
        }
        binder_debug(BINDER_DEBUG_READ_WRITE,
                     "binder: %d:%d write %ld at %08lx, read %ld at %08lx\n",
                     proc->pid, thread->pid, bwr.write_size, bwr.write_buffer,
                     bwr.read_size, bwr.read_buffer);

        if (bwr.write_size > 0) {   // perform the writes
            ret = binder_thread_write(proc, thread,
                                      (void __user *)bwr.write_buffer,
                                      bwr.write_size, &bwr.write_consumed);
            if (ret < 0) {
                bwr.read_consumed = 0;
                if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                    ret = -EFAULT;
                goto err;
            }
        }
        if (bwr.read_size > 0) {    // perform the reads
            ret = binder_thread_read(proc, thread,
                                     (void __user *)bwr.read_buffer,
                                     bwr.read_size, &bwr.read_consumed,
                                     filp->f_flags & O_NONBLOCK);
            if (!list_empty(&proc->todo))
                wake_up_interruptible(&proc->wait);
            if (ret < 0) {
                if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                    ret = -EFAULT;
                goto err;
            }
        }
        binder_debug(BINDER_DEBUG_READ_WRITE,
                     "binder: %d:%d wrote %ld of %ld, read return %ld of %ld\n",
                     proc->pid, thread->pid, bwr.write_consumed, bwr.write_size,
                     bwr.read_consumed, bwr.read_size);
        if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {   // copy the result back to user space
            ret = -EFAULT;
            goto err;
        }
        break;
    }

static int binder_free_thread(struct binder_proc *proc, struct binder_thread *thread)
{
    struct binder_transaction *t;
    struct binder_transaction *send_reply = NULL;
    int active_transactions = 0;

    rb_erase(&thread->rb_node, &proc->threads);   // remove the thread from the red-black tree
    t = thread->transaction_stack;                // take its transaction stack
    if (t && t->to_thread == thread)              // is the thread the target of a pending transaction?
        send_reply = t;
    while (t) {                                   // walk and detach every transaction on the stack
        active_transactions++;
        binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
                     "binder: release %d:%d transaction %d "
                     "%s, still active\n", proc->pid, thread->pid,
                     t->debug_id,
                     (t->to_thread == thread) ? "in" : "out");
        if (t->to_thread == thread) {
            t->to_proc = NULL;
            t->to_thread = NULL;
            if (t->buffer) {
                t->buffer->transaction = NULL;
                t->buffer = NULL;
            }
            t = t->to_parent;
        } else if (t->from == thread) {
            t->from = NULL;
            t = t->from_parent;
        } else
            BUG();
    }
    if (send_reply)   // the sender still expects a reply: fail it
        binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
    binder_release_work(&thread->todo);           // release the queued binder_work
    kfree(thread);                                // free the binder_thread itself
    binder_stats_deleted(BINDER_STAT_THREAD);     // update the statistics
    return active_transactions;
}

static void binder_release_work(struct list_head *list)
{
    struct binder_work *w;

    while (!list_empty(list)) {
        w = list_first_entry(list, struct binder_work, entry);
        list_del_init(&w->entry);
        switch (w->type) {
        case BINDER_WORK_TRANSACTION: {
            struct binder_transaction *t;

            t = container_of(w, struct binder_transaction, work);
            if (t->buffer->target_node && !(t->flags & TF_ONE_WAY))
                binder_send_failed_reply(t, BR_DEAD_REPLY);
        } break;
        case BINDER_WORK_TRANSACTION_COMPLETE: {
            kfree(w);
            binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
        } break;
        default:
            break;
        }
    }
}

Reposted from: https://my.oschina.net/aoaoH/blog/465971
