
Android Binder: how the client talks to the driver via ioctl

2. Client-side Binder driver

In the Binder driver layer, the counterpart of the user-space ioctl() call is the binder_ioctl() function. binder_ioctl() is responsible for exchanging IPC data and IPC reply data between two processes.

ioctl(file descriptor, ioctl command, data type)

(1) The file descriptor is the value returned by open() on the Binder driver;

(2) The ioctl command and its data type come as a pair; each command expects a specific data type. For example:

ioctl command          | data type                | operation
BINDER_WRITE_READ      | struct binder_write_read | send/receive Binder IPC data
BINDER_SET_MAX_THREADS | __u32                    | set the maximum number of Binder threads
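As a hedged illustration (not the framework's actual code), here is how user space might issue these two commands against an opened /dev/binder descriptor. The command macros and struct binder_write_read come from the UAPI header linux/android/binder.h; the function name and values are made up for this sketch.

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

int binder_ioctl_demo(void)
{
    int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
    if (fd < 0)
        return -1;

    uint32_t max_threads = 15;              // data type: __u32
    ioctl(fd, BINDER_SET_MAX_THREADS, &max_threads);

    struct binder_write_read bwr;           // data type: struct binder_write_read
    memset(&bwr, 0, sizeof(bwr));
    // a real caller would fill bwr.write_buffer / bwr.write_size with BC_* commands here
    ioctl(fd, BINDER_WRITE_READ, &bwr);     // ends up in binder_ioctl() in the kernel

    close(fd);
    return 0;
}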

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    int ret;
    struct binder_proc *proc = filp->private_data;
    struct binder_thread *thread;  // the Binder thread for the calling task
    struct binder_write_read bwr;  // declared here; used by BINDER_WRITE_READ below
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;

    // wait (interruptibly) until binder_stop_on_user_error drops below 2
    ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
    if (ret)
        goto err_unlocked;

    binder_lock(__func__);
    // look up (or create) the binder_thread for the current task
    thread = binder_get_thread(proc);
    if (thread == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    switch (cmd) {  // dispatch on the ioctl command
    case BINDER_WRITE_READ:  // perform a Binder read/write round trip
        if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
            ret = -EFAULT;
            goto err;
        }
        binder_debug(BINDER_DEBUG_READ_WRITE,
                 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
                 proc->pid, thread->pid,
                 (u64)bwr.write_size, (u64)bwr.write_buffer,
                 (u64)bwr.read_size, (u64)bwr.read_buffer);

        if (bwr.write_size > 0) {
            ret = binder_thread_write(proc, thread, bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
            trace_binder_write_done(ret);
            if (ret < 0) {
                bwr.read_consumed = 0;
                if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                    ret = -EFAULT;
                goto err;
            }
        }
        if (bwr.read_size > 0) {
            ret = binder_thread_read(proc, thread, bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
            trace_binder_read_done(ret);
            if (!list_empty(&proc->todo))
                wake_up_interruptible(&proc->wait);
            if (ret < 0) {
                if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                    ret = -EFAULT;
                goto err;
            }
        }
        break;
    ...


The flow is:

1. Get the binder_thread for the caller.

2. Call binder_thread_write() to package the data and send it to the servicemanager server process.

3. Call binder_thread_read() to read the data that the servicemanager server process sends back.

Step 3 applies when a return value is needed: binder_thread_read() is called right after binder_thread_write() completes. When no return value is needed, control simply returns to talkWithDriver() and then to waitForResponse(), as sketched below.
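A minimal user-space sketch of that round trip, loosely modeled on what IPCThreadState::talkWithDriver() does (the function name and buffer parameters here are hypothetical; only the ioctl command and struct are real):

#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

// One BINDER_WRITE_READ round trip: the driver first consumes the BC_*
// commands in the write buffer (binder_thread_write), then fills the read
// buffer with BR_* replies (binder_thread_read).
int talk_with_driver(int fd, void *out, size_t out_size,
                     void *in, size_t in_size)
{
    struct binder_write_read bwr;

    bwr.write_buffer   = (binder_uintptr_t)(uintptr_t)out;  // BC_* commands to send
    bwr.write_size     = out_size;
    bwr.write_consumed = 0;
    bwr.read_buffer    = (binder_uintptr_t)(uintptr_t)in;   // BR_* replies land here
    bwr.read_size      = in_size;  // 0 => write only, return without blocking
    bwr.read_consumed  = 0;

    return ioctl(fd, BINDER_WRITE_READ, &bwr);
}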

2.1 Getting the thread

static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
    struct binder_thread *thread = NULL;
    struct rb_node *parent = NULL;
    struct rb_node **p = &proc->threads.rb_node;
    while (*p) {  // look up the binder_thread for the current task's pid in the proc's threads tree
        parent = *p;
        thread = rb_entry(parent, struct binder_thread, rb_node);
        if (current->pid < thread->pid)
            p = &(*p)->rb_left;
        else if (current->pid > thread->pid)
            p = &(*p)->rb_right;
        else
            break;
    }
    if (*p == NULL) {
        thread = kzalloc(sizeof(*thread), GFP_KERNEL); // allocate a new binder_thread
        if (thread == NULL)
            return NULL;
        binder_stats_created(BINDER_STAT_THREAD);
        thread->proc = proc;
        thread->pid = current->pid;  // save the current task's (thread's) pid
        init_waitqueue_head(&thread->wait);
        INIT_LIST_HEAD(&thread->todo);
        rb_link_node(&thread->rb_node, parent, p);
        rb_insert_color(&thread->rb_node, &proc->threads);
        thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
        thread->return_error = BR_OK;
        thread->return_error2 = BR_OK;
    }
    return thread;
}

Looking up a binder_thread in binder_proc means searching the proc's threads red-black tree for the node that matches the current thread. If the current thread has already been added to the proc's thread tree, that node is returned directly; otherwise a new binder_thread is created and inserted into the proc. The same lookup-or-insert idiom, stripped of the Binder specifics, is sketched below.
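This is the standard Linux rbtree pattern; a stripped-down sketch on an arbitrary integer key (my_node and lookup_or_insert are hypothetical names, not kernel code):

#include <linux/rbtree.h>
#include <linux/slab.h>

struct my_node {
    struct rb_node rb;
    int key;
};

// Return the node for @key in @root, allocating and linking it if absent.
static struct my_node *lookup_or_insert(struct rb_root *root, int key)
{
    struct rb_node **p = &root->rb_node, *parent = NULL;
    struct my_node *n;

    while (*p) {
        parent = *p;
        n = rb_entry(parent, struct my_node, rb);
        if (key < n->key)
            p = &(*p)->rb_left;
        else if (key > n->key)
            p = &(*p)->rb_right;
        else
            return n;                    // already present
    }
    n = kzalloc(sizeof(*n), GFP_KERNEL);
    if (!n)
        return NULL;
    n->key = key;
    rb_link_node(&n->rb, parent, p);     // link at the slot the search found
    rb_insert_color(&n->rb, root);       // rebalance the tree
    return n;
}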

2.2 Sending data

An excerpt of binder_thread_write() follows. Since this article is mainly concerned with the transport path, only the case BC_TRANSACTION / case BC_REPLY part is quoted:

case BC_TRANSACTION:
case BC_REPLY: {
    struct binder_transaction_data tr;

    if (copy_from_user(&tr, ptr, sizeof(tr)))
        return -EFAULT;
    ptr += sizeof(tr);
    binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
    break;
}

This copies a binder_transaction_data from user space and calls binder_transaction() to carry the Binder object over to the target process. Here we are handling the BC_TRANSACTION command. The shape of the struct being copied is shown below, followed by the main function.
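For reference, this is the struct the driver just copied in, as defined in the UAPI header linux/android/binder.h (comments added here for this article):

struct binder_transaction_data {
    union {
        __u32            handle;  /* target handle for commands (0 = servicemanager) */
        binder_uintptr_t ptr;     /* target object pointer for return transactions */
    } target;
    binder_uintptr_t cookie;      /* target object cookie */
    __u32 code;                   /* transaction code, e.g. ADD_SERVICE_TRANSACTION */
    __u32 flags;                  /* e.g. TF_ONE_WAY */
    pid_t sender_pid;             /* filled in by the driver */
    uid_t sender_euid;
    binder_size_t data_size;      /* number of bytes of payload data */
    binder_size_t offsets_size;   /* number of bytes of object offsets */
    union {
        struct {
            binder_uintptr_t buffer;   /* transaction payload */
            binder_uintptr_t offsets;  /* offsets of flat_binder_objects in the payload */
        } ptr;
        __u8 buf[8];
    } data;
};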

static void binder_transaction(struct binder_proc *proc,
               struct binder_thread *thread,
               struct binder_transaction_data *tr, int reply)
{
    if (reply) {
        ...
    } else {
        if (tr->target.handle) {
            ...
        } else {
            // handle == 0 selects the servicemanager node
            target_node = binder_context_mgr_node;
        }
        // target_proc is the servicemanager process
        target_proc = target_node->proc;
    }

    if (target_thread) {
        ...
    } else {
        // use the servicemanager process's todo queue and wait queue
        target_list = &target_proc->todo;
        target_wait = &target_proc->wait;
    }

    t = kzalloc(sizeof(*t), GFP_KERNEL); // allocate a new transaction
    tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);

    // for non-oneway calls, record the current thread in the transaction's from field
    if (!reply && !(tr->flags & TF_ONE_WAY))
        t->from = thread;
    else
        t->from = NULL;

    t->sender_euid = task_euid(proc->tsk);
    t->to_proc = target_proc;  // the target of this call is the servicemanager process
    t->to_thread = target_thread;
    t->code = tr->code;  // for this call, code = ADD_SERVICE_TRANSACTION
    t->flags = tr->flags;  // for this call, flags = 0
    t->priority = task_nice(current);

    // allocate the transaction buffer from the servicemanager process's space
    t->buffer = binder_alloc_buf(target_proc, tr->data_size,
        tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));

    t->buffer->allow_user_free = 0;
    t->buffer->transaction = t;
    t->buffer->target_node = target_node;

    if (target_node)
        binder_inc_node(target_node, 1, 0, NULL); // bump the target node's reference count
    offp = (binder_size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));

    // copy ptr.buffer and ptr.offsets of the user-space binder_transaction_data into the kernel
    copy_from_user(t->buffer->data,
        (const void __user *)(uintptr_t)tr->data.ptr.buffer, tr->data_size);
    copy_from_user(offp,
        (const void __user *)(uintptr_t)tr->data.ptr.offsets, tr->offsets_size);

    off_end = (void *)offp + tr->offsets_size;

    for (; offp < off_end; offp++) {
        struct flat_binder_object *fp;
        fp = (struct flat_binder_object *)(t->buffer->data + *offp);
        off_min = *offp + sizeof(struct flat_binder_object);
        switch (fp->type) {
            case BINDER_TYPE_BINDER:
            case BINDER_TYPE_WEAK_BINDER: {
              struct binder_ref *ref; 
              struct binder_node *node = binder_get_node(proc, fp->binder);
              if (node == NULL) {
                // create a binder_node in the process that hosts the service
                node = binder_new_node(proc, fp->binder, fp->cookie);
                ...
              }
              // get (or create) the binder_ref for this node in the servicemanager process
              ref = binder_get_ref_for_node(target_proc, node);
              ...
              // rewrite the type from BINDER to the corresponding HANDLE type
              if (fp->type == BINDER_TYPE_BINDER)
                fp->type = BINDER_TYPE_HANDLE;
              else
                fp->type = BINDER_TYPE_WEAK_HANDLE;
              fp->binder = 0;
              fp->handle = ref->desc; // hand out the handle value
              fp->cookie = 0;
              binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
                       &thread->todo);
            } break;
            // other cases (BINDER_TYPE_HANDLE, BINDER_TYPE_FD, ...) elided
        }
    }

    if (reply) {
        ...
    } else if (!(t->flags & TF_ONE_WAY)) {
        // BC_TRANSACTION and not oneway: record this transaction on the thread's transaction stack
        t->need_reply = 1;
        t->from_parent = thread->transaction_stack;
        thread->transaction_stack = t;
    } else {
        ...
    }

    // queue BINDER_WORK_TRANSACTION on the target list; for this call that is target_proc->todo
    t->work.type = BINDER_WORK_TRANSACTION;
    list_add_tail(&t->work.entry, target_list);

    // queue BINDER_WORK_TRANSACTION_COMPLETE on the current thread's todo list
    tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
    list_add_tail(&tcomplete->entry, &thread->todo);

    // wake up the target's wait queue; for this call that is target_proc->wait
    if (target_wait)
        wake_up_interruptible(target_wait);
    return;
}

The main logic:

1. Parse the payload and insert red-black-tree nodes (binder_node / binder_ref) as needed.

2. As analyzed earlier, binder_context_mgr_node refers to the servicemanager process, so a BINDER_WORK_TRANSACTION work item is added to servicemanager's binder_proc->todo, and wake_up_interruptible() is called to wake the servicemanager process.

binder_transaction_data is used to wrap a work transaction, which the servicemanager process will later pick up and unpack; essentially this is a pack-and-unpack process. A sketch of the packing side follows.
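To make the packing side concrete, here is a hedged sketch of how user space lays out one BC_TRANSACTION in the write buffer, loosely modeled on IPCThreadState::writeTransactionData() (pack_transaction and its parameters are hypothetical; the command word and struct are real):

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <linux/android/binder.h>

// Pack one BC_TRANSACTION into @out: a 32-bit command word followed by the
// binder_transaction_data that binder_thread_write() copies in above.
static size_t pack_transaction(uint8_t *out, uint32_t handle, uint32_t code,
                               const void *data, size_t data_size)
{
    uint32_t cmd = BC_TRANSACTION;
    struct binder_transaction_data tr;

    memset(&tr, 0, sizeof(tr));
    tr.target.handle = handle;          // 0 => servicemanager
    tr.code = code;                     // e.g. ADD_SERVICE_TRANSACTION
    tr.flags = 0;                       // synchronous, expects a reply
    tr.data_size = data_size;
    tr.data.ptr.buffer = (binder_uintptr_t)(uintptr_t)data;

    memcpy(out, &cmd, sizeof(cmd));
    memcpy(out + sizeof(cmd), &tr, sizeof(tr));
    return sizeof(cmd) + sizeof(tr);
}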

2.3 Reading data

To restate: when a return value is needed, binder_thread_read() is called right after binder_thread_write() completes; when no return value is needed, control first returns to talkWithDriver() and then to waitForResponse().

The data is of course sent to the servicemanager server process first, and only then is the data returned by servicemanager read back.

At that point the reading thread (inside binder_thread_read()) is blocked. It stays asleep until the servicemanager server process finishes its work and wakes the thread up in kernel space, after which execution continues. This is a key point. The hedged sketch below shows what the corresponding user-space loop looks like.
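A hedged sketch of the reply loop, loosely modeled on IPCThreadState::waitForResponse() (wait_for_reply and its buffer handling are made up; the ioctl command and BR_* codes are real). A real loop would walk every command in the returned buffer using bwr.read_consumed; this sketch only inspects the first one.

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int wait_for_reply(int fd, uint8_t *in, size_t in_size)
{
    for (;;) {
        struct binder_write_read bwr = {0};
        bwr.read_buffer = (binder_uintptr_t)(uintptr_t)in;
        bwr.read_size = in_size;

        // Blocks in binder_thread_read() until this thread is woken.
        if (ioctl(fd, BINDER_WRITE_READ, &bwr) < 0)
            return -1;

        uint32_t cmd;
        memcpy(&cmd, in, sizeof(cmd));   // first BR_* command in the buffer
        switch (cmd) {
        case BR_TRANSACTION_COMPLETE:
            continue;                    // our write was accepted; keep waiting
        case BR_REPLY:
            return 0;                    // reply payload follows in the buffer
        default:
            continue;                    // other BR_* commands elided
        }
    }
}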

