author		Hans Verkuil <hans.verkuil@cisco.com>	2018-05-21 04:54:53 -0400
committer	Mauro Carvalho Chehab <mchehab+samsung@kernel.org>	2018-08-31 11:24:02 -0400
commit		803a7ab758252599ddadf5fe9326acee7340d9b6
tree		dc37d01104638e1dbc347b7020cde0e2b936dd22 /drivers/media/v4l2-core/v4l2-mem2mem.c
parent		9ac8090fda77f072815c209c80fb230e89cda18c
media: v4l2-mem2mem: add vb2_m2m_request_queue
For mem2mem devices we have to make sure that v4l2_m2m_try_schedule()
is called whenever a request is queued.
We do that by creating a vb2_m2m_request_queue() helper that should
be used instead of the 'normal' vb2_request_queue() helper. The m2m
helper function will call v4l2_m2m_try_schedule() as needed.
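A minimal sketch of how a mem2mem driver would be expected to wire this up, assuming a driver-specific media_device instance (the ops struct name and the registration site are illustrative, not part of this patch):

	/*
	 * Illustrative sketch: a mem2mem driver points its media request ops
	 * at the new helper instead of the generic vb2_request_queue(), so
	 * that v4l2_m2m_try_schedule() runs once a request is queued.
	 * "my_m2m_media_ops" is a hypothetical name.
	 */
	static const struct media_device_ops my_m2m_media_ops = {
		.req_validate	= vb2_request_validate,
		.req_queue	= vb2_m2m_request_queue,
	};

	/* at probe time, on the driver's media_device: */
	/*	mdev->ops = &my_m2m_media_ops; */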
In addition, we avoid calling v4l2_m2m_try_schedule() when preparing
or queueing a buffer for a request, since it is no longer needed at
that point. Instead, the new helper will do that when the request is
actually queued.
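From userspace, the consequence is that the m2m job is only scheduled once the request itself is queued. A hedged sketch of that flow (the file descriptors, buffer index, memory type and single-planar OUTPUT type are assumptions for illustration, not part of this patch):

	/*
	 * Hedged userspace sketch: queue an OUTPUT buffer as part of a
	 * request, then queue the request. With this patch, VIDIOC_QBUF alone
	 * no longer triggers v4l2_m2m_try_schedule(); queueing the request
	 * does, via vb2_m2m_request_queue(). Capture buffers may not carry
	 * V4L2_BUF_FLAG_REQUEST_FD (the core now returns -EPERM).
	 */
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/media.h>
	#include <linux/videodev2.h>

	static int queue_output_buf_in_request(int video_fd, int media_fd,
					       unsigned int index)
	{
		struct v4l2_buffer buf;
		int req_fd;

		/* Allocate a request on the media device. */
		if (ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &req_fd))
			return -1;

		memset(&buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = index;
		buf.flags = V4L2_BUF_FLAG_REQUEST_FD;
		buf.request_fd = req_fd;

		/* Associate the OUTPUT buffer with the request; no job is
		 * scheduled yet. */
		if (ioctl(video_fd, VIDIOC_QBUF, &buf))
			return -1;

		/* Queueing the request is what ultimately schedules the
		 * m2m job. */
		return ioctl(req_fd, MEDIA_REQUEST_IOC_QUEUE);
	}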
Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com>
Reviewed-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
Diffstat (limited to 'drivers/media/v4l2-core/v4l2-mem2mem.c')
-rw-r--r--	drivers/media/v4l2-core/v4l2-mem2mem.c	63
1 file changed, 55 insertions(+), 8 deletions(-)
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 4de8fa163fd3..d7806db222d8 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -387,7 +387,7 @@ static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
 		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
 		if (m2m_dev->m2m_ops->job_abort)
 			m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
-		dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx);
+		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
 		wait_event(m2m_ctx->finished,
 				!(m2m_ctx->job_flags & TRANS_RUNNING));
 	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
@@ -478,8 +478,14 @@ int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 	int ret;
 
 	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
+	if (!V4L2_TYPE_IS_OUTPUT(vq->type) &&
+	    (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
+		dprintk("%s: requests cannot be used with capture buffers\n",
+			__func__);
+		return -EPERM;
+	}
 	ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf);
-	if (!ret)
+	if (!ret && !(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
 		v4l2_m2m_try_schedule(m2m_ctx);
 
 	return ret;
@@ -501,14 +507,9 @@ int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 {
 	struct video_device *vdev = video_devdata(file);
 	struct vb2_queue *vq;
-	int ret;
 
 	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
-	ret = vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
-	if (!ret)
-		v4l2_m2m_try_schedule(m2m_ctx);
-
-	return ret;
+	return vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);
 
@@ -952,6 +953,52 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
 
+void vb2_m2m_request_queue(struct media_request *req)
+{
+	struct media_request_object *obj, *obj_safe;
+	struct v4l2_m2m_ctx *m2m_ctx = NULL;
+
+	/*
+	 * Queue all objects. Note that buffer objects are at the end of the
+	 * objects list, after all other object types. Once buffer objects
+	 * are queued, the driver might delete them immediately (if the driver
+	 * processes the buffer at once), so we have to use
+	 * list_for_each_entry_safe() to handle the case where the object we
+	 * queue is deleted.
+	 */
+	list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
+		struct v4l2_m2m_ctx *m2m_ctx_obj;
+		struct vb2_buffer *vb;
+
+		if (!obj->ops->queue)
+			continue;
+
+		if (vb2_request_object_is_buffer(obj)) {
+			/* Sanity checks */
+			vb = container_of(obj, struct vb2_buffer, req_obj);
+			WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type));
+			m2m_ctx_obj = container_of(vb->vb2_queue,
+						   struct v4l2_m2m_ctx,
+						   out_q_ctx.q);
+			WARN_ON(m2m_ctx && m2m_ctx_obj != m2m_ctx);
+			m2m_ctx = m2m_ctx_obj;
+		}
+
+		/*
+		 * The buffer we queue here can in theory be immediately
+		 * unbound, hence the use of list_for_each_entry_safe()
+		 * above and why we call the queue op last.
+		 */
+		obj->ops->queue(obj);
+	}
+
+	WARN_ON(!m2m_ctx);
+
+	if (m2m_ctx)
+		v4l2_m2m_try_schedule(m2m_ctx);
+}
+EXPORT_SYMBOL_GPL(vb2_m2m_request_queue);
+
 /* Videobuf2 ioctl helpers */
 
 int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,