author     Gerd Hoffmann <kraxel@redhat.com>    2015-08-19 23:44:15 +0200
committer  Gerd Hoffmann <kraxel@redhat.com>    2015-10-16 10:44:00 +0200
commit     ec2f0577c7b1fccc7a5d7ee8002a4f162061498f
tree       218790a9de25b9f7f415d94f51d4114c9f730ca5
parent     9c73f4782642c785569ad50e01324002d160bd09
virtio-gpu: add & use virtio_gpu_queue_fenced_ctrl_buffer
Add helper function to handle the submission of fenced control requests.
Make sure we initialize the fence while holding the virtqueue lock, so
requests can't be reordered.
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
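
For illustration, the hazard the helper closes can be reduced to this minimal userspace sketch: a shared fence-id counter and a pthread mutex stand in for fence_drv's sequence counter and ctrlq.qlock; none of the names below are kernel APIs. If the id were allocated before taking the lock, thread A could draw id 1, block waiting for virtqueue space, and be overtaken by thread B submitting id 2 first. Allocating and submitting inside one critical section makes that impossible.

/*
 * Minimal userspace sketch (illustrative only, not kernel code) of the
 * invariant the new helper enforces: a fence id must be allocated and
 * its request submitted inside ONE critical section, otherwise two
 * threads can emit ids 1 and 2 but submit them as 2, 1.
 */
#include <inttypes.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER; /* ~ ctrlq.qlock */
static uint64_t next_fence_id = 1;                        /* ~ fence sequence */

/* stand-in for virtio_gpu_queue_ctrl_buffer_locked() */
static void submit_locked(uint64_t fence_id)
{
	printf("submitted request, fence id %" PRIu64 "\n", fence_id);
}

/* pattern of virtio_gpu_queue_fenced_ctrl_buffer(): emit + submit atomically */
static void queue_fenced(void)
{
	pthread_mutex_lock(&qlock);
	uint64_t id = next_fence_id++;	/* "fence emit" under the lock ... */
	submit_locked(id);		/* ... and submit under the same lock */
	pthread_mutex_unlock(&qlock);
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		queue_fenced();		/* ids always leave in ascending order */
	return 0;
}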
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_fence.c |  2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vq.c    | 40
2 files changed, 35 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index 1da632631dac..793ad9f631fd 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -81,7 +81,7 @@ int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
 	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
 	unsigned long irq_flags;
 
-	*fence = kmalloc(sizeof(struct virtio_gpu_fence), GFP_KERNEL);
+	*fence = kmalloc(sizeof(struct virtio_gpu_fence), GFP_ATOMIC);
 	if ((*fence) == NULL)
 		return -ENOMEM;
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 5b9bc242890f..ee25e9a4ae03 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -347,6 +347,38 @@ static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
 	return rc;
 }
 
+static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
+					       struct virtio_gpu_vbuffer *vbuf,
+					       struct virtio_gpu_ctrl_hdr *hdr,
+					       struct virtio_gpu_fence **fence)
+{
+	struct virtqueue *vq = vgdev->ctrlq.vq;
+	int rc;
+
+again:
+	spin_lock(&vgdev->ctrlq.qlock);
+
+	/*
+	 * Make sure we have enough space in the virtqueue.  If not,
+	 * wait here until we have.
+	 *
+	 * Without that virtio_gpu_queue_ctrl_buffer_locked might have
+	 * to wait for free space, which can result in fence ids being
+	 * submitted out-of-order.
+	 */
+	if (vq->num_free < 3) {
+		spin_unlock(&vgdev->ctrlq.qlock);
+		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
+		goto again;
+	}
+
+	if (fence)
+		virtio_gpu_fence_emit(vgdev, hdr, fence);
+	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
+	spin_unlock(&vgdev->ctrlq.qlock);
+	return rc;
+}
+
 static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
 				   struct virtio_gpu_vbuffer *vbuf)
 {
@@ -499,9 +531,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
 	cmd_p->r.x = x;
 	cmd_p->r.y = y;
 
-	if (fence)
-		virtio_gpu_fence_emit(vgdev, &cmd_p->hdr, fence);
-	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
 }
 
 static void
@@ -524,9 +554,7 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
 	vbuf->data_buf = ents;
 	vbuf->data_size = sizeof(*ents) * nents;
 
-	if (fence)
-		virtio_gpu_fence_emit(vgdev, &cmd_p->hdr, fence);
-	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
 }
 
 static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
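
Two details in the diff follow from the new locking. First, the GFP_KERNEL to GFP_ATOMIC switch in virtgpu_fence.c is a direct consequence of the new call site: virtio_gpu_fence_emit() now runs with ctrlq.qlock held, and code holding a spinlock must not sleep, which a GFP_KERNEL allocation may do. Second, the vq->num_free < 3 check reserves descriptors for one full control request before the fence id is drawn; three plausibly corresponds to a request's maximum footprint (command, optional data, response), though that reading is an inference from the surrounding driver code, not something the patch states. The allocation constraint, as a schematic kernel-style sketch rather than the literal driver code:

/*
 * Schematic sketch (simplified, not the exact driver code) of why the
 * fence allocation must be GFP_ATOMIC after this patch: it now happens
 * inside the ctrlq.qlock critical section, where sleeping is illegal.
 */
spin_lock(&vgdev->ctrlq.qlock);			/* atomic context begins */

/* GFP_KERNEL may sleep waiting for memory reclaim, which would be a
 * bug under a spinlock; GFP_ATOMIC never sleeps but can fail under
 * memory pressure, so the NULL check matters more here. */
*fence = kmalloc(sizeof(struct virtio_gpu_fence), GFP_ATOMIC);
if (!*fence) {
	spin_unlock(&vgdev->ctrlq.qlock);
	return -ENOMEM;
}

rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
spin_unlock(&vgdev->ctrlq.qlock);		/* atomic context ends */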