| author    | Gurchetan Singh <gurchetansingh@chromium.org>     | 2019-12-02 17:36:27 -0800 |
|-----------|---------------------------------------------------|---------------------------|
| committer | Gerd Hoffmann <kraxel@redhat.com>                 | 2019-12-05 08:57:45 +0100 |
| commit    | 284562e1f34874e267d4f499362c3816f8f6bc3f (patch)  |                           |
| tree      | 9970190cb3f23208551dc56e764753988dbd57fb /drivers |                           |
| parent    | 17a7ce203490459cff14fb1c8f9a15d65fd1c544 (diff)   |                           |
udmabuf: implement begin_cpu_access/end_cpu_access hooks
With the misc device, we should end up using the result of
get_arch_dma_ops(..) or dma-direct ops. This allows us to have
WC mappings in the guest after synchronization.
Signed-off-by: Gurchetan Singh <gurchetansingh@chromium.org>
Link: http://patchwork.freedesktop.org/patch/msgid/20191203013627.85991-4-gurchetansingh@chromium.org
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
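
These hooks are reached through the generic dma-buf CPU-access bracket: in-kernel importers call dma_buf_begin_cpu_access() and dma_buf_end_cpu_access(), which dispatch to the ops installed by this patch. Below is a minimal sketch of that importer-side bracket, assuming `buf` is a dma-buf exported by udmabuf; the helper name cpu_read_example is hypothetical.

```c
#include <linux/dma-buf.h>
#include <linux/dma-direction.h>

/*
 * Hypothetical importer-side helper: bracket a CPU read of a
 * udmabuf-exported buffer.  The first begin_cpu_access call makes
 * udmabuf build and map its sg_table; later calls only sync it.
 */
static int cpu_read_example(struct dma_buf *buf)
{
	int ret;

	ret = dma_buf_begin_cpu_access(buf, DMA_FROM_DEVICE); /* -> begin_cpu_udmabuf() */
	if (ret < 0)
		return ret;

	/* ... access the backing pages here, e.g. via dma_buf_vmap() ... */

	return dma_buf_end_cpu_access(buf, DMA_FROM_DEVICE);  /* -> end_cpu_udmabuf() */
}
```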
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/dma-buf/udmabuf.c | 39 ++++++++++++++++++++++++++++++++
1 file changed, 39 insertions(+), 0 deletions(-)
```diff
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 0a610e09ae23..61b0a2cff874 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -18,6 +18,7 @@ static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes */
 struct udmabuf {
 	pgoff_t pagecount;
 	struct page **pages;
+	struct sg_table *sg;
 	struct miscdevice *device;
 };
 
@@ -98,20 +99,58 @@ static void unmap_udmabuf(struct dma_buf_attachment *at,
 static void release_udmabuf(struct dma_buf *buf)
 {
 	struct udmabuf *ubuf = buf->priv;
+	struct device *dev = ubuf->device->this_device;
 	pgoff_t pg;
 
+	if (ubuf->sg)
+		put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);
+
 	for (pg = 0; pg < ubuf->pagecount; pg++)
 		put_page(ubuf->pages[pg]);
 	kfree(ubuf->pages);
 	kfree(ubuf);
 }
 
+static int begin_cpu_udmabuf(struct dma_buf *buf,
+			     enum dma_data_direction direction)
+{
+	struct udmabuf *ubuf = buf->priv;
+	struct device *dev = ubuf->device->this_device;
+
+	if (!ubuf->sg) {
+		ubuf->sg = get_sg_table(dev, buf, direction);
+		if (IS_ERR(ubuf->sg))
+			return PTR_ERR(ubuf->sg);
+	} else {
+		dma_sync_sg_for_device(dev, ubuf->sg->sgl,
+				       ubuf->sg->nents,
+				       direction);
+	}
+
+	return 0;
+}
+
+static int end_cpu_udmabuf(struct dma_buf *buf,
+			   enum dma_data_direction direction)
+{
+	struct udmabuf *ubuf = buf->priv;
+	struct device *dev = ubuf->device->this_device;
+
+	if (!ubuf->sg)
+		return -EINVAL;
+
+	dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
+	return 0;
+}
+
 static const struct dma_buf_ops udmabuf_ops = {
 	.cache_sgt_mapping = true,
 	.map_dma_buf	   = map_udmabuf,
 	.unmap_dma_buf	   = unmap_udmabuf,
 	.release	   = release_udmabuf,
 	.mmap		   = mmap_udmabuf,
+	.begin_cpu_access  = begin_cpu_udmabuf,
+	.end_cpu_access	   = end_cpu_udmabuf,
 };
 
 #define SEALS_WANTED (F_SEAL_SHRINK)
```
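
From userspace, the same hooks are exercised via DMA_BUF_IOCTL_SYNC on the exported fd. The sketch below shows that path end to end, under stated assumptions: error handling is omitted, the buffer size and memfd name are illustrative, and the memfd must carry F_SEAL_SHRINK (the SEALS_WANTED value visible at the end of the diff).

```c
#define _GNU_SOURCE
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/dma-buf.h>
#include <linux/udmabuf.h>

int main(void)
{
	const size_t size = 4096;               /* must be page-aligned */
	struct udmabuf_create create = { 0 };
	struct dma_buf_sync sync = { 0 };
	int memfd, devfd, buffd;
	void *map;

	/* Backing memory: a sealed memfd (udmabuf checks SEALS_WANTED). */
	memfd = memfd_create("udmabuf-backing", MFD_ALLOW_SEALING);
	ftruncate(memfd, size);
	fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);

	/* Wrap the memfd pages in a dma-buf via the misc device. */
	devfd = open("/dev/udmabuf", O_RDWR);
	create.memfd  = memfd;
	create.flags  = UDMABUF_FLAGS_CLOEXEC;
	create.offset = 0;
	create.size   = size;
	buffd = ioctl(devfd, UDMABUF_CREATE, &create);

	map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, buffd, 0);

	/* Bracket the CPU write: START reaches begin_cpu_udmabuf() ... */
	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
	ioctl(buffd, DMA_BUF_IOCTL_SYNC, &sync);

	memset(map, 0xaa, size);                 /* CPU access */

	/* ... and END reaches end_cpu_udmabuf(). */
	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
	ioctl(buffd, DMA_BUF_IOCTL_SYNC, &sync);

	munmap(map, size);
	close(buffd);
	close(devfd);
	close(memfd);
	return 0;
}
```

The START/END pair maps one-to-one onto begin_cpu_udmabuf() and end_cpu_udmabuf(); without this bracket, CPU accesses through the mapping are not guaranteed to be coherent with the device.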