author    Daniel Vetter <daniel.vetter@ffwll.ch>    2019-11-04 18:38:01 +0100
committer Daniel Vetter <daniel.vetter@ffwll.ch>    2019-11-06 11:24:14 +0100
commit    6b1ce0a2009b7e73cf31c1d737f1b8e487f5e8f8 (patch)
tree      d42024925fa86f97831ab97a4346481f003afc6f /drivers
parent    03e0d26fcf791e48164ff7c280c71225c361a89e (diff)
drm/ttm: remove ttm_bo_wait_unreserved
With nouveau fixed, all ttm-using drivers have the correct nesting of
mmap_sem vs dma_resv, and we can just lock the buffer. Assuming I didn't
screw up anything with my audit, of course.

v2:
- Don't forget wu_mutex (Christian König)
- Keep the mmap_sem-less wait optimization (Thomas)
- Use _lock_interruptible to be good citizens (Thomas)

v3: Rebase over fault handler helperification.

Reviewed-by: Christian König <christian.koenig@amd.com> (v2)
Reviewed-by: Thomas Hellström <thellstrom@vmware.com> (v2)
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Christian Koenig <christian.koenig@amd.com>
Cc: Huang Rui <ray.huang@amd.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Cc: "VMware Graphics" <linux-graphics-maintainer@vmware.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191104173801.2972-3-daniel.vetter@ffwll.ch
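The heart of the patch is a locking-order contract rather than any single
hunk: every TTM driver now takes mmap_sem before the buffer's dma_resv lock,
so the fault path may simply block on the reservation lock. A minimal sketch
of the resulting idiom, with the VM_FAULT_RETRY bookkeeping of the real
ttm_bo_vm_reserve() omitted for clarity (the helper name here is invented):

    #include <linux/dma-resv.h>
    #include <linux/mm.h>

    #include <drm/ttm/ttm_bo_api.h>

    /*
     * Sketch only. With the nesting mmap_sem -> dma_resv guaranteed by
     * all drivers, blocking on the reservation lock under mmap_sem is
     * deadlock-free: anyone holding bo->base.resv who also needs
     * mmap_sem must have taken mmap_sem first, so they cannot be
     * waiting on us.
     */
    static vm_fault_t sketch_fault_reserve(struct ttm_buffer_object *bo)
    {
            /* Fast path: uncontended, no sleeping. */
            if (dma_resv_trylock(bo->base.resv))
                    return 0;

            /* Slow path: sleep until the buffer is unreserved. */
            if (dma_resv_lock_interruptible(bo->base.resv, NULL))
                    return VM_FAULT_NOPAGE; /* signal pending; fault is retried */

            return 0;
    }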
Diffstat (limited to 'drivers')
-rw-r--r--    drivers/gpu/drm/ttm/ttm_bo.c         36
-rw-r--r--    drivers/gpu/drm/ttm/ttm_bo_util.c     1
-rw-r--r--    drivers/gpu/drm/ttm/ttm_bo_vm.c      18
3 files changed, 5 insertions, 50 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index d52fc16266ce..7e7925fecd9e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -161,7 +161,6 @@ static void ttm_bo_release_list(struct kref *list_kref)
dma_fence_put(bo->moving);
if (!ttm_bo_uses_embedded_gem_object(bo))
dma_resv_fini(&bo->base._resv);
- mutex_destroy(&bo->wu_mutex);
bo->destroy(bo);
ttm_mem_global_free(&ttm_mem_glob, acc_size);
}
@@ -1291,7 +1290,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
INIT_LIST_HEAD(&bo->ddestroy);
INIT_LIST_HEAD(&bo->swap);
INIT_LIST_HEAD(&bo->io_reserve_lru);
- mutex_init(&bo->wu_mutex);
bo->bdev = bdev;
bo->type = type;
bo->num_pages = num_pages;
@@ -1895,37 +1893,3 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
while (ttm_bo_swapout(&ttm_bo_glob, &ctx) == 0);
}
EXPORT_SYMBOL(ttm_bo_swapout_all);
-
-/**
- * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
- * unreserved
- *
- * @bo: Pointer to buffer
- */
-int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
-{
- int ret;
-
- /*
- * In the absense of a wait_unlocked API,
- * Use the bo::wu_mutex to avoid triggering livelocks due to
- * concurrent use of this function. Note that this use of
- * bo::wu_mutex can go away if we change locking order to
- * mmap_sem -> bo::reserve.
- */
- ret = mutex_lock_interruptible(&bo->wu_mutex);
- if (unlikely(ret != 0))
- return -ERESTARTSYS;
- if (!dma_resv_is_locked(bo->base.resv))
- goto out_unlock;
- ret = dma_resv_lock_interruptible(bo->base.resv, NULL);
- if (ret == -EINTR)
- ret = -ERESTARTSYS;
- if (unlikely(ret != 0))
- goto out_unlock;
- dma_resv_unlock(bo->base.resv);
-
-out_unlock:
- mutex_unlock(&bo->wu_mutex);
- return ret;
-}
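With the helper gone, the one remaining "wait until unreserved" case is
open-coded at its single call site in ttm_bo_vm.c below: take the
reservation lock interruptibly, then drop it straight away. Isolated as a
hedged sketch (function name invented):

    /*
     * Sketch of the idiom that replaces ttm_bo_wait_unreserved():
     * acquiring and immediately releasing the lock waits out the
     * current holder. The old wu_mutex serialization is no longer
     * needed, because blocking on dma_resv under mmap_sem cannot
     * livelock once the mmap_sem -> dma_resv ordering holds everywhere.
     */
    static void sketch_wait_unreserved(struct ttm_buffer_object *bo)
    {
            if (!dma_resv_lock_interruptible(bo->base.resv, NULL))
                    dma_resv_unlock(bo->base.resv);
            /* On a signal we just return; the caller retries the fault. */
    }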
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 6b0883a1776e..2b0e5a088da0 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -504,7 +504,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
INIT_LIST_HEAD(&fbo->base.lru);
INIT_LIST_HEAD(&fbo->base.swap);
INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
- mutex_init(&fbo->base.wu_mutex);
fbo->base.moving = NULL;
drm_vma_node_reset(&fbo->base.base.vma_node);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 11863fbdd5d6..91466cfb6f16 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -128,30 +128,22 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
struct vm_fault *vmf)
{
- /*
- * Work around locking order reversal in fault / nopfn
- * between mmap_sem and bo_reserve: Perform a trylock operation
- * for reserve, and if it fails, retry the fault after waiting
- * for the buffer to become unreserved.
- */
if (unlikely(!dma_resv_trylock(bo->base.resv))) {
if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
ttm_bo_get(bo);
up_read(&vmf->vma->vm_mm->mmap_sem);
- (void) ttm_bo_wait_unreserved(bo);
+ if (!dma_resv_lock_interruptible(bo->base.resv,
+ NULL))
+ dma_resv_unlock(bo->base.resv);
ttm_bo_put(bo);
}
return VM_FAULT_RETRY;
}
- /*
- * If we'd want to change locking order to
- * mmap_sem -> bo::reserve, we'd use a blocking reserve here
- * instead of retrying the fault...
- */
- return VM_FAULT_NOPAGE;
+ if (dma_resv_lock_interruptible(bo->base.resv, NULL))
+ return VM_FAULT_NOPAGE;
}
return 0;
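For context, ttm_bo_vm_reserve() is meant to be the first thing a driver
fault handler calls, forwarding any nonzero return to the fault machinery;
on a zero return the handler proceeds with bo->base.resv held. A
hypothetical caller (driver name and fault body invented for illustration):

    /* Hypothetical driver fault handler showing the calling convention. */
    static vm_fault_t mydrv_ttm_fault(struct vm_fault *vmf)
    {
            struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
            vm_fault_t ret;

            /*
             * Returns 0 with bo->base.resv held, or VM_FAULT_RETRY /
             * VM_FAULT_NOPAGE, which we pass straight back.
             */
            ret = ttm_bo_vm_reserve(bo, vmf);
            if (ret)
                    return ret;

            /* ... driver-specific page table setup under the resv lock ... */

            dma_resv_unlock(bo->base.resv);
            return VM_FAULT_NOPAGE;
    }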