path: root/drivers/gpu/drm/ttm/ttm_bo_util.c
author     Marek Olšák <maraeo@gmail.com>    2011-08-13 20:32:11 +0000
committer  Dave Airlie <airlied@redhat.com>  2011-08-31 19:25:35 +0100
commit     dfadbbdb57b3f2bb33e14f129a43047c6f0caefa (patch)
tree       0a6e00b5fa56d9f5a5b4dc833e3057e4660efd1c /drivers/gpu/drm/ttm/ttm_bo_util.c
parent     b464e9a25c27884eb8ee2c2bb904ec50bd3990ea (diff)
drm/ttm: add a way to bo_wait for either the last read or last write
Sometimes we want to know whether a buffer is busy and wait for it (bo_wait). However, sometimes it would be more useful to be able to query whether a buffer is busy and being either read or written, and wait until it has stopped being read or written. The point of this is to avoid unnecessary waiting: e.g. if the GPU has written something to a buffer and is now reading that buffer, and the CPU wants to map that buffer for read, it only needs to wait for the last write. If there was no write, no waiting is needed at all.

This, of course, requires user space drivers to send read/write flags with each relocation (like the read/write domains we already have in radeon, so we can actually use those for something useful now).

Now how this patch works:

The read/write flags should be passed to ttm_validate_buffer. TTM maintains separate sync objects for the last read and the last write of each buffer, in addition to the sync object for the last use of the buffer. ttm_bo_wait then operates on one of these sync objects.

Signed-off-by: Marek Olšák <maraeo@gmail.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
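To make the use case above concrete, here is a minimal caller-side sketch (not part of this patch). The helper example_map_for_cpu_read() is hypothetical, and the TTM_USAGE_WRITE flag together with the extra usage argument to ttm_bo_wait() are assumed from the description above; only TTM_USAGE_READWRITE is visible in the hunks below.

#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_bo_driver.h"

/*
 * Hypothetical sketch: before mapping a buffer for CPU reads, wait
 * only for the last GPU write. A buffer that is merely being read by
 * the GPU can be mapped without waiting. TTM_USAGE_WRITE and the
 * fifth parameter of ttm_bo_wait() are assumptions based on the
 * commit description, not part of this file's diff.
 */
static int example_map_for_cpu_read(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, true, false, TTM_USAGE_WRITE);
	spin_unlock(&bdev->fence_lock);

	return ret;
}

Waiting with TTM_USAGE_READWRITE, as the eviction path in ttm_bo_move_accel_cleanup() does below, keeps the old behaviour of waiting for the last use of the buffer.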
Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_bo_util.c')
-rw-r--r--    drivers/gpu/drm/ttm/ttm_bo_util.c    26
1 file changed, 21 insertions, 5 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index ae3c6f5dd2b7..6135f58169ce 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -436,6 +436,8 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	atomic_set(&fbo->cpu_writers, 0);
 
 	fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
+	fbo->sync_obj_read = driver->sync_obj_ref(bo->sync_obj_read);
+	fbo->sync_obj_write = driver->sync_obj_ref(bo->sync_obj_write);
 	kref_init(&fbo->list_kref);
 	kref_init(&fbo->kref);
 	fbo->destroy = &ttm_transfered_destroy;
@@ -618,20 +620,30 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 	struct ttm_mem_reg *old_mem = &bo->mem;
 	int ret;
 	struct ttm_buffer_object *ghost_obj;
-	void *tmp_obj = NULL;
+	void *tmp_obj = NULL, *tmp_obj_read = NULL, *tmp_obj_write = NULL;
 
 	spin_lock(&bdev->fence_lock);
-	if (bo->sync_obj) {
+	if (bo->sync_obj)
 		tmp_obj = bo->sync_obj;
-		bo->sync_obj = NULL;
-	}
+	if (bo->sync_obj_read)
+		tmp_obj_read = bo->sync_obj_read;
+	if (bo->sync_obj_write)
+		tmp_obj_write = bo->sync_obj_write;
+
 	bo->sync_obj = driver->sync_obj_ref(sync_obj);
+	bo->sync_obj_read = driver->sync_obj_ref(sync_obj);
+	bo->sync_obj_write = driver->sync_obj_ref(sync_obj);
 	bo->sync_obj_arg = sync_obj_arg;
 	if (evict) {
-		ret = ttm_bo_wait(bo, false, false, false);
+		ret = ttm_bo_wait(bo, false, false, false,
+				  TTM_USAGE_READWRITE);
 		spin_unlock(&bdev->fence_lock);
 		if (tmp_obj)
 			driver->sync_obj_unref(&tmp_obj);
+		if (tmp_obj_read)
+			driver->sync_obj_unref(&tmp_obj_read);
+		if (tmp_obj_write)
+			driver->sync_obj_unref(&tmp_obj_write);
 		if (ret)
 			return ret;
 
@@ -655,6 +667,10 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 		spin_unlock(&bdev->fence_lock);
 		if (tmp_obj)
 			driver->sync_obj_unref(&tmp_obj);
+		if (tmp_obj_read)
+			driver->sync_obj_unref(&tmp_obj_read);
+		if (tmp_obj_write)
+			driver->sync_obj_unref(&tmp_obj_write);
 
 		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
 		if (ret)