author     Chris Wilson <chris@chris-wilson.co.uk>    2019-10-04 14:39:58 +0100
committer  Chris Wilson <chris@chris-wilson.co.uk>    2019-10-04 15:39:02 +0100
commit     2850748ef8763ab46958e43a4d1c445f29eeb37d (patch)
tree       ba213c8039460e33090e9d8bb39b91d102a3a85d /drivers/gpu/drm/i915/gem
parent     11331125e1480ff786be9d2051301401b652bbe1 (diff)
drm/i915: Pull i915_vma_pin under the vm->mutex
Replace the struct_mutex requirement for pinning the i915_vma with the
local vm->mutex instead. Note that the vm->mutex is tainted by the
shrinker (we require unbinding from inside fs-reclaim) and so we cannot
allocate while holding that mutex. Instead we have to preallocate
workers to do the allocation and apply the PTE updates after we have
reserved their slot in the drm_mm (using fences to order the PTE writes
with the GPU work and with later unbind).

In adding the asynchronous vma binding, one subtle requirement is to
avoid coupling the binding fence into the backing object->resv. That is,
the asynchronous binding only applies to the vma timeline itself and not
to the pages, as that is a more global timeline (the binding of one vma
does not need to be ordered with another vma, nor does the implicit GEM
fencing depend on a vma, only on writes to the backing store). Keeping
the vma binding distinct from the backing store timelines is verified by
a number of async gem_exec_fence and gem_exec_schedule tests. The way we
do this is quite simple: we keep the fence for the vma binding separate,
only wait on it as required, and never add it to the obj->resv itself.

Another consequence of reducing the locking around the vma is that
destruction of the vma is no longer globally serialised by struct_mutex.
A natural solution would be to add a kref to i915_vma, but that requires
decoupling the reference cycles, possibly by introducing a new
i915_mm_pages object that is owned by both obj->mm and vma->pages.
However, we have not taken that route due to the overshadowing lmem/ttm
discussions, and instead play a series of complicated games with
trylocks to (hopefully) ensure that only one destruction path is called!

v2: Add some commentary, and some helpers to reduce patch churn.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191004134015.13204-4-chris@chris-wilson.co.uk
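To make the scheme concrete, here is a minimal sketch of an
allocation-free bind under vm->mutex with the PTE writes deferred to a
preallocated worker. All names here (example_vma, reserve_drm_mm_slot,
apply_ptes) are hypothetical stand-ins, not driver code, and a plain
completion stands in for the dma_fence that the patch keeps on the vma
timeline:

#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Hypothetical stand-ins for the structures touched by this patch. */
struct example_vm {
	struct mutex mutex;		/* the vm->mutex of this patch */
};

struct example_vma {
	struct example_vm *vm;
	struct completion bound;	/* stands in for the vma binding fence */
};

struct async_bind {
	struct work_struct work;
	struct example_vma *vma;
};

extern int reserve_drm_mm_slot(struct example_vma *vma);	/* must not allocate */
extern void apply_ptes(struct example_vma *vma);		/* allocation-free PTE writes */

static void async_bind_worker(struct work_struct *wrk)
{
	struct async_bind *ab = container_of(wrk, struct async_bind, work);

	apply_ptes(ab->vma);
	complete(&ab->vma->bound);	/* later unbind/GPU work waits on this */
	kfree(ab);
}

static int example_vma_bind_async(struct example_vma *vma)
{
	struct async_bind *ab;
	int err;

	/* Preallocate before taking vm->mutex: no allocation under it. */
	ab = kmalloc(sizeof(*ab), GFP_KERNEL);
	if (!ab)
		return -ENOMEM;

	mutex_lock(&vma->vm->mutex);
	err = reserve_drm_mm_slot(vma);	/* reserve the drm_mm slot only */
	mutex_unlock(&vma->vm->mutex);
	if (err) {
		kfree(ab);
		return err;
	}

	/* The binding "fence" stays on the vma, never in obj->resv. */
	init_completion(&vma->bound);
	INIT_WORK(&ab->work, async_bind_worker);
	ab->vma = vma;
	queue_work(system_unbound_wq, &ab->work);
	return 0;
}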
Diffstat (limited to 'drivers/gpu/drm/i915/gem')
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_client_blt.c         |  2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.c            | 20
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_domain.c             | 13
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c         | 43
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_mman.c               | 20
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.c             | 33
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.h             |  5
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shrinker.c           | 73
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.c             |  8
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_tiling.c             | 27
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_userptr.c            | 27
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/huge_pages.c        |  9
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c  | 12
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c     |  4
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c     |  7
15 files changed, 96 insertions(+), 207 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
index 7f61a8024133..c1fca5728e6e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -211,7 +211,7 @@ static void clear_pages_worker(struct work_struct *work)
* keep track of the GPU activity within this vma/request, and
* propagate the signal from the request to w->dma.
*/
- err = i915_active_add_request(&vma->active, rq);
+ err = __i915_vma_move_to_active(vma, rq);
if (err)
goto out_request;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index f7ba0935ed67..95f8e66e45db 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -313,8 +313,6 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
release_hw_id(ctx);
- if (ctx->vm)
- i915_vm_put(ctx->vm);
free_engines(rcu_access_pointer(ctx->engines));
mutex_destroy(&ctx->engines_mutex);
@@ -379,9 +377,13 @@ void i915_gem_context_release(struct kref *ref)
static void context_close(struct i915_gem_context *ctx)
{
+ i915_gem_context_set_closed(ctx);
+
+ if (ctx->vm)
+ i915_vm_close(ctx->vm);
+
mutex_lock(&ctx->mutex);
- i915_gem_context_set_closed(ctx);
ctx->file_priv = ERR_PTR(-EBADF);
/*
@@ -474,7 +476,7 @@ __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
- ctx->vm = i915_vm_get(vm);
+ ctx->vm = i915_vm_open(vm);
context_apply_all(ctx, __apply_ppgtt, vm);
return old;
@@ -488,7 +490,7 @@ static void __assign_ppgtt(struct i915_gem_context *ctx,
vm = __set_ppgtt(ctx, vm);
if (vm)
- i915_vm_put(vm);
+ i915_vm_close(vm);
}
static void __set_timeline(struct intel_timeline **dst,
@@ -953,7 +955,7 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
if (ret < 0)
goto err_unlock;
- i915_vm_get(vm);
+ i915_vm_open(vm);
args->size = 0;
args->value = ret;
@@ -973,7 +975,7 @@ static void set_ppgtt_barrier(void *data)
if (INTEL_GEN(old->i915) < 8)
gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old));
- i915_vm_put(old);
+ i915_vm_close(old);
}
static int emit_ppgtt_update(struct i915_request *rq, void *data)
@@ -1090,8 +1092,8 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
set_ppgtt_barrier,
old);
if (err) {
- i915_vm_put(__set_ppgtt(ctx, old));
- i915_vm_put(old);
+ i915_vm_close(__set_ppgtt(ctx, old));
+ i915_vm_close(old);
}
unlock:
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 55c3ab59e3aa..9937b4c341f1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -288,7 +288,12 @@ restart:
if (!drm_mm_node_allocated(&vma->node))
continue;
- ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
+ /* Wait for an earlier async bind, need to rewrite it */
+ ret = i915_vma_sync(vma);
+ if (ret)
+ return ret;
+
+ ret = i915_vma_bind(vma, cache_level, PIN_UPDATE, NULL);
if (ret)
return ret;
}
@@ -391,16 +396,11 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
if (ret)
goto out;
- ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
- if (ret)
- goto out;
-
ret = i915_gem_object_lock_interruptible(obj);
if (ret == 0) {
ret = i915_gem_object_set_cache_level(obj, level);
i915_gem_object_unlock(obj);
}
- mutex_unlock(&i915->drm.struct_mutex);
out:
i915_gem_object_put(obj);
@@ -485,6 +485,7 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
if (!drm_mm_node_allocated(&vma->node))
continue;
+ GEM_BUG_ON(vma->vm != &i915->ggtt.vm);
list_move_tail(&vma->vm_link, &vma->vm->bound_list);
}
mutex_unlock(&i915->ggtt.vm.mutex);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 228ce24ea280..42c15e8bf166 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -698,7 +698,9 @@ static int eb_reserve(struct i915_execbuffer *eb)
case 1:
/* Too fragmented, unbind everything and retry */
+ mutex_lock(&eb->context->vm->mutex);
err = i915_gem_evict_vm(eb->context->vm);
+ mutex_unlock(&eb->context->vm->mutex);
if (err)
return err;
break;
@@ -972,7 +974,9 @@ static void reloc_cache_reset(struct reloc_cache *cache)
ggtt->vm.clear_range(&ggtt->vm,
cache->node.start,
cache->node.size);
+ mutex_lock(&ggtt->vm.mutex);
drm_mm_remove_node(&cache->node);
+ mutex_unlock(&ggtt->vm.mutex);
} else {
i915_vma_unpin((struct i915_vma *)cache->node.mm);
}
@@ -1047,11 +1051,13 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
PIN_NOEVICT);
if (IS_ERR(vma)) {
memset(&cache->node, 0, sizeof(cache->node));
+ mutex_lock(&ggtt->vm.mutex);
err = drm_mm_insert_node_in_range
(&ggtt->vm.mm, &cache->node,
PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) /* no inactive aperture space, use cpu reloc */
return NULL;
} else {
@@ -1416,7 +1422,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
IS_GEN(eb->i915, 6)) {
err = i915_vma_bind(target, target->obj->cache_level,
- PIN_GLOBAL);
+ PIN_GLOBAL, NULL);
if (WARN_ONCE(err,
"Unexpected failure to bind target VMA!"))
return err;
@@ -2142,35 +2148,6 @@ static struct i915_request *eb_throttle(struct intel_context *ce)
return i915_request_get(rq);
}
-static int
-__eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
-{
- int err;
-
- if (likely(atomic_inc_not_zero(&ce->pin_count)))
- return 0;
-
- err = mutex_lock_interruptible(&eb->i915->drm.struct_mutex);
- if (err)
- return err;
-
- err = __intel_context_do_pin(ce);
- mutex_unlock(&eb->i915->drm.struct_mutex);
-
- return err;
-}
-
-static void
-__eb_unpin_context(struct i915_execbuffer *eb, struct intel_context *ce)
-{
- if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
- return;
-
- mutex_lock(&eb->i915->drm.struct_mutex);
- intel_context_unpin(ce);
- mutex_unlock(&eb->i915->drm.struct_mutex);
-}
-
static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
{
struct intel_timeline *tl;
@@ -2190,7 +2167,7 @@ static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
* GGTT space, so do this first before we reserve a seqno for
* ourselves.
*/
- err = __eb_pin_context(eb, ce);
+ err = intel_context_pin(ce);
if (err)
return err;
@@ -2234,7 +2211,7 @@ err_exit:
intel_context_exit(ce);
intel_context_timeline_unlock(tl);
err_unpin:
- __eb_unpin_context(eb, ce);
+ intel_context_unpin(ce);
return err;
}
@@ -2247,7 +2224,7 @@ static void eb_unpin_engine(struct i915_execbuffer *eb)
intel_context_exit(ce);
mutex_unlock(&tl->mutex);
- __eb_unpin_context(eb, ce);
+ intel_context_unpin(ce);
}
static unsigned int
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index dd0c2840ba4d..c19431d609fc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -249,16 +249,6 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
if (ret)
goto err_rpm;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- goto err_reset;
-
- /* Access to snoopable pages through the GTT is incoherent. */
- if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
- ret = -EFAULT;
- goto err_unlock;
- }
-
/* Now pin it into the GTT as needed */
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
PIN_MAPPABLE |
@@ -291,7 +281,13 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
}
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
- goto err_unlock;
+ goto err_reset;
+ }
+
+ /* Access to snoopable pages through the GTT is incoherent. */
+ if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
+ ret = -EFAULT;
+ goto err_unpin;
}
ret = i915_vma_pin_fence(vma);
@@ -329,8 +325,6 @@ err_fence:
i915_vma_unpin_fence(vma);
err_unpin:
__i915_vma_unpin(vma);
-err_unlock:
- mutex_unlock(&dev->struct_mutex);
err_reset:
intel_gt_reset_unlock(ggtt->vm.gt, srcu);
err_rpm:
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 0ef60dae23a7..dbf9be9a79f4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -155,21 +155,30 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
llist_for_each_entry_safe(obj, on, freed, freed) {
- struct i915_vma *vma, *vn;
-
trace_i915_gem_object_destroy(obj);
- mutex_lock(&i915->drm.struct_mutex);
-
- list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) {
- GEM_BUG_ON(i915_vma_is_active(vma));
- atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
- i915_vma_destroy(vma);
+ if (!list_empty(&obj->vma.list)) {
+ struct i915_vma *vma;
+
+ /*
+ * Note that the vma keeps an object reference while
+ * it is active, so it *should* not sleep while we
+ * destroy it. Our debug code, however, insists it *might*.
+ * For the moment, play along.
+ */
+ spin_lock(&obj->vma.lock);
+ while ((vma = list_first_entry_or_null(&obj->vma.list,
+ struct i915_vma,
+ obj_link))) {
+ GEM_BUG_ON(vma->obj != obj);
+ spin_unlock(&obj->vma.lock);
+
+ i915_vma_destroy(vma);
+
+ spin_lock(&obj->vma.lock);
+ }
+ spin_unlock(&obj->vma.lock);
}
- GEM_BUG_ON(!list_empty(&obj->vma.list));
- GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma.tree));
-
- mutex_unlock(&i915->drm.struct_mutex);
GEM_BUG_ON(atomic_read(&obj->bind_count));
GEM_BUG_ON(obj->userfault_count);
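The free-path loop added above follows a standard drain pattern: the
spinlock guarding obj->vma.list cannot be held across i915_vma_destroy(),
which may sleep, so entries are popped one at a time with the lock
retaken in between. The same pattern in isolation (a sketch only; struct
entry and destroy_entry() are hypothetical, and unlike the i915 loop this
variant unlinks the entry itself rather than relying on the destructor):

#include <linux/list.h>
#include <linux/spinlock.h>

struct entry {
	struct list_head link;
};

extern void destroy_entry(struct entry *e);	/* may sleep */

static void drain_list(struct list_head *head, spinlock_t *lock)
{
	struct entry *e;

	spin_lock(lock);
	while ((e = list_first_entry_or_null(head, struct entry, link))) {
		list_del(&e->link);	/* claim the entry under the lock */
		spin_unlock(lock);

		destroy_entry(e);	/* safe to sleep here */

		spin_lock(lock);
	}
	spin_unlock(lock);
}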
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 53c7069ba3e8..086a9bf5adcc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -106,6 +106,11 @@ static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
dma_resv_lock(obj->base.resv, NULL);
}
+static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
+{
+ return dma_resv_trylock(obj->base.resv);
+}
+
static inline int
i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
{
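The new i915_gem_object_trylock() is a thin wrapper over
dma_resv_trylock(), which returns true if the reservation lock was
taken. A sketch of the opportunistic use it enables on paths that must
not block, such as reclaim (try_cleanup() and defer_cleanup() are
hypothetical stand-ins, not driver functions):

	if (i915_gem_object_trylock(obj)) {
		try_cleanup(obj);	/* we own the lock, do the work now */
		i915_gem_object_unlock(obj);
	} else {
		defer_cleanup(obj);	/* contended: retry outside reclaim */
	}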
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index d2c05d752909..fd3ce6da8497 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -16,40 +16,6 @@
#include "i915_trace.h"
-static bool shrinker_lock(struct drm_i915_private *i915,
- unsigned int flags,
- bool *unlock)
-{
- struct mutex *m = &i915->drm.struct_mutex;
-
- switch (mutex_trylock_recursive(m)) {
- case MUTEX_TRYLOCK_RECURSIVE:
- *unlock = false;
- return true;
-
- case MUTEX_TRYLOCK_FAILED:
- *unlock = false;
- if (flags & I915_SHRINK_ACTIVE &&
- mutex_lock_killable_nested(m, I915_MM_SHRINKER) == 0)
- *unlock = true;
- return *unlock;
-
- case MUTEX_TRYLOCK_SUCCESS:
- *unlock = true;
- return true;
- }
-
- BUG();
-}
-
-static void shrinker_unlock(struct drm_i915_private *i915, bool unlock)
-{
- if (!unlock)
- return;
-
- mutex_unlock(&i915->drm.struct_mutex);
-}
-
static bool swap_available(void)
{
return get_nr_swap_pages() > 0;
@@ -155,10 +121,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
intel_wakeref_t wakeref = 0;
unsigned long count = 0;
unsigned long scanned = 0;
- bool unlock;
-
- if (!shrinker_lock(i915, shrink, &unlock))
- return 0;
/*
* When shrinking the active list, we should also consider active
@@ -268,8 +230,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
if (shrink & I915_SHRINK_BOUND)
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- shrinker_unlock(i915, unlock);
-
if (nr_scanned)
*nr_scanned += scanned;
return count;
@@ -339,19 +299,14 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
struct drm_i915_private *i915 =
container_of(shrinker, struct drm_i915_private, mm.shrinker);
unsigned long freed;
- bool unlock;
sc->nr_scanned = 0;
- if (!shrinker_lock(i915, 0, &unlock))
- return SHRINK_STOP;
-
freed = i915_gem_shrink(i915,
sc->nr_to_scan,
&sc->nr_scanned,
I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND |
- I915_SHRINK_WRITEBACK);
+ I915_SHRINK_UNBOUND);
if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
intel_wakeref_t wakeref;
@@ -366,8 +321,6 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
}
}
- shrinker_unlock(i915, unlock);
-
return sc->nr_scanned ? freed : SHRINK_STOP;
}
@@ -384,6 +337,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
freed_pages = 0;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
freed_pages += i915_gem_shrink(i915, -1UL, NULL,
+ I915_SHRINK_ACTIVE |
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_WRITEBACK);
@@ -419,10 +373,6 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
struct i915_vma *vma, *next;
unsigned long freed_pages = 0;
intel_wakeref_t wakeref;
- bool unlock;
-
- if (!shrinker_lock(i915, 0, &unlock))
- return NOTIFY_DONE;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
freed_pages += i915_gem_shrink(i915, -1UL, NULL,
@@ -439,15 +389,11 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
if (!vma->iomap || i915_vma_is_active(vma))
continue;
- mutex_unlock(&i915->ggtt.vm.mutex);
- if (i915_vma_unbind(vma) == 0)
+ if (__i915_vma_unbind(vma) == 0)
freed_pages += count;
- mutex_lock(&i915->ggtt.vm.mutex);
}
mutex_unlock(&i915->ggtt.vm.mutex);
- shrinker_unlock(i915, unlock);
-
*(unsigned long *)ptr += freed_pages;
return NOTIFY_DONE;
}
@@ -490,22 +436,9 @@ void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
fs_reclaim_acquire(GFP_KERNEL);
- /*
- * As we invariably rely on the struct_mutex within the shrinker,
- * but have a complicated recursion dance, taint all the mutexes used
- * within the shrinker with the struct_mutex. For completeness, we
- * taint with all subclass of struct_mutex, even though we should
- * only need tainting by I915_MM_NORMAL to catch possible ABBA
- * deadlocks from using struct_mutex inside @mutex.
- */
- mutex_acquire(&i915->drm.struct_mutex.dep_map,
- I915_MM_SHRINKER, 0, _RET_IP_);
-
mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
mutex_release(&mutex->dep_map, 0, _RET_IP_);
- mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_);
-
fs_reclaim_release(GFP_KERNEL);
if (unlock)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index e45eb8721850..fad98a921cde 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -624,8 +624,6 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return NULL;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
&stolen_offset, &gtt_offset, &size);
@@ -677,21 +675,25 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
* setting up the GTT space. The actual reservation will occur
* later.
*/
+ mutex_lock(&ggtt->vm.mutex);
ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
size, gtt_offset, obj->cache_level,
0);
if (ret) {
DRM_DEBUG_DRIVER("failed to allocate stolen GTT space\n");
+ mutex_unlock(&ggtt->vm.mutex);
goto err_pages;
}
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ GEM_BUG_ON(vma->pages);
vma->pages = obj->mm.pages;
+ atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
+
set_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
__i915_vma_set_map_and_fenceable(vma);
- mutex_lock(&ggtt->vm.mutex);
list_add_tail(&vma->vm_link, &ggtt->vm.bound_list);
mutex_unlock(&ggtt->vm.mutex);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index e5d1ae8d4dba..dc2a83ce44d5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -181,22 +181,25 @@ static int
i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj,
int tiling_mode, unsigned int stride)
{
+ struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt;
struct i915_vma *vma;
- int ret;
+ int ret = 0;
if (tiling_mode == I915_TILING_NONE)
return 0;
+ mutex_lock(&ggtt->vm.mutex);
for_each_ggtt_vma(vma, obj) {
if (i915_vma_fence_prepare(vma, tiling_mode, stride))
continue;
- ret = i915_vma_unbind(vma);
+ ret = __i915_vma_unbind(vma);
if (ret)
- return ret;
+ break;
}
+ mutex_unlock(&ggtt->vm.mutex);
- return 0;
+ return ret;
}
int
@@ -212,7 +215,6 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
GEM_BUG_ON(!i915_tiling_ok(obj, tiling, stride));
GEM_BUG_ON(!stride ^ (tiling == I915_TILING_NONE));
- lockdep_assert_held(&i915->drm.struct_mutex);
if ((tiling | stride) == obj->tiling_and_stride)
return 0;
@@ -233,16 +235,18 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
* whilst executing a fenced command for an untiled object.
*/
- err = i915_gem_object_fence_prepare(obj, tiling, stride);
- if (err)
- return err;
-
i915_gem_object_lock(obj);
if (i915_gem_object_is_framebuffer(obj)) {
i915_gem_object_unlock(obj);
return -EBUSY;
}
+ err = i915_gem_object_fence_prepare(obj, tiling, stride);
+ if (err) {
+ i915_gem_object_unlock(obj);
+ return err;
+ }
+
/* If the memory has unknown (i.e. varying) swizzling, we pin the
* pages to prevent them being swapped out and causing corruption
* due to the change in swizzling.
@@ -368,12 +372,7 @@ i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
}
}
- err = mutex_lock_interruptible(&dev->struct_mutex);
- if (err)
- goto err;
-
err = i915_gem_object_set_tiling(obj, args->tiling_mode, args->stride);
- mutex_unlock(&dev->struct_mutex);
/* We have to maintain this existing ABI... */
args->stride = i915_gem_object_get_stride(obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index a0d974ac78a7..3798502097b4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -92,7 +92,6 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
struct i915_mmu_notifier *mn =
container_of(_mn, struct i915_mmu_notifier, mn);
struct interval_tree_node *it;
- struct mutex *unlock = NULL;
unsigned long end;
int ret = 0;
@@ -129,33 +128,13 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
}
spin_unlock(&mn->lock);
- if (!unlock) {
- unlock = &mn->mm->i915->drm.struct_mutex;
-
- switch (mutex_trylock_recursive(unlock)) {
- default:
- case MUTEX_TRYLOCK_FAILED:
- if (mutex_lock_killable_nested(unlock, I915_MM_SHRINKER)) {
- i915_gem_object_put(obj);
- return -EINTR;
- }
- /* fall through */
- case MUTEX_TRYLOCK_SUCCESS:
- break;
-
- case MUTEX_TRYLOCK_RECURSIVE:
- unlock = ERR_PTR(-EEXIST);
- break;
- }
- }
-
ret = i915_gem_object_unbind(obj,
I915_GEM_OBJECT_UNBIND_ACTIVE);
if (ret == 0)
ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
i915_gem_object_put(obj);
if (ret)
- goto unlock;
+ return ret;
spin_lock(&mn->lock);
@@ -168,10 +147,6 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
}
spin_unlock(&mn->lock);
-unlock:
- if (!IS_ERR_OR_NULL(unlock))
- mutex_unlock(unlock);
-
return ret;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index c5cea4379216..98b2a6ccfcc1 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -333,7 +333,12 @@ static int igt_check_page_sizes(struct i915_vma *vma)
struct drm_i915_private *i915 = vma->vm->i915;
unsigned int supported = INTEL_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj = vma->obj;
- int err = 0;
+ int err;
+
+ /* We have to wait for the async bind to complete before our asserts */
+ err = i915_vma_sync(vma);
+ if (err)
+ return err;
if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) {
pr_err("unsupported page_sizes.sg=%u, supported=%u\n",
@@ -1390,7 +1395,7 @@ static int igt_ppgtt_pin_update(void *arg)
goto out_unpin;
}
- err = i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE);
+ err = i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE, NULL);
if (err)
goto out_unpin;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 0f4d0644a480..8eba0d3a31de 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -971,10 +971,7 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
if (err)
goto skip_request;
- i915_vma_unpin(batch);
- i915_vma_close(batch);
- i915_vma_put(batch);
-
+ i915_vma_unpin_and_release(&batch, 0);
i915_vma_unpin(vma);
*rq_out = i915_request_get(rq);
@@ -988,8 +985,7 @@ skip_request:
err_request:
i915_request_add(rq);
err_batch:
- i915_vma_unpin(batch);
- i915_vma_put(batch);
+ i915_vma_unpin_and_release(&batch, 0);
err_vma:
i915_vma_unpin(vma);
@@ -1533,9 +1529,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
if (err)
goto skip_request;
- i915_vma_unpin(vma);
- i915_vma_close(vma);
- i915_vma_put(vma);
+ i915_vma_unpin_and_release(&vma, 0);
i915_request_add(rq);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index aefe557527f8..36aca1c172e7 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -322,7 +322,6 @@ static int igt_partial_tiling(void *arg)
goto out;
}
- mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
if (1) {
@@ -415,7 +414,6 @@ next_tiling: ;
out_unlock:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
i915_gem_object_unpin_pages(obj);
out:
i915_gem_object_put(obj);
@@ -458,7 +456,6 @@ static int igt_smoke_tiling(void *arg)
goto out;
}
- mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
count = 0;
@@ -508,7 +505,6 @@ static int igt_smoke_tiling(void *arg)
pr_info("%s: Completed %lu trials\n", __func__, count);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
i915_gem_object_unpin_pages(obj);
out:
i915_gem_object_put(obj);
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index ee5dc13a30b3..6718da20f35d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
@@ -154,9 +154,7 @@ int igt_gpu_fill_dw(struct intel_context *ce,
i915_request_add(rq);
- i915_vma_unpin(batch);
- i915_vma_close(batch);
- i915_vma_put(batch);
+ i915_vma_unpin_and_release(&batch, 0);
return 0;
@@ -165,7 +163,6 @@ skip_request:
err_request:
i915_request_add(rq);
err_batch:
- i915_vma_unpin(batch);
- i915_vma_put(batch);
+ i915_vma_unpin_and_release(&batch, 0);
return err;
}