author | Dave Airlie <airlied@redhat.com> | 2017-06-28 17:07:15 +1000
---|---|---
committer | Dave Airlie <airlied@redhat.com> | 2017-06-28 17:07:15 +1000
commit | 9ff1beb1d19ffe2b26bf9cd2d33e6073d4f4b5fe (patch) |
tree | 0d154cdf42dfeeb1afa11ee05395f5c16e5fa0f0 /drivers/gpu/drm/i915/i915_gem_execbuffer.c |
parent | 5193c08c7e82248cb6b1e5d672d261c4da6bb47b (diff) |
parent | 611cdf3695a307fdca3ff3779a1e6cca935e2d31 (diff) |
Merge tag 'drm-intel-fixes-2017-06-27' of git://anongit.freedesktop.org/git/drm-intel into drm-fixes
Just a few minor fixes. The important one is the execbuf async fix (aka
ANDROID_native_sync); a usage sketch follows the patch list below. There
was another patch for a display coherency corner case on APL (Apollo Lake),
but we've random-walked in that space too much, and the cherry-pick looked
really invasive.
* tag 'drm-intel-fixes-2017-06-27' of git://anongit.freedesktop.org/git/drm-intel:
drm/i915: Disable EXEC_OBJECT_ASYNC when doing relocations
drm/i915: Hold struct_mutex for per-file stats in debugfs/i915_gem_object
drm/i915: Retire the VMA's fence tracker before unbinding
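For context, EXEC_OBJECT_ASYNC is the per-object execbuf flag with which userspace opts out of the kernel's implicit synchronisation (the mechanism behind ANDROID_native_sync). Below is a minimal, hypothetical userspace sketch of an execbuf that sets the flag while still passing a relocation; the handles, offsets, and batch size are placeholders, not taken from this patch. After the fix merged here, the kernel clears ASYNC on such an object, since patching the relocation already implies a stall.

```c
/* Hypothetical sketch: EXEC_OBJECT_ASYNC combined with a relocation.
 * Handles, offsets, and sizes are placeholders. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int submit_async_with_reloc(int drm_fd, uint32_t batch_handle,
				   uint32_t target_handle)
{
	struct drm_i915_gem_relocation_entry reloc;
	struct drm_i915_gem_exec_object2 obj;
	struct drm_i915_gem_execbuffer2 execbuf;

	memset(&reloc, 0, sizeof(reloc));
	reloc.target_handle = target_handle;
	reloc.offset = 0x40;		/* location in the batch to patch */
	reloc.presumed_offset = -1;	/* stale guess: forces a relocation */
	reloc.read_domains = I915_GEM_DOMAIN_RENDER;

	memset(&obj, 0, sizeof(obj));
	obj.handle = batch_handle;
	obj.relocation_count = 1;
	obj.relocs_ptr = (uint64_t)(uintptr_t)&reloc;
	obj.flags = EXEC_OBJECT_ASYNC;	/* opt out of implicit sync; the
					 * kernel now revokes this when it
					 * has to patch relocations */

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uint64_t)(uintptr_t)&obj;
	execbuf.buffer_count = 1;
	execbuf.batch_len = 4096;	/* placeholder batch size */

	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}
```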
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_execbuffer.c')
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem_execbuffer.c | 17 |
1 file changed, 14 insertions, 3 deletions
```diff
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a3e59c8ef27b..9ad13eeed904 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -546,11 +546,12 @@ repeat:
 }
 
 static int
-i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
+i915_gem_execbuffer_relocate_entry(struct i915_vma *vma,
 				   struct eb_vmas *eb,
 				   struct drm_i915_gem_relocation_entry *reloc,
 				   struct reloc_cache *cache)
 {
+	struct drm_i915_gem_object *obj = vma->obj;
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	struct drm_gem_object *target_obj;
 	struct drm_i915_gem_object *target_i915_obj;
@@ -628,6 +629,16 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 		return -EINVAL;
 	}
 
+	/*
+	 * If we write into the object, we need to force the synchronisation
+	 * barrier, either with an asynchronous clflush or if we executed the
+	 * patching using the GPU (though that should be serialised by the
+	 * timeline). To be completely sure, and since we are required to
+	 * do relocations we are already stalling, disable the user's opt
+	 * out of our synchronisation.
+	 */
+	vma->exec_entry->flags &= ~EXEC_OBJECT_ASYNC;
+
 	ret = relocate_entry(obj, reloc, cache, target_offset);
 	if (ret)
 		return ret;
@@ -678,7 +689,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
 	do {
 		u64 offset = r->presumed_offset;
 
-		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r, &cache);
+		ret = i915_gem_execbuffer_relocate_entry(vma, eb, r, &cache);
 		if (ret)
 			goto out;
 
@@ -726,7 +737,7 @@ i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
 	reloc_cache_init(&cache, eb->i915);
 
 	for (i = 0; i < entry->relocation_count; i++) {
-		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i], &cache);
+		ret = i915_gem_execbuffer_relocate_entry(vma, eb, &relocs[i], &cache);
 		if (ret)
 			break;
 	}
```
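Why clearing the flag in the relocation path is sufficient: the ASYNC opt-out is honoured later in the same file, where the implicit-fence await for an object is skipped when the flag is set. The sketch below is an illustrative paraphrase of that consumer, not code from this diff; the function name and exact call shape are simplified assumptions.

```c
/* Illustrative paraphrase (not part of this diff): how EXEC_OBJECT_ASYNC
 * is consumed when objects are moved to the GPU. With the flag cleared by
 * the relocation path above, the await below runs and serialises the
 * relocation write. The wrapper name here is hypothetical. */
static int sync_vma_for_execbuf(struct drm_i915_gem_request *req,
				struct i915_vma *vma)
{
	/* The user promised to manage synchronisation for this object... */
	if (vma->exec_entry->flags & EXEC_OBJECT_ASYNC)
		return 0;	/* ...so skip the implicit fence wait. */

	/* Otherwise wait for outstanding rendering on the object, treating
	 * it as a write target if a write domain is pending. */
	return i915_gem_request_await_object(req, vma->obj,
					     vma->obj->base.pending_write_domain);
}
```

The design choice follows from the commit comment: a relocation means the kernel must patch the batch anyway (CPU clflush or GPU copy), so the submission is already stalled, and honouring the user's opt-out would only skip the barrier that the relocation write itself requires.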