Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/gpu/drm/i915/i915_drv.h                     |  8
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem.c                     | 57
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem_dmabuf.c              |  1
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem_execbuffer.c          |  7
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem_render_state.c        |  2
-rw-r--r-- | drivers/gpu/drm/i915/i915_perf.c                    |  4
-rw-r--r-- | drivers/gpu/drm/i915/intel_engine_cs.c              |  4
-rw-r--r-- | drivers/gpu/drm/i915/intel_lrc.c                    | 63
-rw-r--r-- | drivers/gpu/drm/i915/intel_ringbuffer.c             | 62
-rw-r--r-- | drivers/gpu/drm/i915/selftests/huge_pages.c         |  5
-rw-r--r-- | drivers/gpu/drm/i915/selftests/i915_gem_context.c   | 17
-rw-r--r-- | drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c    |  1
-rw-r--r-- | drivers/gpu/drm/i915/selftests/i915_gem_evict.c     |  4
-rw-r--r-- | drivers/gpu/drm/i915/selftests/i915_request.c       | 14
-rw-r--r-- | drivers/gpu/drm/i915/selftests/igt_spinner.c        |  2
-rw-r--r-- | drivers/gpu/drm/i915/selftests/intel_hangcheck.c    |  2
-rw-r--r-- | drivers/gpu/drm/i915/selftests/intel_lrc.c          |  5
-rw-r--r-- | drivers/gpu/drm/i915/selftests/intel_workarounds.c  |  3
18 files changed, 127 insertions, 134 deletions
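
The recurring conversion in this patch, visible throughout the diff below, is to replace an implicit set-domain call with an explicit flush of the CPU mapping before unpinning it. A minimal sketch of the new calling convention follows (illustrative only: the surrounding fill_batch() function and its arguments are hypothetical, while i915_gem_object_pin_map/flush_map/unpin_map are the APIs touched by this diff):

	/* Sketch of the calling convention this patch converts callers to;
	 * not itself part of the patch.
	 */
	static int fill_batch(struct drm_i915_gem_object *obj, u32 value)
	{
		u32 *cs;

		/* Map the object's backing pages into the kernel address space. */
		cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		cs[0] = value;
		cs[1] = MI_BATCH_BUFFER_END;

		/* Previously: i915_gem_object_set_to_gtt_domain(obj, false).
		 * Now the CPU writes are pushed out at unmap time instead
		 * (a clflush on non-coherent platforms, a no-op for WC maps).
		 */
		i915_gem_object_flush_map(obj);
		i915_gem_object_unpin_map(obj);

		return 0;
	}
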
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b6d674aa2786..fefcb39aefc4 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2965,6 +2965,14 @@ i915_coherent_map_type(struct drm_i915_private *i915)
 void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 					   enum i915_map_type type);
 
+void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
+				 unsigned long offset,
+				 unsigned long size);
+static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
+{
+	__i915_gem_object_flush_map(obj, 0, obj->base.size);
+}
+
 /**
  * i915_gem_object_unpin_map - releases an earlier mapping
  * @obj: the object to unmap
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1a684b7e8c09..72374e952e4b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1713,6 +1713,9 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
  * 2 - Recognise WC as a separate cache domain so that we can flush the
  *     delayed writes via GTT before performing direct access via WC.
  *
+ * 3 - Remove implicit set-domain(GTT) and synchronisation on initial
+ *     pagefault; swapin remains transparent.
+ *
  * Restrictions:
  *
  *  * snoopable objects cannot be accessed via the GTT. It can cause machine
@@ -1740,7 +1743,7 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
  */
 int i915_gem_mmap_gtt_version(void)
 {
-	return 2;
+	return 3;
 }
 
 static inline struct i915_ggtt_view
@@ -1808,17 +1811,6 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 
 	trace_i915_gem_object_fault(obj, page_offset, true, write);
 
-	/* Try to flush the object off the GPU first without holding the lock.
-	 * Upon acquiring the lock, we will perform our sanity checks and then
-	 * repeat the flush holding the lock in the normal manner to catch cases
-	 * where we are gazumped.
-	 */
-	ret = i915_gem_object_wait(obj,
-				   I915_WAIT_INTERRUPTIBLE,
-				   MAX_SCHEDULE_TIMEOUT);
-	if (ret)
-		goto err;
-
 	ret = i915_gem_object_pin_pages(obj);
 	if (ret)
 		goto err;
@@ -1874,10 +1866,6 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 		goto err_unlock;
 	}
 
-	ret = i915_gem_object_set_to_gtt_domain(obj, write);
-	if (ret)
-		goto err_unpin;
-
 	ret = i915_vma_pin_fence(vma);
 	if (ret)
 		goto err_unpin;
@@ -2534,6 +2522,14 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 
 	lockdep_assert_held(&obj->mm.lock);
 
+	/* Make the pages coherent with the GPU (flushing any swapin). */
+	if (obj->cache_dirty) {
+		obj->write_domain = 0;
+		if (i915_gem_object_has_struct_page(obj))
+			drm_clflush_sg(pages);
+		obj->cache_dirty = false;
+	}
+
 	obj->mm.get_page.sg_pos = pages->sgl;
 	obj->mm.get_page.sg_idx = 0;
 
@@ -2735,6 +2731,33 @@ err_unlock:
 	goto out_unlock;
 }
 
+void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
+				 unsigned long offset,
+				 unsigned long size)
+{
+	enum i915_map_type has_type;
+	void *ptr;
+
+	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+	GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
+				     offset, size, obj->base.size));
+
+	obj->mm.dirty = true;
+
+	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
+		return;
+
+	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
+	if (has_type == I915_MAP_WC)
+		return;
+
+	drm_clflush_virt_range(ptr + offset, size);
+	if (size == obj->base.size) {
+		obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
+		obj->cache_dirty = false;
+	}
+}
+
 static int
 i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
 			   const struct drm_i915_gem_pwrite *arg)
@@ -4692,6 +4715,8 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
 			goto err_active;
 
 		engine->default_state = i915_gem_object_get(state->obj);
+		i915_gem_object_set_cache_coherency(engine->default_state,
+						    I915_CACHE_LLC);
 
 		/* Check we can acquire the image of the context state */
 		vaddr = i915_gem_object_pin_map(engine->default_state,
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 33181678990e..5a101a9462d8 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -107,6 +107,7 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
 
+	i915_gem_object_flush_map(obj);
 	i915_gem_object_unpin_map(obj);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index ee6d301a9627..3d672c9edb94 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1001,7 +1001,10 @@ static void reloc_gpu_flush(struct reloc_cache *cache)
 {
 	GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));
 	cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
+
+	__i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size);
 	i915_gem_object_unpin_map(cache->rq->batch->obj);
+
 	i915_gem_chipset_flush(cache->rq->i915);
 
 	i915_request_add(cache->rq);
@@ -1214,10 +1217,6 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 	if (IS_ERR(cmd))
 		return PTR_ERR(cmd);
 
-	err = i915_gem_object_set_to_wc_domain(obj, false);
-	if (err)
-		goto err_unmap;
-
 	batch = i915_vma_instance(obj, vma->vm, NULL);
 	if (IS_ERR(batch)) {
 		err = PTR_ERR(batch);
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 91196348c68c..9440024c763f 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -164,7 +164,7 @@ static int render_state_setup(struct intel_render_state *so,
 		drm_clflush_virt_range(d, i * sizeof(u32));
 	kunmap_atomic(d);
 
-	ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
+	ret = 0;
 out:
 	i915_gem_obj_finish_shmem_access(so->obj);
 	return ret;
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index e0fcb982a14f..85c5cb779297 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1509,9 +1509,7 @@ static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
 		goto unlock;
 	}
 
-	ret = i915_gem_object_set_cache_level(bo, I915_CACHE_LLC);
-	if (ret)
-		goto err_unref;
+	i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC);
 
 	/* PreHSW required 512K alignment, HSW requires 16M */
 	vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 24de34289d68..c5b417327132 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -528,9 +528,7 @@ static int init_status_page(struct intel_engine_cs *engine)
 		return PTR_ERR(obj);
 	}
 
-	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
-	if (ret)
-		goto err;
+	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
 
 	vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
 	if (IS_ERR(vma)) {
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 131b89972a78..35f7ef9e75c8 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1248,6 +1248,30 @@ static void execlists_context_destroy(struct kref *kref)
 	intel_context_free(ce);
 }
 
+static int __context_pin(struct i915_vma *vma)
+{
+	unsigned int flags;
+	int err;
+
+	flags = PIN_GLOBAL | PIN_HIGH;
+	flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
+
+	err = i915_vma_pin(vma, 0, 0, flags);
+	if (err)
+		return err;
+
+	vma->obj->pin_global++;
+	vma->obj->mm.dirty = true;
+
+	return 0;
+}
+
+static void __context_unpin(struct i915_vma *vma)
+{
+	vma->obj->pin_global--;
+	__i915_vma_unpin(vma);
+}
+
 static void execlists_context_unpin(struct intel_context *ce)
 {
 	struct intel_engine_cs *engine;
@@ -1276,31 +1300,8 @@ static void execlists_context_unpin(struct intel_context *ce)
 
 	intel_ring_unpin(ce->ring);
 
-	ce->state->obj->pin_global--;
 	i915_gem_object_unpin_map(ce->state->obj);
-	i915_vma_unpin(ce->state);
-}
-
-static int __context_pin(struct i915_vma *vma)
-{
-	unsigned int flags;
-	int err;
-
-	/*
-	 * Clear this page out of any CPU caches for coherent swap-in/out.
-	 * We only want to do this on the first bind so that we do not stall
-	 * on an active context (which by nature is already on the GPU).
-	 */
-	if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
-		err = i915_gem_object_set_to_wc_domain(vma->obj, true);
-		if (err)
-			return err;
-	}
-
-	flags = PIN_GLOBAL | PIN_HIGH;
-	flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
-
-	return i915_vma_pin(vma, 0, 0, flags);
+	__context_unpin(ce->state);
 }
 
 static void
@@ -1361,7 +1362,6 @@ __execlists_context_pin(struct intel_context *ce,
 
 	ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
 	__execlists_update_reg_state(ce, engine);
-	ce->state->obj->pin_global++;
 
 	return 0;
 
 unpin_ring:
@@ -1369,7 +1369,7 @@ unpin_ring:
 unpin_map:
 	i915_gem_object_unpin_map(ce->state->obj);
 unpin_vma:
-	__i915_vma_unpin(ce->state);
+	__context_unpin(ce->state);
 err:
 	return ret;
 }
@@ -2751,19 +2751,12 @@ populate_lr_context(struct intel_context *ce,
 	u32 *regs;
 	int ret;
 
-	ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
-	if (ret) {
-		DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
-		return ret;
-	}
-
 	vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
 	if (IS_ERR(vaddr)) {
 		ret = PTR_ERR(vaddr);
 		DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
(%d)\n", ret); return ret; } - ctx_obj->mm.dirty = true; if (engine->default_state) { /* @@ -2798,7 +2791,11 @@ populate_lr_context(struct intel_context *ce, _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT); + ret = 0; err_unpin_ctx: + __i915_gem_object_flush_map(ctx_obj, + LRC_HEADER_PAGES * PAGE_SIZE, + engine->context_size); i915_gem_object_unpin_map(ctx_obj); return ret; } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 03bbdf47e7e4..359d6af1a0b9 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1195,15 +1195,6 @@ int intel_ring_pin(struct intel_ring *ring) else flags |= PIN_HIGH; - if (!(vma->flags & I915_VMA_GLOBAL_BIND)) { - if (flags & PIN_MAPPABLE || map == I915_MAP_WC) - ret = i915_gem_object_set_to_gtt_domain(vma->obj, true); - else - ret = i915_gem_object_set_to_cpu_domain(vma->obj, true); - if (unlikely(ret)) - goto unpin_timeline; - } - ret = i915_vma_pin(vma, 0, 0, flags); if (unlikely(ret)) goto unpin_timeline; @@ -1392,17 +1383,6 @@ static int __context_pin(struct intel_context *ce) if (!vma) return 0; - /* - * Clear this page out of any CPU caches for coherent swap-in/out. - * We only want to do this on the first bind so that we do not stall - * on an active context (which by nature is already on the GPU). - */ - if (!(vma->flags & I915_VMA_GLOBAL_BIND)) { - err = i915_gem_object_set_to_gtt_domain(vma->obj, true); - if (err) - return err; - } - err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); if (err) return err; @@ -1412,6 +1392,7 @@ static int __context_pin(struct intel_context *ce) * it cannot reclaim the object until we release it. */ vma->obj->pin_global++; + vma->obj->mm.dirty = true; return 0; } @@ -1446,6 +1427,24 @@ alloc_context_vma(struct intel_engine_cs *engine) if (IS_ERR(obj)) return ERR_CAST(obj); + /* + * Try to make the context utilize L3 as well as LLC. + * + * On VLV we don't have L3 controls in the PTEs so we + * shouldn't touch the cache level, especially as that + * would make the object snooped which might have a + * negative performance impact. + * + * Snooping is required on non-llc platforms in execlist + * mode, but since all GGTT accesses use PAT entry 0 we + * get snooping anyway regardless of cache_level. + * + * This is only applicable for Ivy Bridge devices since + * later platforms don't have L3 control bits in the PTE. + */ + if (IS_IVYBRIDGE(i915)) + i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC); + if (engine->default_state) { void *defaults, *vaddr; @@ -1463,29 +1462,10 @@ alloc_context_vma(struct intel_engine_cs *engine) } memcpy(vaddr, defaults, engine->context_size); - i915_gem_object_unpin_map(engine->default_state); - i915_gem_object_unpin_map(obj); - } - /* - * Try to make the context utilize L3 as well as LLC. - * - * On VLV we don't have L3 controls in the PTEs so we - * shouldn't touch the cache level, especially as that - * would make the object snooped which might have a - * negative performance impact. - * - * Snooping is required on non-llc platforms in execlist - * mode, but since all GGTT accesses use PAT entry 0 we - * get snooping anyway regardless of cache_level. - * - * This is only applicable for Ivy Bridge devices since - * later platforms don't have L3 control bits in the PTE. 
-	 */
-	if (IS_IVYBRIDGE(i915)) {
-		/* Ignore any error, regard it as a simple optimisation */
-		i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
+		i915_gem_object_flush_map(obj);
+		i915_gem_object_unpin_map(obj);
 	}
 
 	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index 1f419fb094f4..c5c8ba6c059f 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -908,10 +908,6 @@ gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
 	if (IS_ERR(obj))
 		return ERR_CAST(obj);
 
-	err = i915_gem_object_set_to_wc_domain(obj, true);
-	if (err)
-		goto err;
-
 	cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
 	if (IS_ERR(cmd)) {
 		err = PTR_ERR(cmd);
@@ -1584,6 +1580,7 @@ static int igt_tmpfs_fallback(void *arg)
 	}
 	*vaddr = 0xdeadbeaf;
 
+	__i915_gem_object_flush_map(obj, 0, 64);
 	i915_gem_object_unpin_map(obj);
 
 	vma = i915_vma_instance(obj, vm, NULL);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index ab2aa32fd019..fb6dc54ce7ff 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -220,6 +220,7 @@ gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
 		offset += PAGE_SIZE;
 	}
 	*cmd = MI_BATCH_BUFFER_END;
+	i915_gem_object_flush_map(obj);
 	i915_gem_object_unpin_map(obj);
 
 	err = i915_gem_object_set_to_gtt_domain(obj, false);
@@ -604,12 +605,9 @@ static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
 	*cmd++ = upper_32_bits(vma->node.start);
 	*cmd = MI_BATCH_BUFFER_END;
 
+	__i915_gem_object_flush_map(obj, 0, 64);
 	i915_gem_object_unpin_map(obj);
 
-	err = i915_gem_object_set_to_gtt_domain(obj, false);
-	if (err)
-		goto err;
-
 	vma = i915_vma_instance(obj, vma->vm, NULL);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
@@ -1202,12 +1200,9 @@ static int write_to_scratch(struct i915_gem_context *ctx,
 	}
 	*cmd++ = value;
 	*cmd = MI_BATCH_BUFFER_END;
+	__i915_gem_object_flush_map(obj, 0, 64);
 	i915_gem_object_unpin_map(obj);
 
-	err = i915_gem_object_set_to_gtt_domain(obj, false);
-	if (err)
-		goto err;
-
 	vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
@@ -1299,11 +1294,9 @@ static int read_from_scratch(struct i915_gem_context *ctx,
 		*cmd++ = result;
 	}
 	*cmd = MI_BATCH_BUFFER_END;
-	i915_gem_object_unpin_map(obj);
 
-	err = i915_gem_object_set_to_gtt_domain(obj, false);
-	if (err)
-		goto err;
+	i915_gem_object_flush_map(obj);
+	i915_gem_object_unpin_map(obj);
 
 	vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
 	if (IS_ERR(vma)) {
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
index a7055b12e53c..2b943ee246c9 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
@@ -315,6 +315,7 @@ static int igt_dmabuf_export_kmap(void *arg)
 		goto err;
 	}
 	memset(ptr + PAGE_SIZE, 0xaa, PAGE_SIZE);
+	i915_gem_object_flush_map(obj);
 	i915_gem_object_unpin_map(obj);
 
 	ptr = dma_buf_kmap(dmabuf, 1);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index b270eab1cad1..9a9451846b33 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -274,7 +274,7 @@ static int igt_evict_for_cache_color(void *arg)
 		err = PTR_ERR(obj);
 		goto cleanup;
 	}
-	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
 	quirk_add(obj, &objects);
 
 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
@@ -290,7 +290,7 @@ static int igt_evict_for_cache_color(void *arg)
 		err = PTR_ERR(obj);
 		goto cleanup;
 	}
-	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
 	quirk_add(obj, &objects);
 
 	/* Neighbouring; same colour - should fit */
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 3eb6a6b075ab..e6ffe2240126 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -619,13 +619,11 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
 	}
 
 	*cmd = MI_BATCH_BUFFER_END;
-	i915_gem_chipset_flush(i915);
 
+	__i915_gem_object_flush_map(obj, 0, 64);
 	i915_gem_object_unpin_map(obj);
 
-	err = i915_gem_object_set_to_gtt_domain(obj, false);
-	if (err)
-		goto err;
+	i915_gem_chipset_flush(i915);
 
 	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
 	if (IS_ERR(vma)) {
@@ -777,10 +775,6 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
 	if (err)
 		goto err;
 
-	err = i915_gem_object_set_to_wc_domain(obj, true);
-	if (err)
-		goto err;
-
 	cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
 	if (IS_ERR(cmd)) {
 		err = PTR_ERR(cmd);
@@ -799,10 +793,12 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
 		*cmd++ = lower_32_bits(vma->node.start);
 	}
 	*cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
-	i915_gem_chipset_flush(i915);
 
+	__i915_gem_object_flush_map(obj, 0, 64);
 	i915_gem_object_unpin_map(obj);
 
+	i915_gem_chipset_flush(i915);
+
 	return vma;
 
 err:
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index d0b93a3fbc54..16890dfe74c0 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -29,7 +29,7 @@ int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
 		goto err_hws;
 	}
 
-	i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
+	i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);
 
 	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
 	if (IS_ERR(vaddr)) {
 		err = PTR_ERR(vaddr);
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index b5e35b2a925f..76b4fa150f2e 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -70,7 +70,7 @@ static int hang_init(struct hang *h, struct drm_i915_private *i915)
 		goto err_hws;
 	}
 
-	i915_gem_object_set_cache_level(h->hws, I915_CACHE_LLC);
+	i915_gem_object_set_cache_coherency(h->hws, I915_CACHE_LLC);
 
 	vaddr = i915_gem_object_pin_map(h->hws, I915_MAP_WB);
 	if (IS_ERR(vaddr)) {
 		err = PTR_ERR(vaddr);
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index d61520ea03c1..9e871eb0bfb1 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -1018,12 +1018,9 @@ static int live_preempt_smoke(void *arg)
 	for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
 		cs[n] = MI_ARB_CHECK;
 	cs[n] = MI_BATCH_BUFFER_END;
+	i915_gem_object_flush_map(smoke.batch);
 	i915_gem_object_unpin_map(smoke.batch);
 
-	err = i915_gem_object_set_to_gtt_domain(smoke.batch, false);
-	if (err)
-		goto err_batch;
-
 	for (n = 0; n < smoke.ncontext; n++) {
 		smoke.contexts[n] = kernel_context(smoke.i915);
 		if (!smoke.contexts[n])
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index f2a2b51a4662..3baed59008d7 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -90,6 +90,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
 		goto err_obj;
 	}
 	memset(cs, 0xc5, PAGE_SIZE);
+	i915_gem_object_flush_map(result);
 	i915_gem_object_unpin_map(result);
 
 	vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
@@ -358,6 +359,7 @@ static struct i915_vma *create_scratch(struct i915_gem_context *ctx)
 		goto err_obj;
 	}
 	memset(ptr, 0xc5, PAGE_SIZE);
+	i915_gem_object_flush_map(obj);
 	i915_gem_object_unpin_map(obj);
 
 	vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
@@ -551,6 +553,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
 
 	*cs++ = MI_BATCH_BUFFER_END;
 
+	i915_gem_object_flush_map(batch->obj);
 	i915_gem_object_unpin_map(batch->obj);
 
 	i915_gem_chipset_flush(ctx->i915);
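
Where only a prefix of the mapping is written, the selftests above bound the flush with the range variant rather than flushing the whole object. A hedged sketch of that pattern (the 64-byte bound matches what empty_batch() and friends use for a tiny batch; the obj and i915 variables here stand in for the caller's own):

	u32 *cmd;

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	*cmd = MI_BATCH_BUFFER_END;

	/* Only a handful of bytes were written; clflush just that range.
	 * The helper returns early for WC mappings and for objects that
	 * are already cache-coherent for writes, so this stays cheap.
	 */
	__i915_gem_object_flush_map(obj, 0, 64);
	i915_gem_object_unpin_map(obj);

	/* Ensure the chipset also sees the update before execution. */
	i915_gem_chipset_flush(i915);

Note the ordering the diff establishes: flush the CPU map before unpinning it (while the mapping is still valid), then perform the chipset flush once the write is out of the CPU caches.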