Diffstat (limited to 'drivers/gpu/drm/i915/selftests')
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c       |   4
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_reset.c          |  44
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_reset.h          |  15
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_spinner.c        | 199
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_spinner.h        |  37
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_hangcheck.c    |  63
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_lrc.c          | 301
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_workarounds.c  | 247
8 files changed, 578 insertions, 332 deletions
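Before the diff itself: the patch below moves the selftest reset-lock helpers into shared igt_reset.[ch] and the request spinner into igt_spinner.[ch]. As an illustrative sketch only (not part of the patch), this is how a selftest is expected to bracket reset work with the new helpers, mirroring the igt_global_reset() hunks further down; the body between lock and unlock is a placeholder:

static int example_reset_selftest(struct drm_i915_private *i915)
{
	int err = 0;

	igt_global_reset_lock(i915);	/* serialise against concurrent resets */

	/* ... perform the reset and checks under the appropriate locks ... */

	igt_global_reset_unlock(i915);	/* wakes waiters on gpu_error.reset_queue */

	if (i915_terminally_wedged(&i915->gpu_error))
		err = -EIO;

	return err;
}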
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 69fe86b30fbb..a9ed0ecc94e2 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -170,7 +170,7 @@ static int igt_ppgtt_alloc(void *arg)
 	 * This should ensure that we do not run into the oomkiller during
 	 * the test and take down the machine wilfully.
 	 */
-	limit = totalram_pages << PAGE_SHIFT;
+	limit = totalram_pages() << PAGE_SHIFT;
 	limit = min(ppgtt->vm.total, limit);
 
 	/* Check we can allocate the entire range */
@@ -1244,7 +1244,7 @@ static int exercise_mock(struct drm_i915_private *i915,
 				     u64 hole_start, u64 hole_end,
 				     unsigned long end_time))
 {
-	const u64 limit = totalram_pages << PAGE_SHIFT;
+	const u64 limit = totalram_pages() << PAGE_SHIFT;
 	struct i915_gem_context *ctx;
 	struct i915_hw_ppgtt *ppgtt;
 	IGT_TIMEOUT(end_time);
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.c b/drivers/gpu/drm/i915/selftests/igt_reset.c
new file mode 100644
index 000000000000..208a966da8ca
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.c
@@ -0,0 +1,44 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "igt_reset.h"
+
+#include "../i915_drv.h"
+#include "../intel_ringbuffer.h"
+
+void igt_global_reset_lock(struct drm_i915_private *i915)
+{
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	pr_debug("%s: current gpu_error=%08lx\n",
+		 __func__, i915->gpu_error.flags);
+
+	while (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags))
+		wait_event(i915->gpu_error.reset_queue,
+			   !test_bit(I915_RESET_BACKOFF,
+				     &i915->gpu_error.flags));
+
+	for_each_engine(engine, i915, id) {
+		while (test_and_set_bit(I915_RESET_ENGINE + id,
+					&i915->gpu_error.flags))
+			wait_on_bit(&i915->gpu_error.flags,
+				    I915_RESET_ENGINE + id,
+				    TASK_UNINTERRUPTIBLE);
+	}
+}
+
+void igt_global_reset_unlock(struct drm_i915_private *i915)
+{
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	for_each_engine(engine, i915, id)
+		clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+
+	clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+	wake_up_all(&i915->gpu_error.reset_queue);
+}
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.h b/drivers/gpu/drm/i915/selftests/igt_reset.h
new file mode 100644
index 000000000000..5f0234d045d5
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.h
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef __I915_SELFTESTS_IGT_RESET_H__
+#define __I915_SELFTESTS_IGT_RESET_H__
+
+#include "../i915_drv.h"
+
+void igt_global_reset_lock(struct drm_i915_private *i915);
+void igt_global_reset_unlock(struct drm_i915_private *i915);
+
+#endif
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
new file mode 100644
index 000000000000..8cd34f6e6859
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -0,0 +1,199 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "igt_spinner.h"
+
+int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
+{
+	unsigned int mode;
+	void *vaddr;
+	int err;
+
+	GEM_BUG_ON(INTEL_GEN(i915) < 8);
+
+	memset(spin, 0, sizeof(*spin));
+	spin->i915 = i915;
+
+	spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	if (IS_ERR(spin->hws)) {
+		err = PTR_ERR(spin->hws);
+		goto err;
+	}
+
+	spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	if (IS_ERR(spin->obj)) {
+		err = PTR_ERR(spin->obj);
+		goto err_hws;
+	}
+
+	i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
+	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
+	if (IS_ERR(vaddr)) {
+		err = PTR_ERR(vaddr);
+		goto err_obj;
+	}
+	spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
+
+	mode = i915_coherent_map_type(i915);
+	vaddr = i915_gem_object_pin_map(spin->obj, mode);
+	if (IS_ERR(vaddr)) {
+		err = PTR_ERR(vaddr);
+		goto err_unpin_hws;
+	}
+	spin->batch = vaddr;
+
+	return 0;
+
+err_unpin_hws:
+	i915_gem_object_unpin_map(spin->hws);
+err_obj:
+	i915_gem_object_put(spin->obj);
+err_hws:
+	i915_gem_object_put(spin->hws);
+err:
+	return err;
+}
+
+static unsigned int seqno_offset(u64 fence)
+{
+	return offset_in_page(sizeof(u32) * fence);
+}
+
+static u64 hws_address(const struct i915_vma *hws,
+		       const struct i915_request *rq)
+{
+	return hws->node.start + seqno_offset(rq->fence.context);
+}
+
+static int emit_recurse_batch(struct igt_spinner *spin,
+			      struct i915_request *rq,
+			      u32 arbitration_command)
+{
+	struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
+	struct i915_vma *hws, *vma;
+	u32 *batch;
+	int err;
+
+	vma = i915_vma_instance(spin->obj, vm, NULL);
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
+
+	hws = i915_vma_instance(spin->hws, vm, NULL);
+	if (IS_ERR(hws))
+		return PTR_ERR(hws);
+
+	err = i915_vma_pin(vma, 0, 0, PIN_USER);
+	if (err)
+		return err;
+
+	err = i915_vma_pin(hws, 0, 0, PIN_USER);
+	if (err)
+		goto unpin_vma;
+
+	err = i915_vma_move_to_active(vma, rq, 0);
+	if (err)
+		goto unpin_hws;
+
+	if (!i915_gem_object_has_active_reference(vma->obj)) {
+		i915_gem_object_get(vma->obj);
+		i915_gem_object_set_active_reference(vma->obj);
+	}
+
+	err = i915_vma_move_to_active(hws, rq, 0);
+	if (err)
+		goto unpin_hws;
+
+	if (!i915_gem_object_has_active_reference(hws->obj)) {
+		i915_gem_object_get(hws->obj);
+		i915_gem_object_set_active_reference(hws->obj);
+	}
+
+	batch = spin->batch;
+
+	*batch++ = MI_STORE_DWORD_IMM_GEN4;
+	*batch++ = lower_32_bits(hws_address(hws, rq));
+	*batch++ = upper_32_bits(hws_address(hws, rq));
+	*batch++ = rq->fence.seqno;
+
+	*batch++ = arbitration_command;
+
+	*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
+	*batch++ = lower_32_bits(vma->node.start);
+	*batch++ = upper_32_bits(vma->node.start);
+	*batch++ = MI_BATCH_BUFFER_END; /* not reached */
+
+	i915_gem_chipset_flush(spin->i915);
+
+	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
+
+unpin_hws:
+	i915_vma_unpin(hws);
+unpin_vma:
+	i915_vma_unpin(vma);
+	return err;
+}
+
+struct i915_request *
+igt_spinner_create_request(struct igt_spinner *spin,
+			   struct i915_gem_context *ctx,
+			   struct intel_engine_cs *engine,
+			   u32 arbitration_command)
+{
+	struct i915_request *rq;
+	int err;
+
+	rq = i915_request_alloc(engine, ctx);
+	if (IS_ERR(rq))
+		return rq;
+
+	err = emit_recurse_batch(spin, rq, arbitration_command);
+	if (err) {
+		i915_request_add(rq);
+		return ERR_PTR(err);
+	}
+
+	return rq;
+}
+
+static u32
+hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
+{
+	u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);
+
+	return READ_ONCE(*seqno);
+}
+
+void igt_spinner_end(struct igt_spinner *spin)
+{
+	*spin->batch = MI_BATCH_BUFFER_END;
+	i915_gem_chipset_flush(spin->i915);
+}
+
+void igt_spinner_fini(struct igt_spinner *spin)
+{
+	igt_spinner_end(spin);
+
+	i915_gem_object_unpin_map(spin->obj);
+	i915_gem_object_put(spin->obj);
+
+	i915_gem_object_unpin_map(spin->hws);
+	i915_gem_object_put(spin->hws);
+}
+
+bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
+{
+	if (!wait_event_timeout(rq->execute,
+				READ_ONCE(rq->global_seqno),
+				msecs_to_jiffies(10)))
+		return false;
+
+	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
+					       rq->fence.seqno),
+			     10) &&
+		 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
+					    rq->fence.seqno),
+			  1000));
+}
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.h b/drivers/gpu/drm/i915/selftests/igt_spinner.h
new file mode 100644
index 000000000000..391777c76dc7
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.h
@@ -0,0 +1,37 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef __I915_SELFTESTS_IGT_SPINNER_H__
+#define __I915_SELFTESTS_IGT_SPINNER_H__
+
+#include "../i915_selftest.h"
+
+#include "../i915_drv.h"
+#include "../i915_request.h"
+#include "../intel_ringbuffer.h"
+#include "../i915_gem_context.h"
+
+struct igt_spinner {
+	struct drm_i915_private *i915;
+	struct drm_i915_gem_object *hws;
+	struct drm_i915_gem_object *obj;
+	u32 *batch;
+	void *seqno;
+};
+
+int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915);
+void igt_spinner_fini(struct igt_spinner *spin);
+
+struct i915_request *
+igt_spinner_create_request(struct igt_spinner *spin,
+			   struct i915_gem_context *ctx,
+			   struct intel_engine_cs *engine,
+			   u32 arbitration_command);
+void igt_spinner_end(struct igt_spinner *spin);
+
+bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq);
+
+#endif
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 33494d922fab..5910da3e7d79 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -27,6 +27,7 @@
 #include "../i915_selftest.h"
 #include "i915_random.h"
 #include "igt_flush_test.h"
+#include "igt_reset.h"
 #include "igt_wedge_me.h"
 
 #include "mock_context.h"
@@ -308,6 +309,7 @@ static int igt_hang_sanitycheck(void *arg)
 		goto unlock;
 
 	for_each_engine(engine, i915, id) {
+		struct igt_wedge_me w;
 		long timeout;
 
 		if (!intel_engine_can_store_dword(engine))
@@ -328,9 +330,14 @@ static int igt_hang_sanitycheck(void *arg)
 
 		i915_request_add(rq);
 
-		timeout = i915_request_wait(rq,
-					    I915_WAIT_LOCKED,
-					    MAX_SCHEDULE_TIMEOUT);
+		timeout = 0;
+		igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout*/)
+			timeout = i915_request_wait(rq,
+						    I915_WAIT_LOCKED,
+						    MAX_SCHEDULE_TIMEOUT);
+		if (i915_terminally_wedged(&i915->gpu_error))
+			timeout = -EIO;
+
 		i915_request_put(rq);
 
 		if (timeout < 0) {
@@ -348,40 +355,6 @@ unlock:
 	return err;
 }
 
-static void global_reset_lock(struct drm_i915_private *i915)
-{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
-	pr_debug("%s: current gpu_error=%08lx\n",
-		 __func__, i915->gpu_error.flags);
-
-	while (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags))
-		wait_event(i915->gpu_error.reset_queue,
-			   !test_bit(I915_RESET_BACKOFF,
-				     &i915->gpu_error.flags));
-
-	for_each_engine(engine, i915, id) {
-		while (test_and_set_bit(I915_RESET_ENGINE + id,
-					&i915->gpu_error.flags))
-			wait_on_bit(&i915->gpu_error.flags,
-				    I915_RESET_ENGINE + id,
-				    TASK_UNINTERRUPTIBLE);
-	}
-}
-
-static void global_reset_unlock(struct drm_i915_private *i915)
-{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
-	for_each_engine(engine, i915, id)
-		clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
-
-	clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
-	wake_up_all(&i915->gpu_error.reset_queue);
-}
-
 static int igt_global_reset(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
@@ -390,7 +363,7 @@ static int igt_global_reset(void *arg)
 
 	/* Check that we can issue a global GPU reset */
 
-	global_reset_lock(i915);
+	igt_global_reset_lock(i915);
 	set_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags);
 
 	mutex_lock(&i915->drm.struct_mutex);
@@ -405,7 +378,7 @@ static int igt_global_reset(void *arg)
 	mutex_unlock(&i915->drm.struct_mutex);
 	GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags));
 
-	global_reset_unlock(i915);
+	igt_global_reset_unlock(i915);
 
 	if (i915_terminally_wedged(&i915->gpu_error))
 		err = -EIO;
@@ -936,7 +909,7 @@ static int igt_reset_wait(void *arg)
 
 	/* Check that we detect a stuck waiter and issue a reset */
 
-	global_reset_lock(i915);
+	igt_global_reset_lock(i915);
 
 	mutex_lock(&i915->drm.struct_mutex);
 	err = hang_init(&h, i915);
@@ -988,7 +961,7 @@ fini:
 	hang_fini(&h);
 unlock:
 	mutex_unlock(&i915->drm.struct_mutex);
-	global_reset_unlock(i915);
+	igt_global_reset_unlock(i915);
 
 	if (i915_terminally_wedged(&i915->gpu_error))
 		return -EIO;
@@ -1066,7 +1039,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
 
 	/* Check that we can recover an unbind stuck on a hanging request */
 
-	global_reset_lock(i915);
+	igt_global_reset_lock(i915);
 
 	mutex_lock(&i915->drm.struct_mutex);
 	err = hang_init(&h, i915);
@@ -1186,7 +1159,7 @@ fini:
 	hang_fini(&h);
 unlock:
 	mutex_unlock(&i915->drm.struct_mutex);
-	global_reset_unlock(i915);
+	igt_global_reset_unlock(i915);
 
 	if (i915_terminally_wedged(&i915->gpu_error))
 		return -EIO;
@@ -1266,7 +1239,7 @@ static int igt_reset_queue(void *arg)
 
 	/* Check that we replay pending requests following a hang */
 
-	global_reset_lock(i915);
+	igt_global_reset_lock(i915);
 
 	mutex_lock(&i915->drm.struct_mutex);
 	err = hang_init(&h, i915);
@@ -1397,7 +1370,7 @@ fini:
 	hang_fini(&h);
 unlock:
 	mutex_unlock(&i915->drm.struct_mutex);
-	global_reset_unlock(i915);
+	igt_global_reset_unlock(i915);
 
 	if (i915_terminally_wedged(&i915->gpu_error))
 		return -EIO;
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 94fc0e5c8766..ca461e3a5f27 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -6,216 +6,18 @@
 
 #include "../i915_selftest.h"
 #include "igt_flush_test.h"
+#include "igt_spinner.h"
 #include "i915_random.h"
 
 #include "mock_context.h"
 
-struct spinner {
-	struct drm_i915_private *i915;
-	struct drm_i915_gem_object *hws;
-	struct drm_i915_gem_object *obj;
-	u32 *batch;
-	void *seqno;
-};
-
-static int spinner_init(struct spinner *spin, struct drm_i915_private *i915)
-{
-	unsigned int mode;
-	void *vaddr;
-	int err;
-
-	GEM_BUG_ON(INTEL_GEN(i915) < 8);
-
-	memset(spin, 0, sizeof(*spin));
-	spin->i915 = i915;
-
-	spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
-	if (IS_ERR(spin->hws)) {
-		err = PTR_ERR(spin->hws);
-		goto err;
-	}
-
-	spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
-	if (IS_ERR(spin->obj)) {
-		err = PTR_ERR(spin->obj);
-		goto err_hws;
-	}
-
-	i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
-	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
-	if (IS_ERR(vaddr)) {
-		err = PTR_ERR(vaddr);
-		goto err_obj;
-	}
-	spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
-
-	mode = i915_coherent_map_type(i915);
-	vaddr = i915_gem_object_pin_map(spin->obj, mode);
-	if (IS_ERR(vaddr)) {
-		err = PTR_ERR(vaddr);
-		goto err_unpin_hws;
-	}
-	spin->batch = vaddr;
-
-	return 0;
-
-err_unpin_hws:
-	i915_gem_object_unpin_map(spin->hws);
-err_obj:
-	i915_gem_object_put(spin->obj);
-err_hws:
-	i915_gem_object_put(spin->hws);
-err:
-	return err;
-}
-
-static unsigned int seqno_offset(u64 fence)
-{
-	return offset_in_page(sizeof(u32) * fence);
-}
-
-static u64 hws_address(const struct i915_vma *hws,
-		       const struct i915_request *rq)
-{
-	return hws->node.start + seqno_offset(rq->fence.context);
-}
-
-static int emit_recurse_batch(struct spinner *spin,
-			      struct i915_request *rq,
-			      u32 arbitration_command)
-{
-	struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
-	struct i915_vma *hws, *vma;
-	u32 *batch;
-	int err;
-
-	vma = i915_vma_instance(spin->obj, vm, NULL);
-	if (IS_ERR(vma))
-		return PTR_ERR(vma);
-
-	hws = i915_vma_instance(spin->hws, vm, NULL);
-	if (IS_ERR(hws))
-		return PTR_ERR(hws);
-
-	err = i915_vma_pin(vma, 0, 0, PIN_USER);
-	if (err)
-		return err;
-
-	err = i915_vma_pin(hws, 0, 0, PIN_USER);
-	if (err)
-		goto unpin_vma;
-
-	err = i915_vma_move_to_active(vma, rq, 0);
-	if (err)
-		goto unpin_hws;
-
-	if (!i915_gem_object_has_active_reference(vma->obj)) {
-		i915_gem_object_get(vma->obj);
-		i915_gem_object_set_active_reference(vma->obj);
-	}
-
-	err = i915_vma_move_to_active(hws, rq, 0);
-	if (err)
-		goto unpin_hws;
-
-	if (!i915_gem_object_has_active_reference(hws->obj)) {
-		i915_gem_object_get(hws->obj);
-		i915_gem_object_set_active_reference(hws->obj);
-	}
-
-	batch = spin->batch;
-
-	*batch++ = MI_STORE_DWORD_IMM_GEN4;
-	*batch++ = lower_32_bits(hws_address(hws, rq));
-	*batch++ = upper_32_bits(hws_address(hws, rq));
-	*batch++ = rq->fence.seqno;
-
-	*batch++ = arbitration_command;
-
-	*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
-	*batch++ = lower_32_bits(vma->node.start);
-	*batch++ = upper_32_bits(vma->node.start);
-	*batch++ = MI_BATCH_BUFFER_END; /* not reached */
-
-	i915_gem_chipset_flush(spin->i915);
-
-	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
-
-unpin_hws:
-	i915_vma_unpin(hws);
-unpin_vma:
-	i915_vma_unpin(vma);
-	return err;
-}
-
-static struct i915_request *
-spinner_create_request(struct spinner *spin,
-		       struct i915_gem_context *ctx,
-		       struct intel_engine_cs *engine,
-		       u32 arbitration_command)
-{
-	struct i915_request *rq;
-	int err;
-
-	rq = i915_request_alloc(engine, ctx);
-	if (IS_ERR(rq))
-		return rq;
-
-	err = emit_recurse_batch(spin, rq, arbitration_command);
-	if (err) {
-		i915_request_add(rq);
-		return ERR_PTR(err);
-	}
-
-	return rq;
-}
-
-static u32 hws_seqno(const struct spinner *spin, const struct i915_request *rq)
-{
-	u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);
-
-	return READ_ONCE(*seqno);
-}
-
-static void spinner_end(struct spinner *spin)
-{
-	*spin->batch = MI_BATCH_BUFFER_END;
-	i915_gem_chipset_flush(spin->i915);
-}
-
-static void spinner_fini(struct spinner *spin)
-{
-	spinner_end(spin);
-
-	i915_gem_object_unpin_map(spin->obj);
-	i915_gem_object_put(spin->obj);
-
-	i915_gem_object_unpin_map(spin->hws);
-	i915_gem_object_put(spin->hws);
-}
-
-static bool wait_for_spinner(struct spinner *spin, struct i915_request *rq)
-{
-	if (!wait_event_timeout(rq->execute,
-				READ_ONCE(rq->global_seqno),
-				msecs_to_jiffies(10)))
-		return false;
-
-	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
-					       rq->fence.seqno),
-			     10) &&
-		 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
-					    rq->fence.seqno),
-			  1000));
-}
-
 static int live_sanitycheck(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct intel_engine_cs *engine;
 	struct i915_gem_context *ctx;
 	enum intel_engine_id id;
-	struct spinner spin;
+	struct igt_spinner spin;
 	int err = -ENOMEM;
 
 	if (!HAS_LOGICAL_RING_CONTEXTS(i915))
@@ -224,7 +26,7 @@ static int live_sanitycheck(void *arg)
 	mutex_lock(&i915->drm.struct_mutex);
 	intel_runtime_pm_get(i915);
 
-	if (spinner_init(&spin, i915))
+	if (igt_spinner_init(&spin, i915))
 		goto err_unlock;
 
 	ctx = kernel_context(i915);
@@ -234,14 +36,14 @@ static int live_sanitycheck(void *arg)
 	for_each_engine(engine, i915, id) {
 		struct i915_request *rq;
 
-		rq = spinner_create_request(&spin, ctx, engine, MI_NOOP);
+		rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
 			goto err_ctx;
 		}
 
 		i915_request_add(rq);
-		if (!wait_for_spinner(&spin, rq)) {
+		if (!igt_wait_for_spinner(&spin, rq)) {
 			GEM_TRACE("spinner failed to start\n");
 			GEM_TRACE_DUMP();
 			i915_gem_set_wedged(i915);
@@ -249,7 +51,7 @@ static int live_sanitycheck(void *arg)
 			goto err_ctx;
 		}
 
-		spinner_end(&spin);
+		igt_spinner_end(&spin);
 		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
 			err = -EIO;
 			goto err_ctx;
@@ -260,7 +62,7 @@ static int live_sanitycheck(void *arg)
 err_ctx:
 	kernel_context_close(ctx);
 err_spin:
-	spinner_fini(&spin);
+	igt_spinner_fini(&spin);
 err_unlock:
 	igt_flush_test(i915, I915_WAIT_LOCKED);
 	intel_runtime_pm_put(i915);
@@ -272,7 +74,7 @@ static int live_preempt(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct i915_gem_context *ctx_hi, *ctx_lo;
-	struct spinner spin_hi, spin_lo;
+	struct igt_spinner spin_hi, spin_lo;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 	int err = -ENOMEM;
@@ -283,10 +85,10 @@ static int live_preempt(void *arg)
 	mutex_lock(&i915->drm.struct_mutex);
 	intel_runtime_pm_get(i915);
 
-	if (spinner_init(&spin_hi, i915))
+	if (igt_spinner_init(&spin_hi, i915))
 		goto err_unlock;
 
-	if (spinner_init(&spin_lo, i915))
+	if (igt_spinner_init(&spin_lo, i915))
 		goto err_spin_hi;
 
 	ctx_hi = kernel_context(i915);
@@ -304,15 +106,15 @@ static int live_preempt(void *arg)
 	for_each_engine(engine, i915, id) {
 		struct i915_request *rq;
 
-		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
-					    MI_ARB_CHECK);
+		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
+						MI_ARB_CHECK);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
 			goto err_ctx_lo;
 		}
 
 		i915_request_add(rq);
-		if (!wait_for_spinner(&spin_lo, rq)) {
+		if (!igt_wait_for_spinner(&spin_lo, rq)) {
 			GEM_TRACE("lo spinner failed to start\n");
 			GEM_TRACE_DUMP();
 			i915_gem_set_wedged(i915);
@@ -320,16 +122,16 @@ static int live_preempt(void *arg)
 			goto err_ctx_lo;
 		}
 
-		rq = spinner_create_request(&spin_hi, ctx_hi, engine,
-					    MI_ARB_CHECK);
+		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
+						MI_ARB_CHECK);
 		if (IS_ERR(rq)) {
-			spinner_end(&spin_lo);
+			igt_spinner_end(&spin_lo);
 			err = PTR_ERR(rq);
 			goto err_ctx_lo;
 		}
 
 		i915_request_add(rq);
-		if (!wait_for_spinner(&spin_hi, rq)) {
+		if (!igt_wait_for_spinner(&spin_hi, rq)) {
 			GEM_TRACE("hi spinner failed to start\n");
 			GEM_TRACE_DUMP();
 			i915_gem_set_wedged(i915);
@@ -337,8 +139,8 @@ static int live_preempt(void *arg)
 			goto err_ctx_lo;
 		}
 
-		spinner_end(&spin_hi);
-		spinner_end(&spin_lo);
+		igt_spinner_end(&spin_hi);
+		igt_spinner_end(&spin_lo);
 		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
 			err = -EIO;
 			goto err_ctx_lo;
 		}
@@ -351,9 +153,9 @@ err_ctx_lo:
 err_ctx_hi:
 	kernel_context_close(ctx_hi);
 err_spin_lo:
-	spinner_fini(&spin_lo);
+	igt_spinner_fini(&spin_lo);
 err_spin_hi:
-	spinner_fini(&spin_hi);
+	igt_spinner_fini(&spin_hi);
 err_unlock:
 	igt_flush_test(i915, I915_WAIT_LOCKED);
 	intel_runtime_pm_put(i915);
@@ -365,7 +167,7 @@ static int live_late_preempt(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct i915_gem_context *ctx_hi, *ctx_lo;
-	struct spinner spin_hi, spin_lo;
+	struct igt_spinner spin_hi, spin_lo;
 	struct intel_engine_cs *engine;
 	struct i915_sched_attr attr = {};
 	enum intel_engine_id id;
@@ -377,10 +179,10 @@ static int live_late_preempt(void *arg)
 	mutex_lock(&i915->drm.struct_mutex);
 	intel_runtime_pm_get(i915);
 
-	if (spinner_init(&spin_hi, i915))
+	if (igt_spinner_init(&spin_hi, i915))
 		goto err_unlock;
 
-	if (spinner_init(&spin_lo, i915))
+	if (igt_spinner_init(&spin_lo, i915))
 		goto err_spin_hi;
 
 	ctx_hi = kernel_context(i915);
@@ -394,28 +196,29 @@ static int live_late_preempt(void *arg)
 	for_each_engine(engine, i915, id) {
 		struct i915_request *rq;
 
-		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
-					    MI_ARB_CHECK);
+		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
+						MI_ARB_CHECK);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
 			goto err_ctx_lo;
 		}
 
 		i915_request_add(rq);
-		if (!wait_for_spinner(&spin_lo, rq)) {
+		if (!igt_wait_for_spinner(&spin_lo, rq)) {
 			pr_err("First context failed to start\n");
 			goto err_wedged;
 		}
 
-		rq = spinner_create_request(&spin_hi, ctx_hi, engine, MI_NOOP);
+		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
+						MI_NOOP);
 		if (IS_ERR(rq)) {
-			spinner_end(&spin_lo);
+			igt_spinner_end(&spin_lo);
 			err = PTR_ERR(rq);
 			goto err_ctx_lo;
 		}
 
 		i915_request_add(rq);
-		if (wait_for_spinner(&spin_hi, rq)) {
+		if (igt_wait_for_spinner(&spin_hi, rq)) {
 			pr_err("Second context overtook first?\n");
 			goto err_wedged;
 		}
@@ -423,14 +226,14 @@ static int live_late_preempt(void *arg)
 		attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
 		engine->schedule(rq, &attr);
 
-		if (!wait_for_spinner(&spin_hi, rq)) {
+		if (!igt_wait_for_spinner(&spin_hi, rq)) {
 			pr_err("High priority context failed to preempt the low priority context\n");
 			GEM_TRACE_DUMP();
 			goto err_wedged;
 		}
 
-		spinner_end(&spin_hi);
-		spinner_end(&spin_lo);
+		igt_spinner_end(&spin_hi);
+		igt_spinner_end(&spin_lo);
 		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
 			err = -EIO;
 			goto err_ctx_lo;
 		}
@@ -443,9 +246,9 @@ err_ctx_lo:
 err_ctx_hi:
 	kernel_context_close(ctx_hi);
 err_spin_lo:
-	spinner_fini(&spin_lo);
+	igt_spinner_fini(&spin_lo);
 err_spin_hi:
-	spinner_fini(&spin_hi);
+	igt_spinner_fini(&spin_hi);
 err_unlock:
 	igt_flush_test(i915, I915_WAIT_LOCKED);
 	intel_runtime_pm_put(i915);
@@ -453,8 +256,8 @@ err_unlock:
 	return err;
 
 err_wedged:
-	spinner_end(&spin_hi);
-	spinner_end(&spin_lo);
+	igt_spinner_end(&spin_hi);
+	igt_spinner_end(&spin_lo);
 	i915_gem_set_wedged(i915);
 	err = -EIO;
 	goto err_ctx_lo;
@@ -464,7 +267,7 @@ static int live_preempt_hang(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct i915_gem_context *ctx_hi, *ctx_lo;
-	struct spinner spin_hi, spin_lo;
+	struct igt_spinner spin_hi, spin_lo;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 	int err = -ENOMEM;
@@ -478,10 +281,10 @@ static int live_preempt_hang(void *arg)
 	mutex_lock(&i915->drm.struct_mutex);
 	intel_runtime_pm_get(i915);
 
-	if (spinner_init(&spin_hi, i915))
+	if (igt_spinner_init(&spin_hi, i915))
 		goto err_unlock;
 
-	if (spinner_init(&spin_lo, i915))
+	if (igt_spinner_init(&spin_lo, i915))
 		goto err_spin_hi;
 
 	ctx_hi = kernel_context(i915);
@@ -500,15 +303,15 @@ static int live_preempt_hang(void *arg)
 		if (!intel_engine_has_preemption(engine))
 			continue;
 
-		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
-					    MI_ARB_CHECK);
+		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
+						MI_ARB_CHECK);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
 			goto err_ctx_lo;
 		}
 
 		i915_request_add(rq);
-		if (!wait_for_spinner(&spin_lo, rq)) {
+		if (!igt_wait_for_spinner(&spin_lo, rq)) {
 			GEM_TRACE("lo spinner failed to start\n");
 			GEM_TRACE_DUMP();
 			i915_gem_set_wedged(i915);
@@ -516,10 +319,10 @@ static int live_preempt_hang(void *arg)
 			goto err_ctx_lo;
 		}
 
-		rq = spinner_create_request(&spin_hi, ctx_hi, engine,
-					    MI_ARB_CHECK);
+		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
+						MI_ARB_CHECK);
 		if (IS_ERR(rq)) {
-			spinner_end(&spin_lo);
+			igt_spinner_end(&spin_lo);
 			err = PTR_ERR(rq);
 			goto err_ctx_lo;
 		}
@@ -544,7 +347,7 @@ static int live_preempt_hang(void *arg)
 
 		engine->execlists.preempt_hang.inject_hang = false;
 
-		if (!wait_for_spinner(&spin_hi, rq)) {
+		if (!igt_wait_for_spinner(&spin_hi, rq)) {
 			GEM_TRACE("hi spinner failed to start\n");
 			GEM_TRACE_DUMP();
 			i915_gem_set_wedged(i915);
@@ -552,8 +355,8 @@ static int live_preempt_hang(void *arg)
 			goto err_ctx_lo;
 		}
 
-		spinner_end(&spin_hi);
-		spinner_end(&spin_lo);
+		igt_spinner_end(&spin_hi);
+		igt_spinner_end(&spin_lo);
 		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
 			err = -EIO;
 			goto err_ctx_lo;
 		}
@@ -566,9 +369,9 @@ err_ctx_lo:
 err_ctx_hi:
 	kernel_context_close(ctx_hi);
 err_spin_lo:
-	spinner_fini(&spin_lo);
+	igt_spinner_fini(&spin_lo);
 err_spin_hi:
-	spinner_fini(&spin_hi);
+	igt_spinner_fini(&spin_hi);
 err_unlock:
 	igt_flush_test(i915, I915_WAIT_LOCKED);
 	intel_runtime_pm_put(i915);
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index d1a0923d2f38..67017d5175b8 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -6,6 +6,9 @@
 
 #include "../i915_selftest.h"
 
+#include "igt_flush_test.h"
+#include "igt_reset.h"
+#include "igt_spinner.h"
 #include "igt_wedge_me.h"
 #include "mock_context.h"
 
@@ -91,17 +94,23 @@ err_obj:
 	return ERR_PTR(err);
 }
 
-static u32 get_whitelist_reg(const struct whitelist *w, unsigned int i)
+static u32
+get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
 {
-	return i < w->count ? i915_mmio_reg_offset(w->reg[i]) : w->nopid;
+	i915_reg_t reg = i < engine->whitelist.count ?
+			 engine->whitelist.list[i].reg :
+			 RING_NOPID(engine->mmio_base);
+
+	return i915_mmio_reg_offset(reg);
 }
 
-static void print_results(const struct whitelist *w, const u32 *results)
+static void
+print_results(const struct intel_engine_cs *engine, const u32 *results)
 {
 	unsigned int i;
 
 	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
-		u32 expected = get_whitelist_reg(w, i);
+		u32 expected = get_whitelist_reg(engine, i);
 		u32 actual = results[i];
 
 		pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
@@ -109,8 +118,7 @@ static void print_results(const struct whitelist *w, const u32 *results)
 	}
 }
 
-static int check_whitelist(const struct whitelist *w,
-			   struct i915_gem_context *ctx,
+static int check_whitelist(struct i915_gem_context *ctx,
 			   struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_object *results;
@@ -138,11 +146,11 @@ static int check_whitelist(const struct whitelist *w,
 	}
 
 	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
-		u32 expected = get_whitelist_reg(w, i);
+		u32 expected = get_whitelist_reg(engine, i);
 		u32 actual = vaddr[i];
 
 		if (expected != actual) {
-			print_results(w, vaddr);
+			print_results(engine, vaddr);
 			pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
 			       i, expected, actual);
@@ -159,66 +167,107 @@ out_put:
 
 static int do_device_reset(struct intel_engine_cs *engine)
 {
-	i915_reset(engine->i915, ENGINE_MASK(engine->id), NULL);
+	set_bit(I915_RESET_HANDOFF, &engine->i915->gpu_error.flags);
+	i915_reset(engine->i915, ENGINE_MASK(engine->id), "live_workarounds");
 	return 0;
 }
 
 static int do_engine_reset(struct intel_engine_cs *engine)
 {
-	return i915_reset_engine(engine, NULL);
+	return i915_reset_engine(engine, "live_workarounds");
 }
 
-static int switch_to_scratch_context(struct intel_engine_cs *engine)
+static int
+switch_to_scratch_context(struct intel_engine_cs *engine,
+			  struct igt_spinner *spin)
 {
 	struct i915_gem_context *ctx;
 	struct i915_request *rq;
+	int err = 0;
 
 	ctx = kernel_context(engine->i915);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
 	intel_runtime_pm_get(engine->i915);
-	rq = i915_request_alloc(engine, ctx);
+
+	if (spin)
+		rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
+	else
+		rq = i915_request_alloc(engine, ctx);
+
 	intel_runtime_pm_put(engine->i915);
 
 	kernel_context_close(ctx);
-	if (IS_ERR(rq))
-		return PTR_ERR(rq);
+
+	if (IS_ERR(rq)) {
+		spin = NULL;
+		err = PTR_ERR(rq);
+		goto err;
+	}
 
 	i915_request_add(rq);
 
-	return 0;
+	if (spin && !igt_wait_for_spinner(spin, rq)) {
+		pr_err("Spinner failed to start\n");
+		err = -ETIMEDOUT;
+	}
+
+err:
+	if (err && spin)
+		igt_spinner_end(spin);
+
+	return err;
 }
 
 static int check_whitelist_across_reset(struct intel_engine_cs *engine,
 					int (*reset)(struct intel_engine_cs *),
-					const struct whitelist *w,
 					const char *name)
 {
+	struct drm_i915_private *i915 = engine->i915;
+	bool want_spin = reset == do_engine_reset;
 	struct i915_gem_context *ctx;
+	struct igt_spinner spin;
 	int err;
 
-	ctx = kernel_context(engine->i915);
+	pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n",
+		engine->whitelist.count, name);
+
+	if (want_spin) {
+		err = igt_spinner_init(&spin, i915);
+		if (err)
+			return err;
+	}
+
+	ctx = kernel_context(i915);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	err = check_whitelist(w, ctx, engine);
+	err = check_whitelist(ctx, engine);
 	if (err) {
 		pr_err("Invalid whitelist *before* %s reset!\n", name);
 		goto out;
 	}
 
-	err = switch_to_scratch_context(engine);
+	err = switch_to_scratch_context(engine, want_spin ? &spin : NULL);
 	if (err)
 		goto out;
 
+	intel_runtime_pm_get(i915);
 	err = reset(engine);
+	intel_runtime_pm_put(i915);
+
+	if (want_spin) {
+		igt_spinner_end(&spin);
+		igt_spinner_fini(&spin);
+	}
+
 	if (err) {
 		pr_err("%s reset failed\n", name);
 		goto out;
 	}
 
-	err = check_whitelist(w, ctx, engine);
+	err = check_whitelist(ctx, engine);
 	if (err) {
 		pr_err("Whitelist not preserved in context across %s reset!\n",
 		       name);
@@ -227,11 +276,11 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
 
 	kernel_context_close(ctx);
 
-	ctx = kernel_context(engine->i915);
+	ctx = kernel_context(i915);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	err = check_whitelist(w, ctx, engine);
+	err = check_whitelist(ctx, engine);
 	if (err) {
 		pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
 		       name);
@@ -247,26 +296,18 @@ static int live_reset_whitelist(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct intel_engine_cs *engine = i915->engine[RCS];
-	struct i915_gpu_error *error = &i915->gpu_error;
-	struct whitelist w;
 	int err = 0;
 
 	/* If we reset the gpu, we should not lose the RING_NONPRIV */
 
-	if (!engine)
-		return 0;
-
-	if (!whitelist_build(engine, &w))
+	if (!engine || engine->whitelist.count == 0)
 		return 0;
 
-	pr_info("Checking %d whitelisted registers (RING_NONPRIV)\n", w.count);
-
-	set_bit(I915_RESET_BACKOFF, &error->flags);
-	set_bit(I915_RESET_ENGINE + engine->id, &error->flags);
+	igt_global_reset_lock(i915);
 
 	if (intel_has_reset_engine(i915)) {
 		err = check_whitelist_across_reset(engine,
-						   do_engine_reset, &w,
+						   do_engine_reset,
 						   "engine");
 		if (err)
 			goto out;
@@ -274,22 +315,156 @@ static int live_reset_whitelist(void *arg)
 
 	if (intel_has_gpu_reset(i915)) {
 		err = check_whitelist_across_reset(engine,
-						   do_device_reset, &w,
+						   do_device_reset,
 						   "device");
 		if (err)
 			goto out;
 	}
 
 out:
-	clear_bit(I915_RESET_ENGINE + engine->id, &error->flags);
-	clear_bit(I915_RESET_BACKOFF, &error->flags);
+	igt_global_reset_unlock(i915);
 	return err;
 }
 
+static bool verify_gt_engine_wa(struct drm_i915_private *i915, const char *str)
+{
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	bool ok = true;
+
+	ok &= intel_gt_verify_workarounds(i915, str);
+
+	for_each_engine(engine, i915, id)
+		ok &= intel_engine_verify_workarounds(engine, str);
+
+	return ok;
+}
+
+static int
+live_gpu_reset_gt_engine_workarounds(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct i915_gpu_error *error = &i915->gpu_error;
+	bool ok;
+
+	if (!intel_has_gpu_reset(i915))
+		return 0;
+
+	pr_info("Verifying after GPU reset...\n");
+
+	igt_global_reset_lock(i915);
+
+	ok = verify_gt_engine_wa(i915, "before reset");
+	if (!ok)
+		goto out;
+
+	intel_runtime_pm_get(i915);
+	set_bit(I915_RESET_HANDOFF, &error->flags);
+	i915_reset(i915, ALL_ENGINES, "live_workarounds");
+	intel_runtime_pm_put(i915);
+
+	ok = verify_gt_engine_wa(i915, "after reset");
+
+out:
+	igt_global_reset_unlock(i915);
+
+	return ok ? 0 : -ESRCH;
+}
+
+static int
+live_engine_reset_gt_engine_workarounds(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_engine_cs *engine;
+	struct i915_gem_context *ctx;
+	struct igt_spinner spin;
+	enum intel_engine_id id;
+	struct i915_request *rq;
+	int ret = 0;
+
+	if (!intel_has_reset_engine(i915))
+		return 0;
+
+	ctx = kernel_context(i915);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	igt_global_reset_lock(i915);
+
+	for_each_engine(engine, i915, id) {
+		bool ok;
+
+		pr_info("Verifying after %s reset...\n", engine->name);
+
+		ok = verify_gt_engine_wa(i915, "before reset");
+		if (!ok) {
+			ret = -ESRCH;
+			goto err;
+		}
+
+		intel_runtime_pm_get(i915);
+		i915_reset_engine(engine, "live_workarounds");
+		intel_runtime_pm_put(i915);
+
+		ok = verify_gt_engine_wa(i915, "after idle reset");
+		if (!ok) {
+			ret = -ESRCH;
+			goto err;
+		}
+
+		ret = igt_spinner_init(&spin, i915);
+		if (ret)
+			goto err;
+
+		intel_runtime_pm_get(i915);
+
+		rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
+		if (IS_ERR(rq)) {
+			ret = PTR_ERR(rq);
+			igt_spinner_fini(&spin);
+			intel_runtime_pm_put(i915);
+			goto err;
+		}
+
+		i915_request_add(rq);
+
+		if (!igt_wait_for_spinner(&spin, rq)) {
+			pr_err("Spinner failed to start\n");
+			igt_spinner_fini(&spin);
+			intel_runtime_pm_put(i915);
+			ret = -ETIMEDOUT;
+			goto err;
+		}
+
+		i915_reset_engine(engine, "live_workarounds");
+
+		intel_runtime_pm_put(i915);
+
+		igt_spinner_end(&spin);
+		igt_spinner_fini(&spin);
+
+		ok = verify_gt_engine_wa(i915, "after busy reset");
+		if (!ok) {
+			ret = -ESRCH;
+			goto err;
+		}
+	}
+
+err:
+	igt_global_reset_unlock(i915);
+	kernel_context_close(ctx);
+
+	igt_flush_test(i915, I915_WAIT_LOCKED);
+
+	return ret;
+}
+
 int intel_workarounds_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(live_reset_whitelist),
+		SUBTEST(live_gpu_reset_gt_engine_workarounds),
+		SUBTEST(live_engine_reset_gt_engine_workarounds),
 	};
 	int err;
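For reference, an illustrative sketch only (not part of the patch) of the typical igt_spinner call sequence, mirroring live_sanitycheck() in the intel_lrc.c hunks above; "ctx" and "engine" stand in for whatever context and engine the test already holds, and the error handling is simplified:

static int example_spinner_usage(struct drm_i915_private *i915,
				 struct i915_gem_context *ctx,
				 struct intel_engine_cs *engine)
{
	struct igt_spinner spin;
	struct i915_request *rq;
	int err;

	err = igt_spinner_init(&spin, i915);	/* allocates batch + HWS pages */
	if (err)
		return err;

	rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out_fini;
	}

	i915_request_add(rq);
	if (!igt_wait_for_spinner(&spin, rq)) {	/* spinner never started */
		err = -EIO;
		goto out_end;
	}

	/* ... the test proper runs while the spinner keeps the engine busy ... */

out_end:
	igt_spinner_end(&spin);	/* rewrite the batch to MI_BATCH_BUFFER_END */
out_fini:
	igt_spinner_fini(&spin);
	return err;
}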