author     Chris Wilson <chris@chris-wilson.co.uk>    2018-04-24 09:16:00 +0100
committer  Chris Wilson <chris@chris-wilson.co.uk>    2018-04-24 14:00:37 +0100
commit     56021f48dbea69a00b96a53d6450b0950f9c811f
tree       bcf16f39f8e0514f4060552ea0d5206d2ed36940
parent     247870ac8ea72916cd26f89e9bc211b97141ecd5
drm/i915: Don't dump umpteen thousand requests
If we have more than a few, possibly several thousand, requests in the queue, don't show the central portion; just print the first few and the last being executed and/or queued. The first few should be enough to help identify a problem in execution, and most often comparing the first/last in the queue is enough to identify problems in the scheduling. We may need some fine tuning of MAX_REQUESTS_TO_SHOW for common debug scenarios, but for the moment, if we can avoid spending more than a few seconds dumping the GPU state, that will avoid a nasty livelock (where hangcheck spends so long dumping the state that it fires again and starts to dump the state again in parallel, ad infinitum).

v2: Remember to print last, not the stale rq iterator, after the loop.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180424081600.27544-1-chris@chris-wilson.co.uk
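For reference, here is a minimal userspace sketch of the truncation pattern the patch applies, kept separate from the driver code below: print the first MAX_REQUESTS_TO_SHOW - 1 entries, remember only the last of the remainder, report how many entries were elided, then print that last one. The show_requests() helper and the plain integer array standing in for the request list are illustrative assumptions, not i915 code.

/*
 * Standalone sketch of the "first N-1 plus last" dump truncation.
 * show_requests() and the int array are hypothetical stand-ins.
 */
#include <stdio.h>

#define MAX_REQUESTS_TO_SHOW 8

static void show_requests(const int *seqno, int n, const char *prefix)
{
	int last = -1;
	int count = 0;
	int i;

	for (i = 0; i < n; i++) {
		/* Print the first MAX_REQUESTS_TO_SHOW - 1 entries verbatim... */
		if (count++ < MAX_REQUESTS_TO_SHOW - 1)
			printf("%s%d\n", prefix, seqno[i]);
		else
			/* ...and only remember the most recent of the rest. */
			last = seqno[i];
	}

	if (last != -1) {
		/* Say how many entries were skipped, then show the tail. */
		if (count > MAX_REQUESTS_TO_SHOW)
			printf("%s...skipping %d requests...\n",
			       prefix, count - MAX_REQUESTS_TO_SHOW);
		printf("%s%d\n", prefix, last);
	}
}

int main(void)
{
	int seqno[20];
	int i;

	for (i = 0; i < 20; i++)
		seqno[i] = i + 1;

	/* Prints 1..7, a "...skipping 12 requests..." line, then 20. */
	show_requests(seqno, 20, "\t\tE ");
	return 0;
}

With 20 entries this prints eight lines in total (seven head entries plus the last), which is the same bound the patch enforces per list so that a single engine dump stays small no matter how deep the execution and priority queues are.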
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/gpu/drm/i915/intel_engine_cs.c	43
1 file changed, 38 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 66cddd059666..2398ea71e747 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1307,11 +1307,13 @@ void intel_engine_dump(struct intel_engine_cs *engine,
struct drm_printer *m,
const char *header, ...)
{
+ const int MAX_REQUESTS_TO_SHOW = 8;
struct intel_breadcrumbs * const b = &engine->breadcrumbs;
const struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_gpu_error * const error = &engine->i915->gpu_error;
- struct i915_request *rq;
+ struct i915_request *rq, *last;
struct rb_node *rb;
+ int count;
if (header) {
va_list ap;
@@ -1378,16 +1380,47 @@ void intel_engine_dump(struct intel_engine_cs *engine,
}
spin_lock_irq(&engine->timeline->lock);
- list_for_each_entry(rq, &engine->timeline->requests, link)
- print_request(m, rq, "\t\tE ");
+
+ last = NULL;
+ count = 0;
+ list_for_each_entry(rq, &engine->timeline->requests, link) {
+ if (count++ < MAX_REQUESTS_TO_SHOW - 1)
+ print_request(m, rq, "\t\tE ");
+ else
+ last = rq;
+ }
+ if (last) {
+ if (count > MAX_REQUESTS_TO_SHOW) {
+ drm_printf(m,
+ "\t\t...skipping %d executing requests...\n",
+ count - MAX_REQUESTS_TO_SHOW);
+ }
+ print_request(m, last, "\t\tE ");
+ }
+
+ last = NULL;
+ count = 0;
drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
for (rb = execlists->first; rb; rb = rb_next(rb)) {
struct i915_priolist *p =
rb_entry(rb, typeof(*p), node);
- list_for_each_entry(rq, &p->requests, sched.link)
- print_request(m, rq, "\t\tQ ");
+ list_for_each_entry(rq, &p->requests, sched.link) {
+ if (count++ < MAX_REQUESTS_TO_SHOW - 1)
+ print_request(m, rq, "\t\tQ ");
+ else
+ last = rq;
+ }
}
+ if (last) {
+ if (count > MAX_REQUESTS_TO_SHOW) {
+ drm_printf(m,
+ "\t\t...skipping %d queued requests...\n",
+ count - MAX_REQUESTS_TO_SHOW);
+ }
+ print_request(m, last, "\t\tQ ");
+ }
+
spin_unlock_irq(&engine->timeline->lock);
spin_lock_irq(&b->rb_lock);