author	Chris Wilson <chris@chris-wilson.co.uk>	2019-08-12 21:36:26 +0100
committer	Chris Wilson <chris@chris-wilson.co.uk>	2019-08-13 07:54:39 +0100
commit	5f15c1e6e181636f4b2fe7a67d582356f57cfd2c (patch)
tree	9e50373ba62bdcaa83448f704b0ee857b6715696 /drivers/gpu/drm/i915/gt/uc
parent	478ffad6d6902298802b5b8044e542739fcb0ed1 (diff)
drm/i915/guc: Use a local cancel_port_requests
Since execlists and the guc have diverged in their port tracking, we
cannot simply reuse the execlists cancellation code as it leads to
unbalanced reference counting. Use a local, simpler routine for the guc.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190812203626.3948-1-chris@chris-wilson.co.uk
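[Editorial note, not part of the patch: the invariant behind this change is that schedule_in() takes exactly one request reference and one GT wakeref per inflight request, and cancellation must release exactly those. The execlists cancellation path additionally manipulates its own context-inflight counters, which the GuC backend no longer uses. A minimal, self-contained C sketch of that pairing, using stand-in types rather than the i915 ones:]

	#include <assert.h>

	/* Stand-ins for struct i915_request and struct intel_gt. */
	struct request { int refcount; };
	struct gt { int wakerefs; };

	static struct request *request_get(struct request *rq)
	{
		rq->refcount++;		/* i915_request_get() analogue */
		return rq;
	}

	static void request_put(struct request *rq)
	{
		rq->refcount--;		/* i915_request_put() analogue */
	}

	static void gt_pm_get(struct gt *gt) { gt->wakerefs++; }
	static void gt_pm_put(struct gt *gt) { gt->wakerefs--; }

	/* Mirrors the pairing of schedule_in()/schedule_out() in this
	 * patch: one reference and one wakeref per inflight request. */
	static struct request *schedule_in(struct gt *gt, struct request *rq)
	{
		gt_pm_get(gt);
		return request_get(rq);
	}

	static void schedule_out(struct gt *gt, struct request *rq)
	{
		gt_pm_put(gt);
		request_put(rq);
	}

	int main(void)
	{
		struct gt gt = { 0 };
		struct request rq = { 1 };

		struct request *inflight = schedule_in(&gt, &rq);

		/* Cancellation must call schedule_out() exactly once per
		 * inflight request; a routine that also adjusts counters
		 * the backend never incremented would unbalance this. */
		schedule_out(&gt, inflight);

		assert(rq.refcount == 1 && gt.wakerefs == 0);
		return 0;
	}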
Diffstat (limited to 'drivers/gpu/drm/i915/gt/uc')
-rw-r--r--	drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c	37
1 file changed, 27 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index deb054eeb37c..54c3ae2c49ff 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -517,11 +517,14 @@ static struct i915_request *schedule_in(struct i915_request *rq, int idx)
 {
 	trace_i915_request_in(rq, idx);
 
-	if (!rq->hw_context->inflight)
-		rq->hw_context->inflight = rq->engine;
-	intel_context_inflight_inc(rq->hw_context);
-	intel_gt_pm_get(rq->engine->gt);
+	/*
+	 * Currently we are not tracking the rq->context being inflight
+	 * (ce->inflight = rq->engine). It is only used by the execlists
+	 * backend at the moment, a similar counting strategy would be
+	 * required if we generalise the inflight tracking.
+	 */
 
+	intel_gt_pm_get(rq->engine->gt);
 	return i915_request_get(rq);
 }
 
@@ -529,10 +532,6 @@ static void schedule_out(struct i915_request *rq)
 {
 	trace_i915_request_out(rq);
 
-	intel_context_inflight_dec(rq->hw_context);
-	if (!intel_context_inflight_count(rq->hw_context))
-		rq->hw_context->inflight = NULL;
-
 	intel_gt_pm_put(rq->engine->gt);
 	i915_request_put(rq);
 }
@@ -556,6 +555,11 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
 		last = NULL;
 	}
 
+	/*
+	 * We write directly into the execlists->inflight queue and don't use
+	 * the execlists->pending queue, as we don't have a distinct switch
+	 * event.
+	 */
 	port = first;
 	while ((rb = rb_first_cached(&execlists->queue))) {
 		struct i915_priolist *p = to_priolist(rb);
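[Editorial illustration, not from the patch: the comment above distinguishes filling the NULL-terminated inflight port array directly from staging submissions in a pending array first. A stand-alone C sketch of that direct-fill pattern; the array size and names here are assumptions, not the i915 definitions:]

	#include <stdio.h>

	#define MAX_PORTS 2

	struct request { int id; };

	int main(void)
	{
		struct request a = { 1 }, b = { 2 };
		/* One extra slot keeps the array NULL-terminated, as the
		 * i915 execlists port arrays are. */
		struct request *inflight[MAX_PORTS + 1] = { NULL };
		struct request **port = inflight;

		*port++ = &a;	/* submit straight into the inflight ports */
		*port++ = &b;
		/* No pending[] -> inflight[] promotion step: the GuC path
		 * has no distinct switch event to trigger it. */

		for (port = inflight; *port; port++)
			printf("inflight request %d\n", (*port)->id);
		return 0;
	}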
@@ -636,6 +640,19 @@ static void guc_reset_prepare(struct intel_engine_cs *engine)
 	__tasklet_disable_sync_once(&execlists->tasklet);
 }
 
+static void
+cancel_port_requests(struct intel_engine_execlists * const execlists)
+{
+	struct i915_request * const *port, *rq;
+
+	/* Note we are only using the inflight and not the pending queue */
+
+	for (port = execlists->active; (rq = *port); port++)
+		schedule_out(rq);
+	execlists->active =
+		memset(execlists->inflight, 0, sizeof(execlists->inflight));
+}
+
 static void guc_reset(struct intel_engine_cs *engine, bool stalled)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
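[Editorial note on the memset() idiom in the new function, illustration only: memset() returns its first argument, so assigning its result both clears the inflight array and rewinds the active cursor to the first slot in a single statement. A small stand-alone demonstration:]

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		void *inflight[3] = { (void *)0x1, (void *)0x2, NULL };
		void **active = &inflight[1];	/* cursor mid-array */

		/* memset() returns its first argument, so one expression
		 * zeroes the ports and resets the cursor to the start. */
		active = memset(inflight, 0, sizeof(inflight));

		printf("rewound: %d, cleared: %d\n",
		       active == &inflight[0], inflight[0] == NULL);
		return 0;
	}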
@@ -644,7 +661,7 @@ static void guc_reset(struct intel_engine_cs *engine, bool stalled)
 
 	spin_lock_irqsave(&engine->active.lock, flags);
 
-	execlists_cancel_port_requests(execlists);
+	cancel_port_requests(execlists);
 
 	/* Push back any incomplete requests for replay after the reset. */
 	rq = execlists_unwind_incomplete_requests(execlists);
@@ -687,7 +704,7 @@ static void guc_cancel_requests(struct intel_engine_cs *engine)
 	spin_lock_irqsave(&engine->active.lock, flags);
 
 	/* Cancel the requests on the HW and clear the ELSP tracker. */
-	execlists_cancel_port_requests(execlists);
+	cancel_port_requests(execlists);
 
 	/* Mark all executing requests as skipped. */
 	list_for_each_entry(rq, &engine->active.requests, sched.link) {