author | Mika Kuoppala <mika.kuoppala@linux.intel.com> | 2017-09-22 15:43:03 +0300
committer | Mika Kuoppala <mika.kuoppala@intel.com> | 2017-09-25 11:33:23 +0300
commit | b620e870218ebe75b8221c7596b46e36d8329c85 (patch)
tree | a972aa2a25eebfb9c92b2537cbf16a6af1b2b9d0 /drivers/gpu/drm
parent | d27ffc1d00327c29b3aa97f941b42f0949f9e99f (diff)
drm/i915: Make own struct for execlist items
The engine's execlist-related items have grown to the point where
a separate struct is warranted. Carve the execlist-specific items
out into a dedicated struct to add clarity.
v2: add kerneldoc and fix whitespace (Joonas, Chris)
v3: csb_mmio changes, rebase
v4: s/\b(el|execlist)\b/execlists/ (Joonas)
Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com>
Acked-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com> (v3)
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> (v3)
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20170922124307.10914-1-mika.kuoppala@intel.com
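As a rough sketch of the pattern described above (simplified stand-in types, not the i915 structures themselves), the refactor replaces loose execlist_* fields on the engine struct with a single embedded substruct, so call sites change from engine->execlist_queue to engine->execlists.queue:

#include <linux/rbtree.h>

/* Before: execlist state scattered across the engine struct. */
struct engine_before {
	struct rb_root execlist_queue;   /* priority-sorted request queue */
	struct rb_node *execlist_first;  /* cached leftmost (highest priority) node */
	unsigned int csb_head;           /* context status buffer head */
};

/* After: the same state carved out into one named substruct. */
struct engine_execlists_sketch {
	struct rb_root queue;
	struct rb_node *first;
	unsigned int csb_head;
};

struct engine_after {
	struct engine_execlists_sketch execlists;
};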
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r-- | drivers/gpu/drm/i915/i915_debugfs.c | 8
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem.c | 6
-rw-r--r-- | drivers/gpu/drm/i915/i915_gpu_error.c | 4
-rw-r--r-- | drivers/gpu/drm/i915/i915_guc_submission.c | 31
-rw-r--r-- | drivers/gpu/drm/i915/i915_irq.c | 5
-rw-r--r-- | drivers/gpu/drm/i915/intel_engine_cs.c | 12
-rw-r--r-- | drivers/gpu/drm/i915/intel_lrc.c | 100
-rw-r--r-- | drivers/gpu/drm/i915/intel_ringbuffer.h | 100
8 files changed, 167 insertions, 99 deletions
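The diff below leans heavily on the port_request()/port_count()/port_unpack() helpers. They stash a small submission counter in the low bits of the (aligned) request pointer. This is a minimal sketch of that trick with simplified stand-ins for i915's ptr_pack_bits() family, assuming the pointers are aligned to at least 1 << COUNT_BITS bytes so the low bits are otherwise zero:

#include <stdint.h>

#define COUNT_BITS 2  /* mirrors EXECLIST_COUNT_BITS in the diff */
#define COUNT_MASK ((uintptr_t)((1 << COUNT_BITS) - 1))

/* Store a small counter in the otherwise-zero low bits of an aligned pointer. */
static inline void *port_pack_sketch(void *ptr, unsigned int count)
{
	return (void *)((uintptr_t)ptr | (count & COUNT_MASK));
}

/* Mask the counter bits off to recover the original pointer. */
static inline void *port_ptr_sketch(void *packed)
{
	return (void *)((uintptr_t)packed & ~COUNT_MASK);
}

/* Read the counter back out of the low bits. */
static inline unsigned int port_count_sketch(void *packed)
{
	return (unsigned int)((uintptr_t)packed & COUNT_MASK);
}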
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index b08ebed4e700..87e06da55a69 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -3323,7 +3323,7 @@ static int i915_engine_info(struct seq_file *m, void *unused) read = GEN8_CSB_READ_PTR(ptr); write = GEN8_CSB_WRITE_PTR(ptr); seq_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s\n", - read, engine->csb_head, + read, engine->execlists.csb_head, write, intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)), yesno(test_bit(ENGINE_IRQ_EXECLIST, @@ -3345,10 +3345,10 @@ static int i915_engine_info(struct seq_file *m, void *unused) } rcu_read_lock(); - for (idx = 0; idx < ARRAY_SIZE(engine->execlist_port); idx++) { + for (idx = 0; idx < ARRAY_SIZE(engine->execlists.port); idx++) { unsigned int count; - rq = port_unpack(&engine->execlist_port[idx], + rq = port_unpack(&engine->execlists.port[idx], &count); if (rq) { seq_printf(m, "\t\tELSP[%d] count=%d, ", @@ -3362,7 +3362,7 @@ static int i915_engine_info(struct seq_file *m, void *unused) rcu_read_unlock(); spin_lock_irq(&engine->timeline->lock); - for (rb = engine->execlist_first; rb; rb = rb_next(rb)){ + for (rb = engine->execlists.first; rb; rb = rb_next(rb)) { struct i915_priolist *p = rb_entry(rb, typeof(*p), node); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 12ce97d47afb..49bf5ddfa7fd 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2815,8 +2815,8 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine) * Turning off the engine->irq_tasklet until the reset is over * prevents the race. */ - tasklet_kill(&engine->irq_tasklet); - tasklet_disable(&engine->irq_tasklet); + tasklet_kill(&engine->execlists.irq_tasklet); + tasklet_disable(&engine->execlists.irq_tasklet); if (engine->irq_seqno_barrier) engine->irq_seqno_barrier(engine); @@ -2995,7 +2995,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv) void i915_gem_reset_finish_engine(struct intel_engine_cs *engine) { - tasklet_enable(&engine->irq_tasklet); + tasklet_enable(&engine->execlists.irq_tasklet); kthread_unpark(engine->breadcrumbs.signaler); } diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 6cd5eba643e8..20a1f034bf95 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1327,10 +1327,10 @@ static void engine_record_requests(struct intel_engine_cs *engine, static void error_record_engine_execlists(struct intel_engine_cs *engine, struct drm_i915_error_engine *ee) { - const struct execlist_port *port = engine->execlist_port; + const struct execlist_port *port = engine->execlists.port; unsigned int n; - for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) { + for (n = 0; n < ARRAY_SIZE(engine->execlists.port); n++) { struct drm_i915_gem_request *rq = port_request(&port[n]); if (!rq) diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index 06a26c610806..bce3f1b5892b 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -494,11 +494,12 @@ static void i915_guc_submit(struct intel_engine_cs *engine) struct drm_i915_private *dev_priv = engine->i915; struct intel_guc *guc = &dev_priv->guc; struct i915_guc_client *client = guc->execbuf_client; - struct execlist_port *port = engine->execlist_port; - unsigned int engine_id 
= engine->id; + struct intel_engine_execlists * const execlists = &engine->execlists; + struct execlist_port *port = execlists->port; + const unsigned int engine_id = engine->id; unsigned int n; - for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) { + for (n = 0; n < ARRAY_SIZE(execlists->port); n++) { struct drm_i915_gem_request *rq; unsigned int count; @@ -558,7 +559,8 @@ static void port_assign(struct execlist_port *port, static void i915_guc_dequeue(struct intel_engine_cs *engine) { - struct execlist_port *port = engine->execlist_port; + struct intel_engine_execlists * const execlists = &engine->execlists; + struct execlist_port *port = execlists->port; struct drm_i915_gem_request *last = NULL; bool submit = false; struct rb_node *rb; @@ -567,15 +569,15 @@ static void i915_guc_dequeue(struct intel_engine_cs *engine) port++; spin_lock_irq(&engine->timeline->lock); - rb = engine->execlist_first; - GEM_BUG_ON(rb_first(&engine->execlist_queue) != rb); + rb = execlists->first; + GEM_BUG_ON(rb_first(&execlists->queue) != rb); while (rb) { struct i915_priolist *p = rb_entry(rb, typeof(*p), node); struct drm_i915_gem_request *rq, *rn; list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) { if (last && rq->ctx != last->ctx) { - if (port != engine->execlist_port) { + if (port != execlists->port) { __list_del_many(&p->requests, &rq->priotree.link); goto done; @@ -596,13 +598,13 @@ static void i915_guc_dequeue(struct intel_engine_cs *engine) } rb = rb_next(rb); - rb_erase(&p->node, &engine->execlist_queue); + rb_erase(&p->node, &execlists->queue); INIT_LIST_HEAD(&p->requests); if (p->priority != I915_PRIORITY_NORMAL) kmem_cache_free(engine->i915->priorities, p); } done: - engine->execlist_first = rb; + execlists->first = rb; if (submit) { port_assign(port, last); i915_guc_submit(engine); @@ -612,8 +614,8 @@ done: static void i915_guc_irq_handler(unsigned long data) { - struct intel_engine_cs *engine = (struct intel_engine_cs *)data; - struct execlist_port *port = engine->execlist_port; + struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; + struct execlist_port *port = engine->execlists.port; struct drm_i915_gem_request *rq; rq = port_request(&port[0]); @@ -1144,7 +1146,7 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv) * and it is guaranteed that it will remove the work item from the * queue before our request is completed. */ - BUILD_BUG_ON(ARRAY_SIZE(engine->execlist_port) * + BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.port) * sizeof(struct guc_wq_item) * I915_NUM_ENGINES > GUC_WQ_SIZE); @@ -1175,14 +1177,15 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv) guc_interrupts_capture(dev_priv); for_each_engine(engine, dev_priv, id) { + struct intel_engine_execlists * const execlists = &engine->execlists; /* The tasklet was initialised by execlists, and may be in * a state of flux (across a reset) and so we just want to * take over the callback without changing any other state * in the tasklet. 
*/ - engine->irq_tasklet.func = i915_guc_irq_handler; + execlists->irq_tasklet.func = i915_guc_irq_handler; clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); - tasklet_schedule(&engine->irq_tasklet); + tasklet_schedule(&execlists->irq_tasklet); } return 0; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index b1bab7605db9..af82bd721dbc 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1346,10 +1346,11 @@ static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, static void gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift) { + struct intel_engine_execlists * const execlists = &engine->execlists; bool tasklet = false; if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) { - if (port_count(&engine->execlist_port[0])) { + if (port_count(&execlists->port[0])) { __set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); tasklet = true; } @@ -1361,7 +1362,7 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift) } if (tasklet) - tasklet_hi_schedule(&engine->irq_tasklet); + tasklet_hi_schedule(&execlists->irq_tasklet); } static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 020e4c6c0192..bf132266a007 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -393,8 +393,8 @@ static void intel_engine_init_timeline(struct intel_engine_cs *engine) */ void intel_engine_setup_common(struct intel_engine_cs *engine) { - engine->execlist_queue = RB_ROOT; - engine->execlist_first = NULL; + engine->execlists.queue = RB_ROOT; + engine->execlists.first = NULL; intel_engine_init_timeline(engine); intel_engine_init_hangcheck(engine); @@ -1475,11 +1475,11 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine) return false; /* Both ports drained, no more ELSP submission? */ - if (port_request(&engine->execlist_port[0])) + if (port_request(&engine->execlists.port[0])) return false; /* ELSP is empty, but there are ready requests? */ - if (READ_ONCE(engine->execlist_first)) + if (READ_ONCE(engine->execlists.first)) return false; /* Ring stopped? 
*/ @@ -1528,8 +1528,8 @@ void intel_engines_mark_idle(struct drm_i915_private *i915) for_each_engine(engine, i915, id) { intel_engine_disarm_breadcrumbs(engine); i915_gem_batch_pool_fini(&engine->batch_pool); - tasklet_kill(&engine->irq_tasklet); - engine->no_priolist = false; + tasklet_kill(&engine->execlists.irq_tasklet); + engine->execlists.no_priolist = false; } } diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 955c87999280..4f202b840e3d 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -291,17 +291,18 @@ lookup_priolist(struct intel_engine_cs *engine, struct i915_priotree *pt, int prio) { + struct intel_engine_execlists * const execlists = &engine->execlists; struct i915_priolist *p; struct rb_node **parent, *rb; bool first = true; - if (unlikely(engine->no_priolist)) + if (unlikely(execlists->no_priolist)) prio = I915_PRIORITY_NORMAL; find_priolist: /* most positive priority is scheduled first, equal priorities fifo */ rb = NULL; - parent = &engine->execlist_queue.rb_node; + parent = &execlists->queue.rb_node; while (*parent) { rb = *parent; p = rb_entry(rb, typeof(*p), node); @@ -316,7 +317,7 @@ find_priolist: } if (prio == I915_PRIORITY_NORMAL) { - p = &engine->default_priolist; + p = &execlists->default_priolist; } else { p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC); /* Convert an allocation failure to a priority bump */ @@ -331,7 +332,7 @@ find_priolist: * requests, so if userspace lied about their * dependencies that reordering may be visible. */ - engine->no_priolist = true; + execlists->no_priolist = true; goto find_priolist; } } @@ -339,10 +340,10 @@ find_priolist: p->priority = prio; INIT_LIST_HEAD(&p->requests); rb_link_node(&p->node, rb, parent); - rb_insert_color(&p->node, &engine->execlist_queue); + rb_insert_color(&p->node, &execlists->queue); if (first) - engine->execlist_first = &p->node; + execlists->first = &p->node; return ptr_pack_bits(p, first, 1); } @@ -393,12 +394,12 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq) static void execlists_submit_ports(struct intel_engine_cs *engine) { - struct execlist_port *port = engine->execlist_port; + struct execlist_port *port = engine->execlists.port; u32 __iomem *elsp = engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine)); unsigned int n; - for (n = ARRAY_SIZE(engine->execlist_port); n--; ) { + for (n = ARRAY_SIZE(engine->execlists.port); n--; ) { struct drm_i915_gem_request *rq; unsigned int count; u64 desc; @@ -453,7 +454,7 @@ static void port_assign(struct execlist_port *port, static void execlists_dequeue(struct intel_engine_cs *engine) { struct drm_i915_gem_request *last; - struct execlist_port *port = engine->execlist_port; + struct execlist_port *port = engine->execlists.port; struct rb_node *rb; bool submit = false; @@ -491,8 +492,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine) */ spin_lock_irq(&engine->timeline->lock); - rb = engine->execlist_first; - GEM_BUG_ON(rb_first(&engine->execlist_queue) != rb); + rb = engine->execlists.first; + GEM_BUG_ON(rb_first(&engine->execlists.queue) != rb); while (rb) { struct i915_priolist *p = rb_entry(rb, typeof(*p), node); struct drm_i915_gem_request *rq, *rn; @@ -515,7 +516,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) * combine this request with the last, then we * are done. 
*/ - if (port != engine->execlist_port) { + if (port != engine->execlists.port) { __list_del_many(&p->requests, &rq->priotree.link); goto done; @@ -552,13 +553,13 @@ static void execlists_dequeue(struct intel_engine_cs *engine) } rb = rb_next(rb); - rb_erase(&p->node, &engine->execlist_queue); + rb_erase(&p->node, &engine->execlists.queue); INIT_LIST_HEAD(&p->requests); if (p->priority != I915_PRIORITY_NORMAL) kmem_cache_free(engine->i915->priorities, p); } done: - engine->execlist_first = rb; + engine->execlists.first = rb; if (submit) port_assign(port, last); spin_unlock_irq(&engine->timeline->lock); @@ -569,7 +570,8 @@ done: static void execlists_cancel_requests(struct intel_engine_cs *engine) { - struct execlist_port *port = engine->execlist_port; + struct intel_engine_execlists * const execlists = &engine->execlists; + struct execlist_port *port = execlists->port; struct drm_i915_gem_request *rq, *rn; struct rb_node *rb; unsigned long flags; @@ -578,9 +580,9 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) spin_lock_irqsave(&engine->timeline->lock, flags); /* Cancel the requests on the HW and clear the ELSP tracker. */ - for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) + for (n = 0; n < ARRAY_SIZE(execlists->port); n++) i915_gem_request_put(port_request(&port[n])); - memset(engine->execlist_port, 0, sizeof(engine->execlist_port)); + memset(execlists->port, 0, sizeof(execlists->port)); /* Mark all executing requests as skipped. */ list_for_each_entry(rq, &engine->timeline->requests, link) { @@ -590,7 +592,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) } /* Flush the queued requests to the timeline list (for retiring). */ - rb = engine->execlist_first; + rb = execlists->first; while (rb) { struct i915_priolist *p = rb_entry(rb, typeof(*p), node); @@ -603,7 +605,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) } rb = rb_next(rb); - rb_erase(&p->node, &engine->execlist_queue); + rb_erase(&p->node, &execlists->queue); INIT_LIST_HEAD(&p->requests); if (p->priority != I915_PRIORITY_NORMAL) kmem_cache_free(engine->i915->priorities, p); @@ -611,8 +613,8 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) /* Remaining _unready_ requests will be nop'ed when submitted */ - engine->execlist_queue = RB_ROOT; - engine->execlist_first = NULL; + execlists->queue = RB_ROOT; + execlists->first = NULL; GEM_BUG_ON(port_isset(&port[0])); /* @@ -628,7 +630,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) static bool execlists_elsp_ready(const struct intel_engine_cs *engine) { - const struct execlist_port *port = engine->execlist_port; + const struct execlist_port *port = engine->execlists.port; return port_count(&port[0]) + port_count(&port[1]) < 2; } @@ -639,8 +641,9 @@ static bool execlists_elsp_ready(const struct intel_engine_cs *engine) */ static void intel_lrc_irq_handler(unsigned long data) { - struct intel_engine_cs *engine = (struct intel_engine_cs *)data; - struct execlist_port *port = engine->execlist_port; + struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; + struct intel_engine_execlists * const execlists = &engine->execlists; + struct execlist_port *port = execlists->port; struct drm_i915_private *dev_priv = engine->i915; /* We can skip acquiring intel_runtime_pm_get() here as it was taken @@ -652,7 +655,7 @@ static void intel_lrc_irq_handler(unsigned long data) */ GEM_BUG_ON(!dev_priv->gt.awake); - intel_uncore_forcewake_get(dev_priv, 
engine->fw_domains); + intel_uncore_forcewake_get(dev_priv, execlists->fw_domains); /* Prefer doing test_and_clear_bit() as a two stage operation to avoid * imposing the cost of a locked atomic transaction when submitting a @@ -665,10 +668,10 @@ static void intel_lrc_irq_handler(unsigned long data) unsigned int head, tail; /* However GVT emulation depends upon intercepting CSB mmio */ - if (unlikely(engine->csb_use_mmio)) { + if (unlikely(execlists->csb_use_mmio)) { buf = (u32 * __force) (dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0))); - engine->csb_head = -1; /* force mmio read of CSB ptrs */ + execlists->csb_head = -1; /* force mmio read of CSB ptrs */ } /* The write will be ordered by the uncached read (itself @@ -682,19 +685,20 @@ static void intel_lrc_irq_handler(unsigned long data) * is set and we do a new loop. */ __clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); - if (unlikely(engine->csb_head == -1)) { /* following a reset */ + if (unlikely(execlists->csb_head == -1)) { /* following a reset */ head = readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine))); tail = GEN8_CSB_WRITE_PTR(head); head = GEN8_CSB_READ_PTR(head); - engine->csb_head = head; + execlists->csb_head = head; } else { const int write_idx = intel_hws_csb_write_index(dev_priv) - I915_HWS_CSB_BUF0_INDEX; - head = engine->csb_head; + head = execlists->csb_head; tail = READ_ONCE(buf[write_idx]); } + while (head != tail) { struct drm_i915_gem_request *rq; unsigned int status; @@ -748,8 +752,8 @@ static void intel_lrc_irq_handler(unsigned long data) !(status & GEN8_CTX_STATUS_ACTIVE_IDLE)); } - if (head != engine->csb_head) { - engine->csb_head = head; + if (head != execlists->csb_head) { + execlists->csb_head = head; writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, head << 8), dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine))); } @@ -758,7 +762,7 @@ static void intel_lrc_irq_handler(unsigned long data) if (execlists_elsp_ready(engine)) execlists_dequeue(engine); - intel_uncore_forcewake_put(dev_priv, engine->fw_domains); + intel_uncore_forcewake_put(dev_priv, execlists->fw_domains); } static void insert_request(struct intel_engine_cs *engine, @@ -769,7 +773,7 @@ static void insert_request(struct intel_engine_cs *engine, list_add_tail(&pt->link, &ptr_mask_bits(p, 1)->requests); if (ptr_unmask_bits(p, 1) && execlists_elsp_ready(engine)) - tasklet_hi_schedule(&engine->irq_tasklet); + tasklet_hi_schedule(&engine->execlists.irq_tasklet); } static void execlists_submit_request(struct drm_i915_gem_request *request) @@ -782,7 +786,7 @@ static void execlists_submit_request(struct drm_i915_gem_request *request) insert_request(engine, &request->priotree, request->priotree.priority); - GEM_BUG_ON(!engine->execlist_first); + GEM_BUG_ON(!engine->execlists.first); GEM_BUG_ON(list_empty(&request->priotree.link)); spin_unlock_irqrestore(&engine->timeline->lock, flags); @@ -1289,6 +1293,7 @@ static u8 gtiir[] = { static int gen8_init_common_ring(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; + struct intel_engine_execlists * const execlists = &engine->execlists; int ret; ret = intel_mocs_init_engine(engine); @@ -1321,11 +1326,11 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine) I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]), GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift); clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); - engine->csb_head = -1; + execlists->csb_head = -1; /* After a GPU reset, we may have 
requests to replay */ - if (!i915_modparams.enable_guc_submission && engine->execlist_first) - tasklet_schedule(&engine->irq_tasklet); + if (!i915_modparams.enable_guc_submission && execlists->first) + tasklet_schedule(&execlists->irq_tasklet); return 0; } @@ -1366,7 +1371,8 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine) static void reset_common_ring(struct intel_engine_cs *engine, struct drm_i915_gem_request *request) { - struct execlist_port *port = engine->execlist_port; + struct intel_engine_execlists * const execlists = &engine->execlists; + struct execlist_port *port = execlists->port; struct drm_i915_gem_request *rq, *rn; struct intel_context *ce; unsigned long flags; @@ -1383,9 +1389,9 @@ static void reset_common_ring(struct intel_engine_cs *engine, * guessing the missed context-switch events by looking at what * requests were completed. */ - for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) + for (n = 0; n < ARRAY_SIZE(execlists->port); n++) i915_gem_request_put(port_request(&port[n])); - memset(engine->execlist_port, 0, sizeof(engine->execlist_port)); + memset(execlists->port, 0, sizeof(execlists->port)); /* Push back any incomplete requests for replay after the reset. */ list_for_each_entry_safe_reverse(rq, rn, @@ -1719,8 +1725,8 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine) * Tasklet cannot be active at this point due intel_mark_active/idle * so this is just for documentation. */ - if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state))) - tasklet_kill(&engine->irq_tasklet); + if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->execlists.irq_tasklet.state))) + tasklet_kill(&engine->execlists.irq_tasklet); dev_priv = engine->i915; @@ -1744,7 +1750,7 @@ static void execlists_set_default_submission(struct intel_engine_cs *engine) engine->submit_request = execlists_submit_request; engine->cancel_requests = execlists_cancel_requests; engine->schedule = execlists_schedule; - engine->irq_tasklet.func = intel_lrc_irq_handler; + engine->execlists.irq_tasklet.func = intel_lrc_irq_handler; } static void @@ -1806,7 +1812,7 @@ logical_ring_setup(struct intel_engine_cs *engine) /* Intentionally left blank. */ engine->buffer = NULL; - engine->csb_use_mmio = irq_handler_force_mmio(dev_priv); + engine->execlists.csb_use_mmio = irq_handler_force_mmio(dev_priv); fw_domains = intel_uncore_forcewake_for_reg(dev_priv, RING_ELSP(engine), @@ -1820,9 +1826,9 @@ logical_ring_setup(struct intel_engine_cs *engine) RING_CONTEXT_STATUS_BUF_BASE(engine), FW_REG_READ); - engine->fw_domains = fw_domains; + engine->execlists.fw_domains = fw_domains; - tasklet_init(&engine->irq_tasklet, + tasklet_init(&engine->execlists.irq_tasklet, intel_lrc_irq_handler, (unsigned long)engine); logical_ring_default_vfuncs(engine); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 138116a3b537..421e769adb79 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -184,6 +184,84 @@ struct i915_priolist { int priority; }; +/** + * struct intel_engine_execlists - execlist submission queue and port state + * + * The struct intel_engine_execlists represents the combined logical state of + * driver and the hardware state for execlist mode of submission. 
+ */ +struct intel_engine_execlists { + /** + * @irq_tasklet: softirq tasklet for bottom handler + */ + struct tasklet_struct irq_tasklet; + + /** + * @default_priolist: priority list for I915_PRIORITY_NORMAL + */ + struct i915_priolist default_priolist; + + /** + * @no_priolist: priority lists disabled + */ + bool no_priolist; + + /** + * @port: execlist port states + * + * For each hardware ELSP (ExecList Submission Port) we keep + * track of the last request and the number of times we submitted + * that port to hw. We then count the number of times the hw reports + * a context completion or preemption. As only one context can + * be active on hw, we limit resubmission of context to port[0]. This + * is called Lite Restore, of the context. + */ + struct execlist_port { + /** + * @request_count: combined request and submission count + */ + struct drm_i915_gem_request *request_count; +#define EXECLIST_COUNT_BITS 2 +#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS) +#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS) +#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS) +#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS) +#define port_set(p, packed) ((p)->request_count = (packed)) +#define port_isset(p) ((p)->request_count) +#define port_index(p, e) ((p) - (e)->execlists.port) + + /** + * @context_id: context ID for port + */ + GEM_DEBUG_DECL(u32 context_id); + } port[2]; + + /** + * @queue: queue of requests, in priority lists + */ + struct rb_root queue; + + /** + * @first: leftmost level in priority @queue + */ + struct rb_node *first; + + /** + * @fw_domains: forcewake domains for irq tasklet + */ + unsigned int fw_domains; + + /** + * @csb_head: context status buffer head + */ + unsigned int csb_head; + + /** + * @csb_use_mmio: access csb through mmio, instead of hwsp + */ + bool csb_use_mmio; +}; + #define INTEL_ENGINE_CS_MAX_NAME 8 struct intel_engine_cs { @@ -380,27 +458,7 @@ struct intel_engine_cs { u32 *(*signal)(struct drm_i915_gem_request *req, u32 *cs); } semaphore; - /* Execlists */ - struct tasklet_struct irq_tasklet; - struct i915_priolist default_priolist; - bool no_priolist; - struct execlist_port { - struct drm_i915_gem_request *request_count; -#define EXECLIST_COUNT_BITS 2 -#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS) -#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS) -#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS) -#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS) -#define port_set(p, packed) ((p)->request_count = (packed)) -#define port_isset(p) ((p)->request_count) -#define port_index(p, e) ((p) - (e)->execlist_port) - GEM_DEBUG_DECL(u32 context_id); - } execlist_port[2]; - struct rb_root execlist_queue; - struct rb_node *execlist_first; - unsigned int fw_domains; - unsigned int csb_head; - bool csb_use_mmio; + struct intel_engine_execlists execlists; /* Contexts are pinned whilst they are active on the GPU. The last * context executed remains active whilst the GPU is idle - the |
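For orientation, the port[2] semantics documented in the new kerneldoc (one context active on hw, same-context resubmission coalesced into port[0] as a "lite restore") can be pictured with this simplified, hypothetical model — not the driver's actual submission path:

struct sketch_port {
	void *ctx;           /* context owning this port, NULL if idle */
	unsigned int count;  /* submissions outstanding on this port */
};

/* Coalesce same-context submissions into port[0] (lite restore);
 * a second context occupies port[1]; otherwise the request stays queued.
 * Returns the port index used, or -1 if both ports are busy. */
static int sketch_submit(struct sketch_port port[2], void *ctx)
{
	if (!port[0].ctx || port[0].ctx == ctx) {
		port[0].ctx = ctx;
		port[0].count++;
		return 0;
	}
	if (!port[1].ctx) {
		port[1].ctx = ctx;
		port[1].count = 1;
		return 1;
	}
	return -1;
}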