author    | Chris Wilson <chris@chris-wilson.co.uk> | 2019-01-09 21:59:32 +0000
committer | Chris Wilson <chris@chris-wilson.co.uk> | 2019-01-09 22:23:31 +0000
commit    | d22ba0cb1ffeffa5bf37b7cfab7958706daffee4 (patch)
tree      | 4b94fd921be0a7f03911d8623ba56102fc2c1789 /drivers
parent    | 4e8052af858a6ea2cf656bdb6dbcf16dd87a39c1 (diff)
drm/i915: Reduce i915_request_alloc retirement to local context
In the continual quest to reduce the amount of global work required when
submitting requests, replace the call to i915_retire_requests() after an
allocation failure with retiring just our own ring.
v2: Don't forget that the list iteration includes an early break, so we
would never throttle on the last request in the ring/timeline.
v3: Use the common ring_retire_requests()
References: 11abf0c5a021 ("drm/i915: Limit the backpressure for i915_request allocation")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190109215932.26454-1-chris@chris-wilson.co.uk
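
For readers outside the i915 codebase, the sketch below (ordinary userspace C, not kernel code) illustrates the pattern the patch adopts: when a request allocation fails, reclaim only the completed entries at the head of the caller's own ring before retrying, instead of retiring requests on every ring globally. All struct and function names here are hypothetical stand-ins, not the real i915 types.

```c
/*
 * Standalone sketch of ring-local retirement on the allocation slow path.
 * Hypothetical types; the real driver uses intel_ring / i915_request.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct request {
	bool completed;        /* set once this request's work has finished */
	struct request *next;  /* requests are kept in submission order */
};

struct ring {
	struct request *head;  /* oldest outstanding request first */
};

/* Retire (free) completed requests from the front of this ring only. */
static void ring_retire_requests(struct ring *ring)
{
	while (ring->head && ring->head->completed) {
		struct request *done = ring->head;

		ring->head = done->next;
		free(done);
	}
}

/* Slow path: on allocation pressure, reclaim from our own ring, then retry. */
static struct request *request_alloc_slow(struct ring *ring)
{
	if (ring->head) {
		/* Throttle against our own oldest work before retrying. */
		ring_retire_requests(ring);
	}
	return calloc(1, sizeof(struct request));
}

int main(void)
{
	struct ring ring = { .head = NULL };
	struct request *rq = request_alloc_slow(&ring);

	printf("allocated %p after local retirement\n", (void *)rq);
	free(rq);
	return 0;
}
```

The key property of the local walk is the early break: retirement stops at the first incomplete request, so the slow path only pays for work already known to be done on this ring rather than scanning every timeline in the device.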
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/gpu/drm/i915/i915_request.c | 55
1 file changed, 33 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 346418c942a2..f3c3593362ec 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -477,6 +477,38 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 	return NOTIFY_DONE;
 }
 
+static void ring_retire_requests(struct intel_ring *ring)
+{
+	struct i915_request *rq, *rn;
+
+	list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link) {
+		if (!i915_request_completed(rq))
+			break;
+
+		i915_request_retire(rq);
+	}
+}
+
+static noinline struct i915_request *
+i915_request_alloc_slow(struct intel_context *ce)
+{
+	struct intel_ring *ring = ce->ring;
+	struct i915_request *rq;
+
+	if (list_empty(&ring->request_list))
+		goto out;
+
+	/* Ratelimit ourselves to prevent oom from malicious clients */
+	rq = list_last_entry(&ring->request_list, typeof(*rq), ring_link);
+	cond_synchronize_rcu(rq->rcustate);
+
+	/* Retire our old requests in the hope that we free some */
+	ring_retire_requests(ring);
+
+out:
+	return kmem_cache_alloc(ce->gem_context->i915->requests, GFP_KERNEL);
+}
+
 /**
  * i915_request_alloc - allocate a request structure
  *
@@ -559,15 +591,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	rq = kmem_cache_alloc(i915->requests,
 			      GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
 	if (unlikely(!rq)) {
-		i915_retire_requests(i915);
-
-		/* Ratelimit ourselves to prevent oom from malicious clients */
-		rq = i915_gem_active_raw(&ce->ring->timeline->last_request,
-					 &i915->drm.struct_mutex);
-		if (rq)
-			cond_synchronize_rcu(rq->rcustate);
-
-		rq = kmem_cache_alloc(i915->requests, GFP_KERNEL);
+		rq = i915_request_alloc_slow(ce);
 		if (!rq) {
 			ret = -ENOMEM;
 			goto err_unreserve;
@@ -1218,19 +1242,6 @@ complete:
 	return timeout;
 }
 
-static void ring_retire_requests(struct intel_ring *ring)
-{
-	struct i915_request *request, *next;
-
-	list_for_each_entry_safe(request, next,
-				 &ring->request_list, ring_link) {
-		if (!i915_request_completed(request))
-			break;
-
-		i915_request_retire(request);
-	}
-}
-
 void i915_retire_requests(struct drm_i915_private *i915)
 {
 	struct intel_ring *ring, *tmp;