author		Ville Syrjälä <ville.syrjala@linux.intel.com>	2017-08-18 21:37:00 +0300
committer	Ville Syrjälä <ville.syrjala@linux.intel.com>	2017-09-14 17:15:51 +0300
commit		af722d280e8551e918b22e96f487824935470c9a (patch)
tree		f4900c0e673186b68c3d60a3832aa3879ea1cf63
parent		eb64343ca6dd03241579400e037f0e15e6831b0c (diff)
drm/i915: Rewrite GMCH irq handlers to avoid loops
Eliminate the loops from the gen2-4 irq handlers. Since we no longer use
MSI on these platforms the CPU interrupt is level triggered, so we
shouldn't need to play any tricks with IER to induce edges from IIR.
from IIR. IIR itself still detects only edges from PIPESTAT & co. on
gen4 but since IIR is double buffered and we only clear one bit per irq
handler invocation we can use the normal "clear PIPESTAT & co. -> clear
IIR" approach to ack the interrupts. On gen2 everything is level
triggered, and gen3 presumably follows either the gen2 or gen4 approach
since nothing else would really make sense.
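
In other words, each handler collapses to a single read/ack/handle pass.
As a condensed illustration only (one hypothetical function standing in
for the three real handlers in the diff below, locals and gen-specific
details trimmed):

	/* Condensed sketch of the new loop-free pattern.
	 * gmch_irq_handler_sketch is a made-up name; the real code is the
	 * i8xx/i915/i965 handlers below. */
	static irqreturn_t gmch_irq_handler_sketch(struct drm_i915_private *dev_priv)
	{
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 iir = I915_READ(IIR);

		if (iir == 0)
			return IRQ_NONE;

		/* ack the underlying status bits first ... */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
		/* ... then ack IIR once; no re-read loop needed since the
		 * CPU interrupt is level triggered without MSI */
		I915_WRITE(IIR, iir);

		/* now handle everything we just acked */
		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev_priv->engine[RCS]);
		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);

		return IRQ_HANDLED;
	}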
v2: Drop the IER tricks since we no longer use MSI
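
    (The dropped IER trick was the usual mask-IER-around-the-ack dance used
    to force a fresh edge for MSI. Purely for context, and not quoted from
    the removed code, it amounted to roughly:

	u32 ier = I915_READ(IER);	/* illustration only */
	I915_WRITE(IER, 0);		/* mask everything while we ack ... */
	/* ... ack PIPESTAT and IIR here ... */
	I915_WRITE(IER, ier);		/* ... then unmask so a still-set IIR regenerates an edge */
	POSTING_READ(IER);
    )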
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20170818183705.27850-12-ville.syrjala@linux.intel.com
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
-rw-r--r--	drivers/gpu/drm/i915/i915_irq.c | 143
1 file changed, 51 insertions(+), 92 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 27fd8e91084d..26569a00b40c 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -3697,8 +3697,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
 {
 	struct drm_device *dev = arg;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	u16 iir, new_iir;
-	irqreturn_t ret;
+	irqreturn_t ret = IRQ_NONE;
 
 	if (!intel_irqs_enabled(dev_priv))
 		return IRQ_NONE;
@@ -3706,34 +3705,31 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
 	disable_rpm_wakeref_asserts(dev_priv);
 
-	ret = IRQ_NONE;
-	iir = I915_READ16(IIR);
-	if (iir == 0)
-		goto out;
-
-	while (iir) {
+	do {
 		u32 pipe_stats[I915_MAX_PIPES] = {};
+		u16 iir;
 
-		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
-			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
+		iir = I915_READ16(IIR);
+		if (iir == 0)
+			break;
+
+		ret = IRQ_HANDLED;
 
 		/* Call regardless, as some status bits might not be
 		 * signalled in iir */
 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
 
 		I915_WRITE16(IIR, iir);
-		new_iir = I915_READ16(IIR); /* Flush posted writes */
 
 		if (iir & I915_USER_INTERRUPT)
 			notify_ring(dev_priv->engine[RCS]);
 
-		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
+		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
 
-		iir = new_iir;
-	}
-	ret = IRQ_HANDLED;
+		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
+	} while (0);
 
-out:
 	enable_rpm_wakeref_asserts(dev_priv);
 
 	return ret;
@@ -3809,8 +3805,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
 {
 	struct drm_device *dev = arg;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	u32 iir, new_iir;
-	int ret = IRQ_NONE;
+	irqreturn_t ret = IRQ_NONE;
 
 	if (!intel_irqs_enabled(dev_priv))
 		return IRQ_NONE;
@@ -3818,55 +3813,38 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
 	disable_rpm_wakeref_asserts(dev_priv);
 
-	iir = I915_READ(IIR);
 	do {
 		u32 pipe_stats[I915_MAX_PIPES] = {};
-		bool irq_received = iir != 0;
+		u32 hotplug_status = 0;
+		u32 iir;
 
-		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
-			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
+		iir = I915_READ(IIR);
+		if (iir == 0)
+			break;
+
+		ret = IRQ_HANDLED;
+
+		if (I915_HAS_HOTPLUG(dev_priv) &&
+		    iir & I915_DISPLAY_PORT_INTERRUPT)
+			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
 
 		/* Call regardless, as some status bits might not be
 		 * signalled in iir */
 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
 
-		if (!irq_received)
-			break;
-
-		/* Consume port. Then clear IIR or we'll miss events */
-		if (I915_HAS_HOTPLUG(dev_priv) &&
-		    iir & I915_DISPLAY_PORT_INTERRUPT) {
-			u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
-			if (hotplug_status)
-				i9xx_hpd_irq_handler(dev_priv, hotplug_status);
-		}
-
 		I915_WRITE(IIR, iir);
-		new_iir = I915_READ(IIR); /* Flush posted writes */
 
 		if (iir & I915_USER_INTERRUPT)
 			notify_ring(dev_priv->engine[RCS]);
 
-		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
+		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
 
-		/* With MSI, interrupts are only generated when iir
-		 * transitions from zero to nonzero. If another bit got
-		 * set while we were handling the existing iir bits, then
-		 * we would never get another interrupt.
-		 *
-		 * This is fine on non-MSI as well, as if we hit this path
-		 * we avoid exiting the interrupt handler only to generate
-		 * another one.
-		 *
-		 * Note that for MSI this could cause a stray interrupt report
-		 * if an interrupt landed in the time between writing IIR and
-		 * the posting read. This should be rare enough to never
-		 * trigger the 99% of 100,000 interrupts test for disabling
-		 * stray interrupts.
-		 */
-		ret = IRQ_HANDLED;
-		iir = new_iir;
-	} while (iir);
+		if (hotplug_status)
+			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
+
+		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
+	} while (0);
 
 	enable_rpm_wakeref_asserts(dev_priv);
 
@@ -3987,8 +3965,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
 {
 	struct drm_device *dev = arg;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	u32 iir, new_iir;
-	int ret = IRQ_NONE;
+	irqreturn_t ret = IRQ_NONE;
 
 	if (!intel_irqs_enabled(dev_priv))
 		return IRQ_NONE;
@@ -3996,58 +3973,40 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
 	disable_rpm_wakeref_asserts(dev_priv);
 
-	iir = I915_READ(IIR);
-
-	for (;;) {
+	do {
 		u32 pipe_stats[I915_MAX_PIPES] = {};
-		bool irq_received = iir != 0;
-
-		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
-			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
-
-		/* Call regardless, as some status bits might not be
-		 * signalled in iir */
-		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+		u32 hotplug_status = 0;
+		u32 iir;
 
-		if (!irq_received)
+		iir = I915_READ(IIR);
+		if (iir == 0)
 			break;
 
 		ret = IRQ_HANDLED;
 
-		/* Consume port. Then clear IIR or we'll miss events */
-		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
-			u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
-			if (hotplug_status)
-				i9xx_hpd_irq_handler(dev_priv, hotplug_status);
-		}
+		if (iir & I915_DISPLAY_PORT_INTERRUPT)
+			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+
+		/* Call regardless, as some status bits might not be
+		 * signalled in iir */
+		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
 
 		I915_WRITE(IIR, iir);
-		new_iir = I915_READ(IIR); /* Flush posted writes */
 
 		if (iir & I915_USER_INTERRUPT)
 			notify_ring(dev_priv->engine[RCS]);
+
 		if (iir & I915_BSD_USER_INTERRUPT)
 			notify_ring(dev_priv->engine[VCS]);
 
-		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
+		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
 
-		/* With MSI, interrupts are only generated when iir
-		 * transitions from zero to nonzero. If another bit got
-		 * set while we were handling the existing iir bits, then
-		 * we would never get another interrupt.
-		 *
-		 * This is fine on non-MSI as well, as if we hit this path
-		 * we avoid exiting the interrupt handler only to generate
-		 * another one.
-		 *
-		 * Note that for MSI this could cause a stray interrupt report
-		 * if an interrupt landed in the time between writing IIR and
-		 * the posting read. This should be rare enough to never
-		 * trigger the 99% of 100,000 interrupts test for disabling
-		 * stray interrupts.
-		 */
-		iir = new_iir;
-	}
+		if (hotplug_status)
+			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
+
+		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
+	} while (0);
 
 	enable_rpm_wakeref_asserts(dev_priv);