author    Mark Brown <broonie@kernel.org>    2020-07-15 17:36:10 +0100
committer Mark Brown <broonie@kernel.org>    2020-07-17 00:55:23 +0100
commit    e126859729ed4a5143e5690186b8bec1c1157113
tree      e5af46301959fcc4461425477da4f6b0ca856b35  /drivers/spi/spi.c
parent    2ae3de10abfe0be40c9d93ebc2f429b969abf008
spi: Only defer to thread for cleanup when needed
Currently we always defer idling of controllers to the SPI thread. The goal is to ensure that teardown which is not suitable for atomic context runs in an appropriate context, and to batch up more expensive teardown operations when the system is under higher load, allowing more work to be started before the SPI thread is scheduled. However, when the controller does not require any substantial work to idle there is no need for this: we can save the context switch and immediately mark the controller as idle instead. This is particularly useful for systems with frequent but not constant activity.

Signed-off-by: Mark Brown <broonie@kernel.org>
Link: https://lore.kernel.org/r/20200715163610.9475-1-broonie@kernel.org
Signed-off-by: Mark Brown <broonie@kernel.org>
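The decision the patch adds is easy to model outside the kernel. The sketch below is a plain user-space C analogue, not kernel code: `has_dummy_buffers`, `has_unprepare_hook`, `mark_idle()` and `queue_to_thread()` are illustrative stand-ins for ctlr->dummy_rx/dummy_tx, ctlr->unprepare_transfer_hardware, the new inline idle path and kthread_queue_work() respectively.

#include <stdbool.h>
#include <stdio.h>

struct controller {
	bool busy;
	bool has_dummy_buffers;   /* stand-in for ctlr->dummy_rx / ctlr->dummy_tx */
	bool has_unprepare_hook;  /* stand-in for ctlr->unprepare_transfer_hardware */
};

/* Inline idle: cheap, safe in atomic context (models the new fast path). */
static void mark_idle(struct controller *c)
{
	c->busy = false;
	printf("idled inline, no context switch\n");
}

/* Deferred teardown: models kthread_queue_work(ctlr->kworker, ...). */
static void queue_to_thread(struct controller *c)
{
	(void)c;
	printf("teardown deferred to the SPI thread\n");
}

static void finish(struct controller *c, bool in_kthread)
{
	if (!in_kthread) {
		/* Only idle immediately when no non-atomic teardown is pending */
		if (!c->has_dummy_buffers && !c->has_unprepare_hook)
			mark_idle(c);
		else
			queue_to_thread(c);
	}
}

int main(void)
{
	struct controller cheap = { .busy = true };
	struct controller expensive = { .busy = true, .has_unprepare_hook = true };

	finish(&cheap, false);      /* prints: idled inline ... */
	finish(&expensive, false);  /* prints: teardown deferred ... */
	return 0;
}

The real change takes the same shape: spi_idle_runtime_pm() is factored out first, so the inline fast path and the threaded teardown path release runtime PM the same way.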
Diffstat (limited to 'drivers/spi/spi.c')
 drivers/spi/spi.c | 26 +++++++++++++++++++-------
 1 file changed, 19 insertions(+), 7 deletions(-)
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 1d7bba434225..0b260484b4f5 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1336,6 +1336,14 @@ void spi_finalize_current_transfer(struct spi_controller *ctlr)
 }
 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
 
+static void spi_idle_runtime_pm(struct spi_controller *ctlr)
+{
+	if (ctlr->auto_runtime_pm) {
+		pm_runtime_mark_last_busy(ctlr->dev.parent);
+		pm_runtime_put_autosuspend(ctlr->dev.parent);
+	}
+}
+
 /**
  * __spi_pump_messages - function which processes spi message queue
  * @ctlr: controller to process queue for
@@ -1380,10 +1388,17 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 			return;
 		}
 
-		/* Only do teardown in the thread */
+		/* Defer any non-atomic teardown to the thread */
 		if (!in_kthread) {
-			kthread_queue_work(ctlr->kworker,
-					   &ctlr->pump_messages);
+			if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
+			    !ctlr->unprepare_transfer_hardware) {
+				spi_idle_runtime_pm(ctlr);
+				ctlr->busy = false;
+				trace_spi_controller_idle(ctlr);
+			} else {
+				kthread_queue_work(ctlr->kworker,
+						   &ctlr->pump_messages);
+			}
 			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 			return;
 		}
@@ -1400,10 +1415,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 		    ctlr->unprepare_transfer_hardware(ctlr))
 			dev_err(&ctlr->dev,
 				"failed to unprepare transfer hardware\n");
-		if (ctlr->auto_runtime_pm) {
-			pm_runtime_mark_last_busy(ctlr->dev.parent);
-			pm_runtime_put_autosuspend(ctlr->dev.parent);
-		}
+		spi_idle_runtime_pm(ctlr);
 		trace_spi_controller_idle(ctlr);
 
 		spin_lock_irqsave(&ctlr->queue_lock, flags);