author     Rafael J. Wysocki <rafael.j.wysocki@intel.com>    2017-12-16 02:05:48 +0100
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>    2017-12-16 02:05:48 +0100
commit     c51a024e3913e9dbaf4dfcb9aaba825668a89ace (patch)
tree       9772af495ced0bde0d3c58b6e6fb5b395b3afae9 /drivers/base/power/main.c
parent     3487972d7fa6c5143951436ada5933dcf0ec659d (diff)
parent     34fb8f0ba9ceea88e116688f9f53e3802c38aafb (diff)
Merge back PM core material for v4.16.
Diffstat (limited to 'drivers/base/power/main.c')
-rw-r--r--    drivers/base/power/main.c    102
1 file changed, 84 insertions, 18 deletions
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 08744b572af6..6e8cc5de93fd 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -18,7 +18,6 @@
*/
#include <linux/device.h>
-#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
@@ -541,6 +540,18 @@ void dev_pm_skip_next_resume_phases(struct device *dev)
}
/**
+ * dev_pm_may_skip_resume - System-wide device resume optimization check.
+ * @dev: Target device.
+ *
+ * Checks whether or not the device may be left in suspend after a system-wide
+ * transition to the working state.
+ */
+bool dev_pm_may_skip_resume(struct device *dev)
+{
+ return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;
+}
+
+/**
* device_resume_noirq - Execute a "noirq resume" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
@@ -588,6 +599,18 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn
error = dpm_run_callback(callback, dev, state, info);
dev->power.is_noirq_suspended = false;
+ if (dev_pm_may_skip_resume(dev)) {
+ /*
+ * The device is going to be left in suspend, but it might not
+ * have been in runtime suspend before the system suspended, so
+ * its runtime PM status needs to be updated to avoid confusing
+ * the runtime PM framework when runtime PM is enabled for the
+ * device again.
+ */
+ pm_runtime_set_suspended(dev);
+ dev_pm_skip_next_resume_phases(dev);
+ }
+
Out:
complete_all(&dev->power.completion);
TRACE_RESUME(error);
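
For context, a minimal sketch (not part of this patch) of how a bus type or other middle layer might consume the new dev_pm_may_skip_resume() helper in its own noirq resume path. The bus name, the callback, the use of pm_generic_resume_noirq() as a stand-in for the normal resume work, and the header visibility of the helper are all illustrative assumptions:

	#include <linux/pm.h>
	#include <linux/pm_runtime.h>

	static int example_bus_resume_noirq(struct device *dev)
	{
		/* Leave the device in suspend if the core allows it. */
		if (dev_pm_may_skip_resume(dev))
			return 0;

		return pm_generic_resume_noirq(dev);
	}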
@@ -1089,6 +1112,22 @@ static pm_message_t resume_event(pm_message_t sleep_state)
return PMSG_ON;
}
+static void dpm_superior_set_must_resume(struct device *dev)
+{
+ struct device_link *link;
+ int idx;
+
+ if (dev->parent)
+ dev->parent->power.must_resume = true;
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ link->supplier->power.must_resume = true;
+
+ device_links_read_unlock(idx);
+}
+
/**
* __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
* @dev: Device to handle.
@@ -1140,10 +1179,28 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
}
error = dpm_run_callback(callback, dev, state, info);
- if (!error)
- dev->power.is_noirq_suspended = true;
- else
+ if (error) {
async_error = error;
+ goto Complete;
+ }
+
+ dev->power.is_noirq_suspended = true;
+
+ if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
+ /*
+ * The only safe strategy here is to require that if the device
+ * may not be left in suspend, resume callbacks must be invoked
+ * for it.
+ */
+ dev->power.must_resume = dev->power.must_resume ||
+ !dev->power.may_skip_resume ||
+ atomic_read(&dev->power.usage_count) > 1;
+ } else {
+ dev->power.must_resume = true;
+ }
+
+ if (dev->power.must_resume)
+ dpm_superior_set_must_resume(dev);
Complete:
complete_all(&dev->power.completion);
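
Also for illustration only: power.may_skip_resume, which feeds the must_resume computation above, is meant to be set while the device is being suspended, before the core evaluates it in this noirq phase. A hypothetical middle-layer noirq suspend callback could signal it like this; the callback name and the unconditional opt-in policy are assumptions, not part of this patch:

	static int example_bus_suspend_noirq(struct device *dev)
	{
		int error = pm_generic_suspend_noirq(dev);

		if (error)
			return error;

		/*
		 * Tell the core that, from this layer's point of view,
		 * resume callbacks may be skipped for this device.
		 */
		dev->power.may_skip_resume = true;
		return 0;
	}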
@@ -1435,6 +1492,22 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
return error;
}
+static void dpm_propagate_to_parent(struct device *dev)
+{
+ struct device *parent = dev->parent;
+
+ if (!parent)
+ return;
+
+ spin_lock_irq(&parent->power.lock);
+
+ parent->power.direct_complete = false;
+ if (dev->power.wakeup_path && !parent->power.ignore_children)
+ parent->power.wakeup_path = true;
+
+ spin_unlock_irq(&parent->power.lock);
+}
+
static void dpm_clear_suppliers_direct_complete(struct device *dev)
{
struct device_link *link;
@@ -1500,6 +1573,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
dev->power.direct_complete = false;
}
+ dev->power.may_skip_resume = false;
+ dev->power.must_resume = false;
+
dpm_watchdog_set(&wd, dev);
device_lock(dev);
@@ -1543,19 +1619,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
End:
if (!error) {
- struct device *parent = dev->parent;
-
dev->power.is_suspended = true;
- if (parent) {
- spin_lock_irq(&parent->power.lock);
-
- dev->parent->power.direct_complete = false;
- if (dev->power.wakeup_path
- && !dev->parent->power.ignore_children)
- dev->parent->power.wakeup_path = true;
-
- spin_unlock_irq(&parent->power.lock);
- }
+ dpm_propagate_to_parent(dev);
dpm_clear_suppliers_direct_complete(dev);
}
@@ -1665,8 +1730,9 @@ static int device_prepare(struct device *dev, pm_message_t state)
if (dev->power.syscore)
return 0;
- WARN_ON(dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
- !pm_runtime_enabled(dev));
+ WARN_ON(!pm_runtime_enabled(dev) &&
+ dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
+ DPM_FLAG_LEAVE_SUSPENDED));
/*
* If a device's parent goes into runtime suspend at the wrong time,
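
DPM_FLAG_LEAVE_SUSPENDED checked above is a driver opt-in. A minimal, hypothetical probe sketch for a platform driver, using the existing dev_pm_set_driver_flags() helper and enabling runtime PM so the WARN_ON() added above does not trigger (the driver and function names are illustrative):

	#include <linux/platform_device.h>
	#include <linux/pm.h>
	#include <linux/pm_runtime.h>

	static int example_probe(struct platform_device *pdev)
	{
		/* Runtime PM must be enabled for the flag check in device_prepare(). */
		pm_runtime_enable(&pdev->dev);

		/*
		 * Allow the device to be left in suspend after a system-wide
		 * resume, subject to the may_skip_resume/must_resume logic above.
		 */
		dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_LEAVE_SUSPENDED);
		return 0;
	}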