Diffstat (limited to 'kernel/sched/idle.c')
 kernel/sched/idle.c | 150 ++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 134 insertions(+), 16 deletions(-)
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index b7976a127178..8f4390a079c7 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -63,6 +63,136 @@ void __weak arch_cpu_idle(void)
 	local_irq_enable();
 }
 
+/**
+ * cpuidle_idle_call - the main idle function
+ *
+ * NOTE: no locks or semaphores should be used here
+ * Returns non-zero on failure.
+ */
+static int cpuidle_idle_call(void)
+{
+	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
+	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+	int next_state, entered_state, ret;
+	bool broadcast;
+
+	/*
+	 * Check if the idle task must be rescheduled. If so, exit the
+	 * function after re-enabling the local irq and setting the
+	 * polling flag again.
+	 */
+	if (current_clr_polling_and_test()) {
+		local_irq_enable();
+		__current_set_polling();
+		return 0;
+	}
+
+	/*
+	 * During the idle period, stop measuring the latencies of
+	 * irqs-disabled critical sections.
+	 */
+	stop_critical_timings();
+
+	/*
+	 * Tell the RCU framework we are entering an idle section:
+	 * no more rcu read-side critical sections, and one more
+	 * step towards the grace period.
+	 */
+	rcu_idle_enter();
+
+	/*
+	 * Check if the cpuidle framework is ready, otherwise fall
+	 * back to the default arch-specific idle method.
+	 */
+	ret = cpuidle_enabled(drv, dev);
+
+	if (!ret) {
+		/*
+		 * Ask the governor to choose the idle state it deems
+		 * most suitable to enter. There is *always* a
+		 * suitable idle state.
+		 */
+		next_state = cpuidle_select(drv, dev);
+
+		/*
+		 * The idle task must be rescheduled, so it is
+		 * pointless to enter idle: record a zero residency
+		 * and get out of this function.
+		 */
+		if (current_clr_polling_and_test()) {
+			dev->last_residency = 0;
+			entered_state = next_state;
+			local_irq_enable();
+		} else {
+			broadcast = !!(drv->states[next_state].flags &
+				       CPUIDLE_FLAG_TIMER_STOP);
+
+			if (broadcast)
+				/*
+				 * Tell the time framework to switch
+				 * to a broadcast timer because our
+				 * local timer will be shut down. If
+				 * a local timer is used from another
+				 * cpu as a broadcast timer, this
+				 * call may fail if it is not available.
+				 */
+				ret = clockevents_notify(
+					CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
+					&dev->cpu);
+
+			if (!ret) {
+				trace_cpu_idle_rcuidle(next_state, dev->cpu);
+
+				/*
+				 * Enter the idle state previously
+				 * selected by the governor. This
+				 * function will block until an
+				 * interrupt occurs and will take
+				 * care of re-enabling the local
+				 * interrupts.
+				 */
+				entered_state = cpuidle_enter(drv, dev,
+							      next_state);
+
+				trace_cpu_idle_rcuidle(PWR_EVENT_EXIT,
+						       dev->cpu);
+
+				if (broadcast)
+					clockevents_notify(
+						CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
+						&dev->cpu);
+
+				/*
+				 * Give the governor an opportunity to
+				 * reflect on the outcome.
+				 */
+				cpuidle_reflect(dev, entered_state);
+			}
+		}
+	}
+
+	/*
+	 * We can't use the cpuidle framework, so fall back to the
+	 * default idle routine.
+	 */
+	if (ret)
+		arch_cpu_idle();
+
+	__current_set_polling();
+
+	/*
+	 * It is up to the idle functions to re-enable the local
+	 * interrupts.
+	 */
+	if (WARN_ON_ONCE(irqs_disabled()))
+		local_irq_enable();
+
+	rcu_idle_exit();
+	start_critical_timings();
+
+	return 0;
+}
+
 /*
  * Generic idle loop implementation
  */
@@ -90,23 +220,11 @@ static void cpu_idle_loop(void)
 			 * know that the IPI is going to arrive right
 			 * away
 			 */
-			if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
+			if (cpu_idle_force_poll || tick_check_broadcast_expired())
 				cpu_idle_poll();
-			} else {
-				if (!current_clr_polling_and_test()) {
-					stop_critical_timings();
-					rcu_idle_enter();
-					if (cpuidle_idle_call())
-						arch_cpu_idle();
-					if (WARN_ON_ONCE(irqs_disabled()))
-						local_irq_enable();
-					rcu_idle_exit();
-					start_critical_timings();
-				} else {
-					local_irq_enable();
-				}
-				__current_set_polling();
-			}
+			else
+				cpuidle_idle_call();
+
 			arch_cpu_idle_exit();
 		}
 
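
A note on the broadcast branch in the new function: it only triggers for idle states that declare CPUIDLE_FLAG_TIMER_STOP, i.e. states deep enough that the CPU-local timer is powered down. Below is a minimal sketch of what such a state table entry looks like in a driver; the names, latency figures and the example_enter() callback are illustrative, not taken from this patch:

#include <linux/cpuidle.h>

/* Hypothetical enter callback: a real driver would execute the
 * arch-specific idle instruction (wfi, mwait, ...) here and return
 * the index of the state actually entered. */
static int example_enter(struct cpuidle_device *dev,
			 struct cpuidle_driver *drv, int index)
{
	return index;
}

/* A state flagged with CPUIDLE_FLAG_TIMER_STOP makes
 * cpuidle_idle_call() ask the clockevents layer for a broadcast
 * wakeup (CLOCK_EVT_NOTIFY_BROADCAST_ENTER) before entering it,
 * and release it (CLOCK_EVT_NOTIFY_BROADCAST_EXIT) afterwards. */
static struct cpuidle_state example_deep_state = {
	.name			= "C-deep-example",
	.desc			= "illustrative timer-stopping state",
	.flags			= CPUIDLE_FLAG_TIME_VALID |
				  CPUIDLE_FLAG_TIMER_STOP,
	.exit_latency		= 100,	/* usec, made up */
	.target_residency	= 400,	/* usec, made up */
	.enter			= example_enter,
};

If the broadcast request fails (another cpu's local timer is serving as the broadcast device and is unavailable), ret stays non-zero and the function falls back to arch_cpu_idle(), exactly as when cpuidle_enabled() fails.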
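The current_clr_polling_and_test()/__current_set_polling() pair at the top and bottom of the function is the idle task's handshake with remote CPUs: while the polling flag is set, a reschedule request does not need an IPI, because the idle loop will notice it by polling. A conceptual sketch of the test helper, assuming the TIF_POLLING_NRFLAG-based variant; the real helpers live in include/linux/sched.h and differ per architecture and configuration:

/* Conceptual sketch, not the kernel's exact code: drop the
 * "I am polling, no IPI needed" advertisement, then re-check
 * whether a reschedule was requested. The full barrier orders
 * the flag clear against the need_resched test, so a remote
 * set_tsk_need_resched() issued just before cannot be missed. */
static inline bool sketch_clr_polling_and_test(void)
{
	clear_thread_flag(TIF_POLLING_NRFLAG);
	smp_mb();
	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
}

This is why every early-exit path in cpuidle_idle_call() re-sets the polling flag before returning: the caller, cpu_idle_loop(), assumes the invariant "polling flag set while spinning in idle" still holds.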
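Finally, cpuidle_select() and cpuidle_reflect() dispatch to the active governor's hooks. A skeletal governor showing the contract cpuidle_idle_call() relies on; the always-pick-state-0 policy and the "sketch" name are placeholders, not a real governor:

#include <linux/cpuidle.h>

/* select() must return a valid state index: per the comment in
 * cpuidle_idle_call(), there is *always* a suitable idle state. */
static int sketch_select(struct cpuidle_driver *drv,
			 struct cpuidle_device *dev)
{
	return 0;	/* shallowest state, always available */
}

/* reflect() receives the state actually entered; a real governor
 * would compare its prediction against dev->last_residency here
 * (which the patch zeroes when idle is aborted before entry). */
static void sketch_reflect(struct cpuidle_device *dev, int index)
{
}

static struct cpuidle_governor sketch_governor = {
	.name		= "sketch",
	.rating		= 1,
	.select		= sketch_select,
	.reflect	= sketch_reflect,
};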