author     Rafael J. Wysocki <rafael.j.wysocki@intel.com>    2016-09-10 00:00:31 +0200
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>    2016-09-13 23:36:01 +0200
commit     21ca6d2c52f8ca8638129c1dfc489d0b0ae68532 (patch)
tree       e94b64e65de87721ad94784585ed5c3b53e64257 /kernel
parent     8c34ab1910a79319731107ec8ecd2e80893ea30c (diff)
cpufreq: schedutil: Add iowait boosting
Modify the schedutil cpufreq governor to boost the CPU frequency if the
SCHED_CPUFREQ_IOWAIT flag is passed to it via cpufreq_update_util(). If that
happens, the frequency is set to the maximum during the first update after
receiving the SCHED_CPUFREQ_IOWAIT flag and then the boost is reduced by half
during each following update.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Looks-good-to: Steve Muckle <smuckle@linaro.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
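The halving is easiest to see with concrete numbers. The following is a minimal
user-space sketch of that decay, not the kernel code in the diff below; the
capacity scale of 1024 and the steady measured utilization of 100 are
illustrative assumptions.

#include <stdio.h>

/*
 * Illustrative sketch only: the boost starts at the policy maximum on an
 * iowait wakeup and is halved on every subsequent update, overriding the
 * measured utilization only while it is still the larger of the two.
 */
int main(void)
{
	unsigned long max = 1024;              /* assumed capacity scale */
	unsigned long iowait_boost_max = 1024; /* boost starts at "max freq" */
	unsigned long iowait_boost = iowait_boost_max;
	unsigned long util = 100;              /* assumed steady utilization */

	for (int update = 1; iowait_boost; update++) {
		unsigned long eff_util = util;
		unsigned long eff_max = max;

		/* Use the boost only while it exceeds the real load. */
		if (eff_util * iowait_boost_max < eff_max * iowait_boost) {
			eff_util = iowait_boost;
			eff_max = iowait_boost_max;
		}
		printf("update %d: effective util = %lu / %lu\n",
		       update, eff_util, eff_max);

		iowait_boost >>= 1;	/* halve the boost for the next update */
	}
	return 0;
}

With these numbers the boost dominates the measured utilization for the first
four updates (1024, 512, 256, 128) and then drops out of the picture once it
falls below the real load.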
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/cpufreq_schedutil.c  53
1 file changed, 49 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index cb8a77b1ef1b..69e06898997d 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -47,11 +47,13 @@ struct sugov_cpu {
struct sugov_policy *sg_policy;
unsigned int cached_raw_freq;
+ unsigned long iowait_boost;
+ unsigned long iowait_boost_max;
+ u64 last_update;
/* The fields below are only needed when sharing a policy. */
unsigned long util;
unsigned long max;
- u64 last_update;
unsigned int flags;
};
@@ -155,6 +157,36 @@ static void sugov_get_util(unsigned long *util, unsigned long *max)
*max = cfs_max;
}
+static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
+ unsigned int flags)
+{
+ if (flags & SCHED_CPUFREQ_IOWAIT) {
+ sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
+ } else if (sg_cpu->iowait_boost) {
+ s64 delta_ns = time - sg_cpu->last_update;
+
+ /* Clear iowait_boost if the CPU appears to have been idle. */
+ if (delta_ns > TICK_NSEC)
+ sg_cpu->iowait_boost = 0;
+ }
+}
+
+static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
+ unsigned long *max)
+{
+ unsigned long boost_util = sg_cpu->iowait_boost;
+ unsigned long boost_max = sg_cpu->iowait_boost_max;
+
+ if (!boost_util)
+ return;
+
+ if (*util * boost_max < *max * boost_util) {
+ *util = boost_util;
+ *max = boost_max;
+ }
+ sg_cpu->iowait_boost >>= 1;
+}
+
static void sugov_update_single(struct update_util_data *hook, u64 time,
unsigned int flags)
{
@@ -164,6 +196,9 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
unsigned long util, max;
unsigned int next_f;
+ sugov_set_iowait_boost(sg_cpu, time, flags);
+ sg_cpu->last_update = time;
+
if (!sugov_should_update_freq(sg_policy, time))
return;
@@ -171,6 +206,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
next_f = policy->cpuinfo.max_freq;
} else {
sugov_get_util(&util, &max);
+ sugov_iowait_boost(sg_cpu, &util, &max);
next_f = get_next_freq(sg_cpu, util, max);
}
sugov_update_commit(sg_policy, time, next_f);
@@ -189,6 +225,8 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu,
if (flags & SCHED_CPUFREQ_RT_DL)
return max_f;
+ sugov_iowait_boost(sg_cpu, &util, &max);
+
for_each_cpu(j, policy->cpus) {
struct sugov_cpu *j_sg_cpu;
unsigned long j_util, j_max;
@@ -203,12 +241,13 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu,
* frequency update and the time elapsed between the last update
* of the CPU utilization and the last frequency update is long
* enough, don't take the CPU into account as it probably is
- * idle now.
+ * idle now (and clear iowait_boost for it).
*/
delta_ns = last_freq_update_time - j_sg_cpu->last_update;
- if (delta_ns > TICK_NSEC)
+ if (delta_ns > TICK_NSEC) {
+ j_sg_cpu->iowait_boost = 0;
continue;
-
+ }
if (j_sg_cpu->flags & SCHED_CPUFREQ_RT_DL)
return max_f;
@@ -218,6 +257,8 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu,
util = j_util;
max = j_max;
}
+
+ sugov_iowait_boost(j_sg_cpu, &util, &max);
}
return get_next_freq(sg_cpu, util, max);
@@ -238,6 +279,8 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
sg_cpu->util = util;
sg_cpu->max = max;
sg_cpu->flags = flags;
+
+ sugov_set_iowait_boost(sg_cpu, time, flags);
sg_cpu->last_update = time;
if (sugov_should_update_freq(sg_policy, time)) {
@@ -470,6 +513,8 @@ static int sugov_start(struct cpufreq_policy *policy)
sg_cpu->flags = SCHED_CPUFREQ_RT;
sg_cpu->last_update = 0;
sg_cpu->cached_raw_freq = 0;
+ sg_cpu->iowait_boost = 0;
+ sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
sugov_update_shared);
} else {