author | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2021-04-26 16:56:50 +0200
committer | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2021-04-26 16:56:50 +0200
commit | dd9f2ae92481ef9454e3fdce9d78f790c49cfadd (patch)
tree | 3a5a677d2c794bdd83b4d6023a9853c52ee891c6 /kernel
parent | 71f4dd34419f46435da717c72e93e9b03ff41a3f (diff)
parent | 733dda9cc849895349b2a64f398aeb56e437f99f (diff)
Merge branch 'pm-cpufreq'
* pm-cpufreq: (22 commits)
cpufreq: Kconfig: fix documentation links
cpufreq: intel_pstate: Simplify intel_pstate_update_perf_limits()
cpufreq: armada-37xx: Fix module unloading
cpufreq: armada-37xx: Remove cur_frequency variable
cpufreq: armada-37xx: Fix determining base CPU frequency
cpufreq: armada-37xx: Fix driver cleanup when registration failed
clk: mvebu: armada-37xx-periph: Fix workaround for switching from L1 to L0
clk: mvebu: armada-37xx-periph: Fix switching CPU freq from 250 Mhz to 1 GHz
cpufreq: armada-37xx: Fix the AVS value for load L1
clk: mvebu: armada-37xx-periph: remove .set_parent method for CPU PM clock
cpufreq: armada-37xx: Fix setting TBG parent for load levels
cpufreq: Remove unused for_each_policy macro
cpufreq: dt: dev_pm_opp_of_cpumask_add_table() may return -EPROBE_DEFER
cpufreq: intel_pstate: Clean up frequency computations
cpufreq: cppc: simplify default delay_us setting
cpufreq: Rudimentary typos fix in the file s5pv210-cpufreq.c
cpufreq: CPPC: Add support for frequency invariance
ia64: fix format string for ia64-acpi-cpu-freq
cpufreq: schedutil: Call sugov_update_next_freq() before check to fast_switch_enabled
arch_topology: Export arch_freq_scale and helpers
...
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched/core.c | 1 |
-rw-r--r-- | kernel/sched/cpufreq_schedutil.c | 29 |
2 files changed, 13 insertions, 17 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 98191218d891..b2890f6e6d6f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6384,6 +6384,7 @@ int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
 {
 	return __sched_setscheduler(p, attr, false, true);
 }
+EXPORT_SYMBOL_GPL(sched_setattr_nocheck);
 
 /**
  * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 50cbad89f7fa..6ee9c9bbe505 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -114,19 +114,8 @@ static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
 	return true;
 }
 
-static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
-			      unsigned int next_freq)
+static void sugov_deferred_update(struct sugov_policy *sg_policy)
 {
-	if (sugov_update_next_freq(sg_policy, time, next_freq))
-		cpufreq_driver_fast_switch(sg_policy->policy, next_freq);
-}
-
-static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
-				  unsigned int next_freq)
-{
-	if (!sugov_update_next_freq(sg_policy, time, next_freq))
-		return;
-
 	if (!sg_policy->work_in_progress) {
 		sg_policy->work_in_progress = true;
 		irq_work_queue(&sg_policy->irq_work);
@@ -366,16 +355,19 @@ static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
 		sg_policy->cached_raw_freq = cached_freq;
 	}
 
+	if (!sugov_update_next_freq(sg_policy, time, next_f))
+		return;
+
 	/*
 	 * This code runs under rq->lock for the target CPU, so it won't run
 	 * concurrently on two different CPUs for the same target and it is not
 	 * necessary to acquire the lock in the fast switch case.
 	 */
 	if (sg_policy->policy->fast_switch_enabled) {
-		sugov_fast_switch(sg_policy, time, next_f);
+		cpufreq_driver_fast_switch(sg_policy->policy, next_f);
 	} else {
 		raw_spin_lock(&sg_policy->update_lock);
-		sugov_deferred_update(sg_policy, time, next_f);
+		sugov_deferred_update(sg_policy);
 		raw_spin_unlock(&sg_policy->update_lock);
 	}
 }
@@ -454,12 +446,15 @@ sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
 	if (sugov_should_update_freq(sg_policy, time)) {
 		next_f = sugov_next_freq_shared(sg_cpu, time);
 
+		if (!sugov_update_next_freq(sg_policy, time, next_f))
+			goto unlock;
+
 		if (sg_policy->policy->fast_switch_enabled)
-			sugov_fast_switch(sg_policy, time, next_f);
+			cpufreq_driver_fast_switch(sg_policy->policy, next_f);
 		else
-			sugov_deferred_update(sg_policy, time, next_f);
+			sugov_deferred_update(sg_policy);
 	}
-
+unlock:
 	raw_spin_unlock(&sg_policy->update_lock);
 }
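The core.c hunk above adds EXPORT_SYMBOL_GPL(sched_setattr_nocheck), making the function callable from GPL modules; the hunk's context shows it simply wraps __sched_setscheduler() with permission checks disabled. As a minimal sketch only (not code from this series), a modular caller could now set a kthread's scheduling attributes through that path. The worker task, SCHED_FIFO policy, and priority value below are illustrative assumptions.

```c
/*
 * Illustrative sketch, not part of this merge: with the export in place,
 * a GPL module can call sched_setattr_nocheck() on one of its kthreads.
 * The SCHED_FIFO policy and priority 1 are arbitrary example values.
 */
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>

static int example_make_worker_fifo(struct task_struct *worker)
{
	struct sched_attr attr = {
		.sched_policy	= SCHED_FIFO,
		.sched_priority	= 1,	/* example RT priority */
	};

	/* In-kernel path: no capability/permission checks are applied. */
	return sched_setattr_nocheck(worker, &attr);
}
```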