author		Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2021-05-26 19:30:58 +0200
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2021-06-07 13:47:57 +0200
commit		8df71a7dc5e1e0d8f1bb13145e00bf375fa2082e (patch)
tree		52fb8a18fe49f9a457f368e8c1c2a485346843b6 /drivers
parent		9ff6774b9718d1a72d1b7c580fc579f1d9d7071f (diff)
cpufreq: intel_pstate: hybrid: Fix build with CONFIG_ACPI unset
One of the previous commits introducing hybrid processor support to
intel_pstate broke the build with CONFIG_ACPI unset.

Fix that and, while at it, make the empty stubs of two functions
related to ACPI CPPC static inline and fix a spelling mistake in the
name of one of them.
Fixes: eb3693f0521e ("cpufreq: intel_pstate: hybrid: CPU-specific scaling factor")
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Reported-by: Randy Dunlap <rdunlap@infradead.org>
Acked-by: Randy Dunlap <rdunlap@infradead.org> # build-tested
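
For readers unfamiliar with the stub pattern the message refers to: when a Kconfig option is unset, the real function is replaced by an empty stub, and marking that stub static inline lets the compiler fold it away instead of emitting an unused static function (which can trigger "defined but not used" warnings). A minimal sketch of the idiom, using made-up names (CONFIG_FOO, foo_setup()) rather than the actual intel_pstate code:

	#ifdef CONFIG_FOO
	/* Real implementation, built only when CONFIG_FOO is set. */
	void foo_setup(int cpu);
	#else
	/*
	 * Empty stub: static inline, so every caller gets a no-op the
	 * compiler can inline and discard, with no unused-function
	 * warning when CONFIG_FOO is unset.
	 */
	static inline void foo_setup(int cpu) { }
	#endif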
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/cpufreq/intel_pstate.c	91
1 file changed, 48 insertions(+), 43 deletions(-)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 03d8516e653e..6012964df51b 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -369,7 +369,7 @@ static void intel_pstate_set_itmt_prio(int cpu)
 	}
 }
 
-static int intel_pstate_get_cppc_guranteed(int cpu)
+static int intel_pstate_get_cppc_guaranteed(int cpu)
 {
 	struct cppc_perf_caps cppc_perf;
 	int ret;
@@ -385,7 +385,7 @@ static int intel_pstate_get_cppc_guranteed(int cpu)
 }
 
 #else /* CONFIG_ACPI_CPPC_LIB */
-static void intel_pstate_set_itmt_prio(int cpu)
+static inline void intel_pstate_set_itmt_prio(int cpu)
 {
 }
 #endif /* CONFIG_ACPI_CPPC_LIB */
@@ -470,6 +470,20 @@ static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
 
 	acpi_processor_unregister_performance(policy->cpu);
 }
+
+static bool intel_pstate_cppc_perf_valid(u32 perf, struct cppc_perf_caps *caps)
+{
+	return perf && perf <= caps->highest_perf && perf >= caps->lowest_perf;
+}
+
+static bool intel_pstate_cppc_perf_caps(struct cpudata *cpu,
+					struct cppc_perf_caps *caps)
+{
+	if (cppc_get_perf_caps(cpu->cpu, caps))
+		return false;
+
+	return caps->highest_perf && caps->lowest_perf <= caps->highest_perf;
+}
 #else /* CONFIG_ACPI */
 static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
 {
@@ -486,26 +500,12 @@ static inline bool intel_pstate_acpi_pm_profile_server(void)
 #endif /* CONFIG_ACPI */
 
 #ifndef CONFIG_ACPI_CPPC_LIB
-static int intel_pstate_get_cppc_guranteed(int cpu)
+static inline int intel_pstate_get_cppc_guaranteed(int cpu)
 {
 	return -ENOTSUPP;
 }
 #endif /* CONFIG_ACPI_CPPC_LIB */
 
-static bool intel_pstate_cppc_perf_valid(u32 perf, struct cppc_perf_caps *caps)
-{
-	return perf && perf <= caps->highest_perf && perf >= caps->lowest_perf;
-}
-
-static bool intel_pstate_cppc_perf_caps(struct cpudata *cpu,
-					struct cppc_perf_caps *caps)
-{
-	if (cppc_get_perf_caps(cpu->cpu, caps))
-		return false;
-
-	return caps->highest_perf && caps->lowest_perf <= caps->highest_perf;
-}
-
 static void intel_pstate_hybrid_hwp_perf_ctl_parity(struct cpudata *cpu)
 {
 	pr_debug("CPU%d: Using PERF_CTL scaling for HWP\n", cpu->cpu);
@@ -530,7 +530,6 @@ static void intel_pstate_hybrid_hwp_perf_ctl_parity(struct cpudata *cpu)
  */
 static void intel_pstate_hybrid_hwp_calibrate(struct cpudata *cpu)
 {
-	struct cppc_perf_caps caps;
 	int perf_ctl_max_phys = cpu->pstate.max_pstate_physical;
 	int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
 	int perf_ctl_turbo = pstate_funcs.get_turbo();
@@ -548,33 +547,39 @@ static void intel_pstate_hybrid_hwp_calibrate(struct cpudata *cpu)
 	pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate);
 	pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate);
 
-	if (intel_pstate_cppc_perf_caps(cpu, &caps)) {
-		if (intel_pstate_cppc_perf_valid(caps.nominal_perf, &caps)) {
-			pr_debug("CPU%d: Using CPPC nominal\n", cpu->cpu);
-
-			/*
-			 * If the CPPC nominal performance is valid, it can be
-			 * assumed to correspond to cpu_khz.
-			 */
-			if (caps.nominal_perf == perf_ctl_max_phys) {
-				intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
-				return;
-			}
-			scaling = DIV_ROUND_UP(cpu_khz, caps.nominal_perf);
-		} else if (intel_pstate_cppc_perf_valid(caps.guaranteed_perf, &caps)) {
-			pr_debug("CPU%d: Using CPPC guaranteed\n", cpu->cpu);
-
-			/*
-			 * If the CPPC guaranteed performance is valid, it can
-			 * be assumed to correspond to max_freq.
-			 */
-			if (caps.guaranteed_perf == perf_ctl_max) {
-				intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
-				return;
+#ifdef CONFIG_ACPI
+	if (IS_ENABLED(CONFIG_ACPI_CPPC_LIB)) {
+		struct cppc_perf_caps caps;
+
+		if (intel_pstate_cppc_perf_caps(cpu, &caps)) {
+			if (intel_pstate_cppc_perf_valid(caps.nominal_perf, &caps)) {
+				pr_debug("CPU%d: Using CPPC nominal\n", cpu->cpu);
+
+				/*
+				 * If the CPPC nominal performance is valid, it
+				 * can be assumed to correspond to cpu_khz.
+				 */
+				if (caps.nominal_perf == perf_ctl_max_phys) {
+					intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
+					return;
+				}
+				scaling = DIV_ROUND_UP(cpu_khz, caps.nominal_perf);
+			} else if (intel_pstate_cppc_perf_valid(caps.guaranteed_perf, &caps)) {
+				pr_debug("CPU%d: Using CPPC guaranteed\n", cpu->cpu);
+
+				/*
+				 * If the CPPC guaranteed performance is valid,
+				 * it can be assumed to correspond to max_freq.
+				 */
+				if (caps.guaranteed_perf == perf_ctl_max) {
+					intel_pstate_hybrid_hwp_perf_ctl_parity(cpu);
+					return;
+				}
+				scaling = DIV_ROUND_UP(max_freq, caps.guaranteed_perf);
 			}
-			scaling = DIV_ROUND_UP(max_freq, caps.guaranteed_perf);
 		}
 	}
+#endif
 	/*
 	 * If using the CPPC data to compute the HWP-to-frequency scaling factor
 	 * doesn't work, use the HWP_CAP gauranteed perf for this purpose with
@@ -944,7 +949,7 @@ static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
 	struct cpudata *cpu = all_cpu_data[policy->cpu];
 	int ratio, freq;
 
-	ratio = intel_pstate_get_cppc_guranteed(policy->cpu);
+	ratio = intel_pstate_get_cppc_guaranteed(policy->cpu);
 	if (ratio <= 0) {
 		u64 cap;
 
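
The core of the fix is visible in the last big hunk: the CPPC-based calibration is wrapped in #ifdef CONFIG_ACPI (so that helpers which only exist in the ACPI build can be referenced at all) and additionally gated by IS_ENABLED(CONFIG_ACPI_CPPC_LIB), which expands to a compile-time 0 or 1. The compiler therefore still parses and type-checks the CPPC branch, then discards it as dead code when CPPC support is off, without a second level of preprocessor guards. A standalone sketch of that idiom, with illustrative names (CONFIG_BAR, CONFIG_BAR_EXTRA, bar_read()) that are not from the kernel tree:

	#include <linux/kconfig.h>	/* IS_ENABLED() */

	#ifdef CONFIG_BAR
	int bar_read(void);	/* helper that only exists when CONFIG_BAR is set */

	static int read_value(void)
	{
		/*
		 * IS_ENABLED(CONFIG_BAR_EXTRA) is a constant 0 or 1: when the
		 * sub-option is off, the branch is still type-checked, then
		 * eliminated as dead code, so bar_read() is never emitted.
		 */
		if (IS_ENABLED(CONFIG_BAR_EXTRA))
			return bar_read();

		return 0;
	}
	#else
	/* Stub for non-CONFIG_BAR builds, static inline as in the patch above. */
	static inline int read_value(void) { return 0; }
	#endif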