From c3d175e4852bfdfd1e4021dff8715fc407dedd98 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 12 May 2021 16:15:48 +0200 Subject: cpufreq: intel_pstate: hybrid: Avoid exposing two global attributes The turbo_pct and num_pstates sysfs attributes represent CPU properties that may be different for different types of CPUs in a hybrid processor, so avoid exposing them in that case. Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/intel_pstate.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 0e69dffd5a76..45f59e2827fe 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1365,8 +1365,6 @@ define_one_global_rw(energy_efficiency); static struct attribute *intel_pstate_attributes[] = { &status.attr, &no_turbo.attr, - &turbo_pct.attr, - &num_pstates.attr, NULL }; @@ -1391,6 +1389,14 @@ static void __init intel_pstate_sysfs_expose_params(void) if (WARN_ON(rc)) return; + if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) { + rc = sysfs_create_file(intel_pstate_kobject, &turbo_pct.attr); + WARN_ON(rc); + + rc = sysfs_create_file(intel_pstate_kobject, &num_pstates.attr); + WARN_ON(rc); + } + /* * If per cpu limits are enforced there are no global limits, so * return without creating max/min_perf_pct attributes */ @@ -1417,6 +1423,11 @@ static void __init intel_pstate_sysfs_remove(void) sysfs_remove_group(intel_pstate_kobject, &intel_pstate_attr_group); + if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) { + sysfs_remove_file(intel_pstate_kobject, &num_pstates.attr); + sysfs_remove_file(intel_pstate_kobject, &turbo_pct.attr); + } + if (!per_cpu_limits) { sysfs_remove_file(intel_pstate_kobject, &max_perf_pct.attr); sysfs_remove_file(intel_pstate_kobject, &min_perf_pct.attr); -- cgit v1.2.3 From eb3693f0521e020dd8617c7fa3ddf5c9f0d8dea0 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 12 May 2021 16:19:30 +0200 Subject: cpufreq: intel_pstate: hybrid: CPU-specific scaling factor The scaling factor between HWP performance levels and CPU frequency may be different for different types of CPUs in a hybrid processor and in general the HWP performance levels need not correspond to "P-states" representing values that would be written to MSR_IA32_PERF_CTL if HWP was disabled. However, the policy limits control in cpufreq is defined in terms of CPU frequency, so it is necessary to map the frequency limits set through that interface to HWP performance levels with reasonable accuracy and the behavior of that interface on hybrid processors has to be compatible with its behavior on non-hybrid ones. To address this problem, use the observations that (1) on hybrid processors the sysfs interface can operate by mapping frequency to "P-states" and translating those "P-states" to specific HWP performance levels of the given CPU and (2) the scaling factor between the MSR_IA32_PERF_CTL "P-states" and CPU frequency can be regarded as a known value. Moreover, the mapping between the HWP performance levels and CPU frequency can be assumed to be linear and such that HWP performance level 0 corresponds to the frequency value of 0, so it is only necessary to know the frequency corresponding to one specific HWP performance level to compute the scaling factor applicable to all of them. One possibility is to take the nominal performance value from CPPC, if available, and use cpu_khz as the corresponding frequency.
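In rough C terms, this derivation (together with the turbo-frequency cap discussed further below) amounts to the following simplified sketch. It assumes a valid CPPC nominal performance value, omits the guaranteed-performance fallbacks and frequency rounding that the actual patch performs, and the function name hwp_scaling_sketch() is hypothetical:

/*
 * Hypothetical sketch of the HWP-to-frequency scaling factor derivation
 * described above; the real logic is in intel_pstate_hybrid_hwp_calibrate()
 * in the patch below.
 */
static int hwp_scaling_sketch(struct cpudata *cpu, struct cppc_perf_caps *caps)
{
	int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
	int turbo_freq = pstate_funcs.get_turbo() * perf_ctl_scaling;
	int scaling;

	/*
	 * CPPC nominal performance is assumed to correspond to cpu_khz; if it
	 * equals the maximum PERF_CTL P-state, HWP performance levels and
	 * P-states match 1:1 and the PERF_CTL scaling factor can be used.
	 */
	if (caps->nominal_perf == cpu->pstate.max_pstate_physical)
		return perf_ctl_scaling;

	scaling = DIV_ROUND_UP(cpu_khz, caps->nominal_perf);

	/*
	 * The highest HWP performance level must not map above the maximum
	 * 1-core turbo frequency; shrink the scaling factor if it would.
	 */
	if (cpu->pstate.turbo_pstate * scaling > turbo_freq)
		scaling = DIV_ROUND_UP(turbo_freq, cpu->pstate.turbo_pstate);

	return scaling;
}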
If the CPPC capabilities interface is not there or the nominal performance value provided by it is out of range, though, something else needs to be done. Namely, the guaranteed performance level either from CPPC or from MSR_HWP_CAPABILITIES can be used instead, but the corresponding frequency needs to be determined. That can be done by computing the product of the (known) scaling factor between the MSR_IA32_PERF_CTL P-states and CPU frequency (the PERF_CTL scaling factor) and the P-state value referred to as the "TDP ratio". If the HWP-to-frequency scaling factor value obtained in one of the ways above turns out to be equal to the PERF_CTL scaling factor, it can be assumed that the number of HWP performance levels is equal to the number of P-states and the given CPU can be handled as though this was not a hybrid processor. Otherwise, one more adjustment may still need to be made, because the HWP-to-frequency scaling factor computed so far may not be accurate enough (e.g. because the CPPC information does not match the exact behavior of the processor). Specifically, in that case the frequency corresponding to the highest HWP performance value from MSR_HWP_CAPABILITIES (computed as the product of that value and the HWP-to-frequency scaling factor) cannot exceed the frequency that corresponds to the maximum 1-core turbo P-state value from MSR_TURBO_RATIO_LIMIT (computed as the product of that value and the PERF_CTL scaling factor) and the HWP-to-frequency scaling factor may need to be adjusted accordingly. Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/intel_pstate.c | 233 +++++++++++++++++++++++++++++++++++++---- 1 file changed, 210 insertions(+), 23 deletions(-) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 45f59e2827fe..b0afb8629767 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -121,9 +121,10 @@ struct sample { * @max_pstate_physical:This is physical Max P state for a processor * This can be higher than the max_pstate which can * be limited by platform thermal design power limits - * @scaling: Scaling factor to convert frequency to cpufreq - * frequency units + * @perf_ctl_scaling: PERF_CTL P-state to frequency scaling factor + * @scaling: Scaling factor between performance and frequency * @turbo_pstate: Max Turbo P state possible for this platform + * @min_freq: @min_pstate frequency in cpufreq units * @max_freq: @max_pstate frequency in cpufreq units * @turbo_freq: @turbo_pstate frequency in cpufreq units * @@ -134,8 +135,10 @@ struct pstate_data { int min_pstate; int max_pstate; int max_pstate_physical; + int perf_ctl_scaling; int scaling; int turbo_pstate; + unsigned int min_freq; unsigned int max_freq; unsigned int turbo_freq; }; @@ -489,6 +492,149 @@ static int intel_pstate_get_cppc_guranteed(int cpu) } #endif /* CONFIG_ACPI_CPPC_LIB */ +static bool intel_pstate_cppc_perf_valid(u32 perf, struct cppc_perf_caps *caps) +{ + return perf && perf <= caps->highest_perf && perf >= caps->lowest_perf; +} + +static bool intel_pstate_cppc_perf_caps(struct cpudata *cpu, + struct cppc_perf_caps *caps) +{ + if (cppc_get_perf_caps(cpu->cpu, caps)) + return false; + + return caps->highest_perf && caps->lowest_perf <= caps->highest_perf; +} + +static void intel_pstate_hybrid_hwp_perf_ctl_parity(struct cpudata *cpu) +{ + pr_debug("CPU%d: Using PERF_CTL scaling for HWP\n", cpu->cpu); + + cpu->pstate.scaling = cpu->pstate.perf_ctl_scaling; +} + +/** + * intel_pstate_hybrid_hwp_calibrate - 
Calibrate HWP performance levels. + * @cpu: Target CPU. + * + * On hybrid processors, HWP may expose more performance levels than there are + * P-states accessible through the PERF_CTL interface. If that happens, the + * scaling factor between HWP performance levels and CPU frequency will be less + * than the scaling factor between P-state values and CPU frequency. + * + * In that case, the scaling factor between HWP performance levels and CPU + * frequency needs to be determined which can be done with the help of the + * observation that certain HWP performance levels should correspond to certain + * P-states, like for example the HWP highest performance should correspond + * to the maximum turbo P-state of the CPU. + */ +static void intel_pstate_hybrid_hwp_calibrate(struct cpudata *cpu) +{ + struct cppc_perf_caps caps; + int perf_ctl_max_phys = cpu->pstate.max_pstate_physical; + int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling; + int perf_ctl_turbo = pstate_funcs.get_turbo(); + int turbo_freq = perf_ctl_turbo * perf_ctl_scaling; + int perf_ctl_max = pstate_funcs.get_max(); + int max_freq = perf_ctl_max * perf_ctl_scaling; + int scaling = INT_MAX; + int freq; + + pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys); + pr_debug("CPU%d: perf_ctl_max = %d\n", cpu->cpu, perf_ctl_max); + pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo); + pr_debug("CPU%d: perf_ctl_scaling = %d\n", cpu->cpu, perf_ctl_scaling); + + pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate); + pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate); + + if (intel_pstate_cppc_perf_caps(cpu, &caps)) { + if (intel_pstate_cppc_perf_valid(caps.nominal_perf, &caps)) { + pr_debug("CPU%d: Using CPPC nominal\n", cpu->cpu); + + /* + * If the CPPC nominal performance is valid, it can be + * assumed to correspond to cpu_khz. + */ + if (caps.nominal_perf == perf_ctl_max_phys) { + intel_pstate_hybrid_hwp_perf_ctl_parity(cpu); + return; + } + scaling = DIV_ROUND_UP(cpu_khz, caps.nominal_perf); + } else if (intel_pstate_cppc_perf_valid(caps.guaranteed_perf, &caps)) { + pr_debug("CPU%d: Using CPPC guaranteed\n", cpu->cpu); + + /* + * If the CPPC guaranteed performance is valid, it can + * be assumed to correspond to max_freq. + */ + if (caps.guaranteed_perf == perf_ctl_max) { + intel_pstate_hybrid_hwp_perf_ctl_parity(cpu); + return; + } + scaling = DIV_ROUND_UP(max_freq, caps.guaranteed_perf); + } + } + /* + * If using the CPPC data to compute the HWP-to-frequency scaling factor + * doesn't work, use the HWP_CAP gauranteed perf for this purpose with + * the assumption that it corresponds to max_freq. + */ + if (scaling > perf_ctl_scaling) { + pr_debug("CPU%d: Using HWP_CAP guaranteed\n", cpu->cpu); + + if (cpu->pstate.max_pstate == perf_ctl_max) { + intel_pstate_hybrid_hwp_perf_ctl_parity(cpu); + return; + } + scaling = DIV_ROUND_UP(max_freq, cpu->pstate.max_pstate); + if (scaling > perf_ctl_scaling) { + /* + * This should not happen, because it would mean that + * the number of HWP perf levels was less than the + * number of P-states, so use the PERF_CTL scaling in + * that case. 
+ */ + pr_debug("CPU%d: scaling (%d) out of range\n", cpu->cpu, + scaling); + + intel_pstate_hybrid_hwp_perf_ctl_parity(cpu); + return; + } + } + + /* + * If the product of the HWP performance scaling factor obtained above + * and the HWP_CAP highest performance is greater than the maximum turbo + * frequency corresponding to the pstate_funcs.get_turbo() return value, + * the scaling factor is too high, so recompute it so that the HWP_CAP + * highest performance corresponds to the maximum turbo frequency. + */ + if (turbo_freq < cpu->pstate.turbo_pstate * scaling) { + pr_debug("CPU%d: scaling too high (%d)\n", cpu->cpu, scaling); + + cpu->pstate.turbo_freq = turbo_freq; + scaling = DIV_ROUND_UP(turbo_freq, cpu->pstate.turbo_pstate); + } + + cpu->pstate.scaling = scaling; + + pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling); + + cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling, + perf_ctl_scaling); + + freq = perf_ctl_max_phys * perf_ctl_scaling; + cpu->pstate.max_pstate_physical = DIV_ROUND_UP(freq, scaling); + + cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling; + /* + * Cast the min P-state value retrieved via pstate_funcs.get_min() to + * the effective range of HWP performance levels. + */ + cpu->pstate.min_pstate = DIV_ROUND_UP(cpu->pstate.min_freq, scaling); +} + static inline void update_turbo_state(void) { u64 misc_en; @@ -795,19 +941,22 @@ cpufreq_freq_attr_rw(energy_performance_preference); static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf) { - struct cpudata *cpu; - u64 cap; - int ratio; + struct cpudata *cpu = all_cpu_data[policy->cpu]; + int ratio, freq; ratio = intel_pstate_get_cppc_guranteed(policy->cpu); if (ratio <= 0) { + u64 cap; + rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap); ratio = HWP_GUARANTEED_PERF(cap); } - cpu = all_cpu_data[policy->cpu]; + freq = ratio * cpu->pstate.scaling; + if (cpu->pstate.scaling != cpu->pstate.perf_ctl_scaling) + freq = rounddown(freq, cpu->pstate.perf_ctl_scaling); - return sprintf(buf, "%d\n", ratio * cpu->pstate.scaling); + return sprintf(buf, "%d\n", freq); } cpufreq_freq_attr_ro(base_frequency); @@ -831,9 +980,20 @@ static void __intel_pstate_get_hwp_cap(struct cpudata *cpu) static void intel_pstate_get_hwp_cap(struct cpudata *cpu) { + int scaling = cpu->pstate.scaling; + __intel_pstate_get_hwp_cap(cpu); - cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling; - cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling; + + cpu->pstate.max_freq = cpu->pstate.max_pstate * scaling; + cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * scaling; + if (scaling != cpu->pstate.perf_ctl_scaling) { + int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling; + + cpu->pstate.max_freq = rounddown(cpu->pstate.max_freq, + perf_ctl_scaling); + cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_freq, + perf_ctl_scaling); + } } static void intel_pstate_hwp_set(unsigned int cpu) @@ -1724,19 +1884,33 @@ static void intel_pstate_max_within_limits(struct cpudata *cpu) static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) { + bool hybrid_cpu = boot_cpu_has(X86_FEATURE_HYBRID_CPU); + int perf_ctl_max_phys = pstate_funcs.get_max_physical(); + int perf_ctl_scaling = hybrid_cpu ? 
cpu_khz / perf_ctl_max_phys : + pstate_funcs.get_scaling(); + cpu->pstate.min_pstate = pstate_funcs.get_min(); - cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical(); - cpu->pstate.scaling = pstate_funcs.get_scaling(); + cpu->pstate.max_pstate_physical = perf_ctl_max_phys; + cpu->pstate.perf_ctl_scaling = perf_ctl_scaling; if (hwp_active && !hwp_mode_bdw) { __intel_pstate_get_hwp_cap(cpu); + + if (hybrid_cpu) + intel_pstate_hybrid_hwp_calibrate(cpu); + else + cpu->pstate.scaling = perf_ctl_scaling; } else { + cpu->pstate.scaling = perf_ctl_scaling; cpu->pstate.max_pstate = pstate_funcs.get_max(); cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(); } - cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling; - cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling; + if (cpu->pstate.scaling == perf_ctl_scaling) { + cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling; + cpu->pstate.max_freq = cpu->pstate.max_pstate * perf_ctl_scaling; + cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * perf_ctl_scaling; + } if (pstate_funcs.get_aperf_mperf_shift) cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift(); @@ -2206,23 +2380,34 @@ static void intel_pstate_update_perf_limits(struct cpudata *cpu, unsigned int policy_min, unsigned int policy_max) { - int scaling = cpu->pstate.scaling; + int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling; int32_t max_policy_perf, min_policy_perf; + max_policy_perf = policy_max / perf_ctl_scaling; + if (policy_max == policy_min) { + min_policy_perf = max_policy_perf; + } else { + min_policy_perf = policy_min / perf_ctl_scaling; + min_policy_perf = clamp_t(int32_t, min_policy_perf, + 0, max_policy_perf); + } + /* * HWP needs some special consideration, because HWP_REQUEST uses * abstract values to represent performance rather than pure ratios. */ - if (hwp_active) + if (hwp_active) { intel_pstate_get_hwp_cap(cpu); - max_policy_perf = policy_max / scaling; - if (policy_max == policy_min) { - min_policy_perf = max_policy_perf; - } else { - min_policy_perf = policy_min / scaling; - min_policy_perf = clamp_t(int32_t, min_policy_perf, - 0, max_policy_perf); + if (cpu->pstate.scaling != perf_ctl_scaling) { + int scaling = cpu->pstate.scaling; + int freq; + + freq = max_policy_perf * perf_ctl_scaling; + max_policy_perf = DIV_ROUND_UP(freq, scaling); + freq = min_policy_perf * perf_ctl_scaling; + min_policy_perf = DIV_ROUND_UP(freq, scaling); + } } pr_debug("cpu:%d min_policy_perf:%d max_policy_perf:%d\n", @@ -2416,7 +2601,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy) cpu->min_perf_ratio = 0; /* cpuinfo and default policy values */ - policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; + policy->cpuinfo.min_freq = cpu->pstate.min_freq; update_turbo_state(); global.turbo_disabled_mf = global.turbo_disabled; policy->cpuinfo.max_freq = global.turbo_disabled ? @@ -3146,6 +3331,8 @@ hwp_cpu_matched: } pr_info("HWP enabled\n"); + } else if (boot_cpu_has(X86_FEATURE_HYBRID_CPU)) { + pr_warn("Problematic setup: Hybrid processor with disabled HWP\n"); } return 0; -- cgit v1.2.3 From fbdc21e9b038d00d0d56fa4e0f7701d42ae08f00 Mon Sep 17 00:00:00 2001 From: Giovanni Gherdovich Date: Tue, 18 May 2021 14:34:12 +0200 Subject: cpufreq: intel_pstate: Add Icelake servers support in no-HWP mode Users may disable HWP in firmware, in which case intel_pstate wouldn't load unless the CPU model is explicitly supported. 
Add ICELAKE_X to the list of CPUs that can register intel_pstate while not advertising the HWP capability. Without this change, an ICELAKE_X in no-HWP mode could only use the acpi_cpufreq frequency scaling driver. See also commit d8de7a44e11f ("cpufreq: intel_pstate: Add Skylake servers support"). Signed-off-by: Giovanni Gherdovich Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/intel_pstate.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index b0afb8629767..d36d3b72d86b 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -2272,6 +2272,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = { X86_MATCH(ATOM_GOLDMONT, core_funcs), X86_MATCH(ATOM_GOLDMONT_PLUS, core_funcs), X86_MATCH(SKYLAKE_X, core_funcs), + X86_MATCH(ICELAKE_X, core_funcs), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); -- cgit v1.2.3 From 706c5328851d23dec4d9b433cbf864d900a54edf Mon Sep 17 00:00:00 2001 From: Giovanni Gherdovich Date: Tue, 18 May 2021 14:34:13 +0200 Subject: cpufreq: intel_pstate: Add Cometlake support in no-HWP mode Users may disable HWP in firmware, in which case intel_pstate wouldn't load unless the CPU model is explicitly supported. See also commit d8de7a44e11f ("cpufreq: intel_pstate: Add Skylake servers support"). Suggested-by: Doug Smythies Tested-by: Doug Smythies Signed-off-by: Giovanni Gherdovich Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/intel_pstate.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index d36d3b72d86b..03d8516e653e 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -2272,6 +2272,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = { X86_MATCH(ATOM_GOLDMONT, core_funcs), X86_MATCH(ATOM_GOLDMONT_PLUS, core_funcs), X86_MATCH(SKYLAKE_X, core_funcs), + X86_MATCH(COMETLAKE, core_funcs), X86_MATCH(ICELAKE_X, core_funcs), {} }; -- cgit v1.2.3 From 9ff6774b9718d1a72d1b7c580fc579f1d9d7071f Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Mon, 3 May 2021 09:18:40 -0700 Subject: cpufreq: sc520_freq: add 'fallthrough' to one case Quieten an implicit-fallthrough warning in sc520_freq.c: ../drivers/cpufreq/sc520_freq.c: In function 'sc520_freq_get_cpu_frequency': ../include/linux/printk.h:343:2: warning: this statement may fall through [-Wimplicit-fallthrough=] printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) ../drivers/cpufreq/sc520_freq.c:43:3: note: in expansion of macro 'pr_err' pr_err("error: cpuctl register has unexpected value %02x\n", ../drivers/cpufreq/sc520_freq.c:45:2: note: here case 0x01: Fixes: bf6fc9fd2d848 ("[CPUFREQ] AMD Elan SC520 cpufreq driver.") Signed-off-by: Randy Dunlap Acked-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/sc520_freq.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c index 73a208559fe2..330c8d6cf93c 100644 --- a/drivers/cpufreq/sc520_freq.c +++ b/drivers/cpufreq/sc520_freq.c @@ -42,6 +42,7 @@ static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu) default: pr_err("error: cpuctl register has unexpected value %02x\n", clockspeed_reg); + fallthrough; case 0x01: return 100000; case 0x02: -- cgit v1.2.3 From 8df71a7dc5e1e0d8f1bb13145e00bf375fa2082e Mon Sep 17 00:00:00 2001 From: "Rafael J. 
Wysocki" Date: Wed, 26 May 2021 19:30:58 +0200 Subject: cpufreq: intel_pstate: hybrid: Fix build with CONFIG_ACPI unset One of the previous commits introducing hybrid processor support to intel_pstate broke build with CONFIG_ACPI unset. Fix that and while at it make empty stubs of two functions related to ACPI CPPC static inline and fix a spelling mistake in the name of one of them. Fixes: eb3693f0521e ("cpufreq: intel_pstate: hybrid: CPU-specific scaling factor") Signed-off-by: Rafael J. Wysocki Reported-by: Randy Dunlap Acked-by: Randy Dunlap # build-tested --- drivers/cpufreq/intel_pstate.c | 91 ++++++++++++++++++++++-------------------- 1 file changed, 48 insertions(+), 43 deletions(-) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 03d8516e653e..6012964df51b 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -369,7 +369,7 @@ static void intel_pstate_set_itmt_prio(int cpu) } } -static int intel_pstate_get_cppc_guranteed(int cpu) +static int intel_pstate_get_cppc_guaranteed(int cpu) { struct cppc_perf_caps cppc_perf; int ret; @@ -385,7 +385,7 @@ static int intel_pstate_get_cppc_guranteed(int cpu) } #else /* CONFIG_ACPI_CPPC_LIB */ -static void intel_pstate_set_itmt_prio(int cpu) +static inline void intel_pstate_set_itmt_prio(int cpu) { } #endif /* CONFIG_ACPI_CPPC_LIB */ @@ -470,6 +470,20 @@ static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) acpi_processor_unregister_performance(policy->cpu); } + +static bool intel_pstate_cppc_perf_valid(u32 perf, struct cppc_perf_caps *caps) +{ + return perf && perf <= caps->highest_perf && perf >= caps->lowest_perf; +} + +static bool intel_pstate_cppc_perf_caps(struct cpudata *cpu, + struct cppc_perf_caps *caps) +{ + if (cppc_get_perf_caps(cpu->cpu, caps)) + return false; + + return caps->highest_perf && caps->lowest_perf <= caps->highest_perf; +} #else /* CONFIG_ACPI */ static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) { @@ -486,26 +500,12 @@ static inline bool intel_pstate_acpi_pm_profile_server(void) #endif /* CONFIG_ACPI */ #ifndef CONFIG_ACPI_CPPC_LIB -static int intel_pstate_get_cppc_guranteed(int cpu) +static inline int intel_pstate_get_cppc_guaranteed(int cpu) { return -ENOTSUPP; } #endif /* CONFIG_ACPI_CPPC_LIB */ -static bool intel_pstate_cppc_perf_valid(u32 perf, struct cppc_perf_caps *caps) -{ - return perf && perf <= caps->highest_perf && perf >= caps->lowest_perf; -} - -static bool intel_pstate_cppc_perf_caps(struct cpudata *cpu, - struct cppc_perf_caps *caps) -{ - if (cppc_get_perf_caps(cpu->cpu, caps)) - return false; - - return caps->highest_perf && caps->lowest_perf <= caps->highest_perf; -} - static void intel_pstate_hybrid_hwp_perf_ctl_parity(struct cpudata *cpu) { pr_debug("CPU%d: Using PERF_CTL scaling for HWP\n", cpu->cpu); @@ -530,7 +530,6 @@ static void intel_pstate_hybrid_hwp_perf_ctl_parity(struct cpudata *cpu) */ static void intel_pstate_hybrid_hwp_calibrate(struct cpudata *cpu) { - struct cppc_perf_caps caps; int perf_ctl_max_phys = cpu->pstate.max_pstate_physical; int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling; int perf_ctl_turbo = pstate_funcs.get_turbo(); @@ -548,33 +547,39 @@ static void intel_pstate_hybrid_hwp_calibrate(struct cpudata *cpu) pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate); pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate); - if (intel_pstate_cppc_perf_caps(cpu, &caps)) { - if 
(intel_pstate_cppc_perf_valid(caps.nominal_perf, &caps)) { - pr_debug("CPU%d: Using CPPC nominal\n", cpu->cpu); - - /* - * If the CPPC nominal performance is valid, it can be - * assumed to correspond to cpu_khz. - */ - if (caps.nominal_perf == perf_ctl_max_phys) { - intel_pstate_hybrid_hwp_perf_ctl_parity(cpu); - return; - } - scaling = DIV_ROUND_UP(cpu_khz, caps.nominal_perf); - } else if (intel_pstate_cppc_perf_valid(caps.guaranteed_perf, &caps)) { - pr_debug("CPU%d: Using CPPC guaranteed\n", cpu->cpu); - - /* - * If the CPPC guaranteed performance is valid, it can - * be assumed to correspond to max_freq. - */ - if (caps.guaranteed_perf == perf_ctl_max) { - intel_pstate_hybrid_hwp_perf_ctl_parity(cpu); - return; +#ifdef CONFIG_ACPI + if (IS_ENABLED(CONFIG_ACPI_CPPC_LIB)) { + struct cppc_perf_caps caps; + + if (intel_pstate_cppc_perf_caps(cpu, &caps)) { + if (intel_pstate_cppc_perf_valid(caps.nominal_perf, &caps)) { + pr_debug("CPU%d: Using CPPC nominal\n", cpu->cpu); + + /* + * If the CPPC nominal performance is valid, it + * can be assumed to correspond to cpu_khz. + */ + if (caps.nominal_perf == perf_ctl_max_phys) { + intel_pstate_hybrid_hwp_perf_ctl_parity(cpu); + return; + } + scaling = DIV_ROUND_UP(cpu_khz, caps.nominal_perf); + } else if (intel_pstate_cppc_perf_valid(caps.guaranteed_perf, &caps)) { + pr_debug("CPU%d: Using CPPC guaranteed\n", cpu->cpu); + + /* + * If the CPPC guaranteed performance is valid, + * it can be assumed to correspond to max_freq. + */ + if (caps.guaranteed_perf == perf_ctl_max) { + intel_pstate_hybrid_hwp_perf_ctl_parity(cpu); + return; + } + scaling = DIV_ROUND_UP(max_freq, caps.guaranteed_perf); } - scaling = DIV_ROUND_UP(max_freq, caps.guaranteed_perf); } } +#endif /* * If using the CPPC data to compute the HWP-to-frequency scaling factor * doesn't work, use the HWP_CAP gauranteed perf for this purpose with @@ -944,7 +949,7 @@ static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf) struct cpudata *cpu = all_cpu_data[policy->cpu]; int ratio, freq; - ratio = intel_pstate_get_cppc_guranteed(policy->cpu); + ratio = intel_pstate_get_cppc_guaranteed(policy->cpu); if (ratio <= 0) { u64 cap; -- cgit v1.2.3 From 5de1262500708bcf6eef753f5eb9d8adb3d32d33 Mon Sep 17 00:00:00 2001 From: Shaokun Zhang Date: Mon, 31 May 2021 15:16:07 +0800 Subject: cpufreq: stats: Clean up local variable in cpufreq_stats_create_table() Local variable 'count' will be initialized and 'ret' is also not required, so remove the redundant initialization and get rid of 'ret'. Signed-off-by: Shaokun Zhang Acked-by: Viresh Kumar Signed-off-by: Rafael J. 
Wysocki --- drivers/cpufreq/cpufreq_stats.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index da717f7cd9a9..1570d6f3e75d 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c @@ -211,7 +211,7 @@ void cpufreq_stats_free_table(struct cpufreq_policy *policy) void cpufreq_stats_create_table(struct cpufreq_policy *policy) { - unsigned int i = 0, count = 0, ret = -ENOMEM; + unsigned int i = 0, count; struct cpufreq_stats *stats; unsigned int alloc_size; struct cpufreq_frequency_table *pos; @@ -253,8 +253,7 @@ void cpufreq_stats_create_table(struct cpufreq_policy *policy) stats->last_index = freq_table_get_index(stats, policy->cur); policy->stats = stats; - ret = sysfs_create_group(&policy->kobj, &stats_attr_group); - if (!ret) + if (!sysfs_create_group(&policy->kobj, &stats_attr_group)) return; /* We failed, release resources */ -- cgit v1.2.3 From 019694f5c1b9cc444e6a3fd3005f556d0c5a6b14 Mon Sep 17 00:00:00 2001 From: Hailong Liu Date: Sun, 6 Jun 2021 19:58:28 +0800 Subject: cpufreq: sh: Remove unused linux/sched.h headers Since commit 205dcc1ecbc5 ("cpufreq/sh: Replace racy task affinity logic"), the header is useless in sh-cpufreq.c, so remove it. Signed-off-by: Hailong Liu Acked-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/sh-cpufreq.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c index 0ac265d47ef0..1a251e635ebd 100644 --- a/drivers/cpufreq/sh-cpufreq.c +++ b/drivers/cpufreq/sh-cpufreq.c @@ -23,7 +23,6 @@ #include #include #include -#include <linux/sched.h> /* set_cpus_allowed() */ #include #include #include -- cgit v1.2.3 From bcc936c5d5159b4d1891d58f89301f74ff61a67d Mon Sep 17 00:00:00 2001 From: Hailong Liu Date: Thu, 3 Jun 2021 21:57:52 +0800 Subject: cpufreq: loongson2: Remove unused linux/sched.h headers Since commit 759f534e93ac ("CPUFREQ: Loongson2: drop set_cpus_allowed_ptr()"), the header is useless in loongson2_cpufreq.c, so remove it. Signed-off-by: Hailong Liu Acked-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/loongson2_cpufreq.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c index d05e761d9572..afc59b292153 100644 --- a/drivers/cpufreq/loongson2_cpufreq.c +++ b/drivers/cpufreq/loongson2_cpufreq.c @@ -16,7 +16,6 @@ #include #include #include -#include <linux/sched.h> /* set_cpus_allowed() */ #include #include -- cgit v1.2.3