author     Linus Torvalds <torvalds@linux-foundation.org>   2021-07-07 13:22:59 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2021-07-07 13:22:59 -0700
commit     aef4226f914016cc00affa8476ba5164dcca56fd (patch)
tree       1913ddbe24abbdc9c5b03eda015a838984d233fc /drivers
parent     c6e8c51f6978c7aa44641ea4e9071b42d921eb97 (diff)
parent     843372db2e3bf9694e98a1ff9d0da6dc3d53aab8 (diff)
Merge tag 'pm-5.14-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull more power management updates from Rafael Wysocki:
"These include cpufreq core simplifications and fixes, cpufreq driver
updates, a cpuidle driver update, a generic power domains (genpd)
locking fix and a debug-related simplification of the PM core.
Specifics:
- Drop the ->stop_cpu() (not really useful) and ->resolve_freq()
(unused) cpufreq driver callbacks and modify the users of the
former accordingly (Viresh Kumar, Rafael Wysocki).
- Add frequency invariance support to the ACPI CPPC cpufreq driver
again along with the related fixes and cleanups (Viresh Kumar).
- Update the MediaTek, qcom and SCMI ARM cpufreq drivers (Fabien
Parent, Seiya Wang, Sibi Sankar, Christophe JAILLET).
- Rename black/white-lists in the DT cpufreq driver (Viresh Kumar).
- Add generic performance domains support to the dvfs DT bindings
(Sudeep Holla).
- Refine locking in the generic power domains (genpd) support code to
avoid lock dependency issues (Stephen Boyd).
- Update the MSM and qcom ARM cpuidle drivers (Bartosz Dudziak).
- Simplify the PM core debug code by using ktime_us_delta() to
compute time interval lengths (Mark-PK Tsai)"
* tag 'pm-5.14-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (21 commits)
PM: domains: Shrink locking area of the gpd_list_lock
PM: sleep: Use ktime_us_delta() in initcall_debug_report()
cpufreq: CPPC: Add support for frequency invariance
arch_topology: Avoid use-after-free for scale_freq_data
cpufreq: CPPC: Pass structure instance by reference
cpufreq: CPPC: Fix potential memleak in cppc_cpufreq_cpu_init
cpufreq: Remove ->resolve_freq()
cpufreq: Reuse cpufreq_driver_resolve_freq() in __cpufreq_driver_target()
cpufreq: Remove the ->stop_cpu() driver callback
cpufreq: powernv: Migrate to ->exit() callback instead of ->stop_cpu()
cpufreq: CPPC: Migrate to ->exit() callback instead of ->stop_cpu()
cpufreq: intel_pstate: Combine ->stop_cpu() and ->offline()
cpuidle: qcom: Add SPM register data for MSM8226
dt-bindings: arm: msm: Add SAW2 for MSM8226
dt-bindings: cpufreq: update cpu type and clock name for MT8173 SoC
clk: mediatek: remove deprecated CLK_INFRA_CA57SEL for MT8173 SoC
cpufreq: dt: Rename black/white-lists
cpufreq: scmi: Fix an error message
cpufreq: mediatek: add support for mt8365
dt-bindings: dvfs: Add support for generic performance domains
...
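Several entries in the shortlog above are the same mechanical conversion: with ->stop_cpu() removed from struct cpufreq_driver, whatever a driver used to do there moves into its ->exit() (or ->offline()) callback, as the powernv and CPPC hunks below show. A hedged sketch of the pattern for a hypothetical driver (the foo_* names and helpers are illustrative, not from this series):

/* Before this merge: teardown split between ->stop_cpu() and ->exit(). */
static void foo_cpufreq_stop_cpu(struct cpufreq_policy *policy)
{
	foo_drop_to_min_perf(policy->cpu);	/* hypothetical helper */
}

/* After this merge: ->stop_cpu() no longer exists, so the same work
 * runs from ->exit(), which the core still calls on policy removal. */
static int foo_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	foo_drop_to_min_perf(policy->cpu);	/* hypothetical helper */
	kfree(policy->driver_data);
	policy->driver_data = NULL;

	return 0;
}

static struct cpufreq_driver foo_cpufreq_driver = {
	.name = "foo-cpufreq",
	.init = foo_cpufreq_cpu_init,		/* hypothetical init */
	.exit = foo_cpufreq_cpu_exit,
	/* .stop_cpu = foo_cpufreq_stop_cpu,	removed by this series */
};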
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/base/arch_topology.c           27
-rw-r--r--  drivers/base/power/domain.c            38
-rw-r--r--  drivers/base/power/main.c               5
-rw-r--r--  drivers/cpufreq/Kconfig.arm            10
-rw-r--r--  drivers/cpufreq/cppc_cpufreq.c        324
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c   10
-rw-r--r--  drivers/cpufreq/cpufreq.c              44
-rw-r--r--  drivers/cpufreq/intel_pstate.c         11
-rw-r--r--  drivers/cpufreq/mediatek-cpufreq.c      1
-rw-r--r--  drivers/cpufreq/powernv-cpufreq.c      23
-rw-r--r--  drivers/cpufreq/scmi-cpufreq.c          2
-rw-r--r--  drivers/cpuidle/cpuidle-qcom-spm.c     14
12 files changed, 386 insertions, 123 deletions
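One detail worth calling out before the diff itself: the initcall_debug_report() change in drivers/base/power/main.c replaces an open-coded nanoseconds conversion that shifted right by 10 (i.e. divided by 1024) with ktime_us_delta(), which divides by 1000 exactly, so the previously reported "usecs" were about 2.3% low. ktime_get(), ktime_sub(), ktime_to_ns() and ktime_us_delta() are real kernel APIs; the surrounding snippet is illustrative:

	ktime_t calltime = ktime_get();
	/* ... the PM callback being timed runs here ... */
	ktime_t rettime = ktime_get();

	/* Old form: >> 10 divides by 1024, under-reporting by ~2.3%. */
	u64 approx_us = (u64)ktime_to_ns(ktime_sub(rettime, calltime)) >> 10;

	/* New form: exact microseconds. */
	s64 exact_us = ktime_us_delta(rettime, calltime);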
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index c1179edc0f3b..921312a8d957 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -18,10 +18,11 @@
 #include <linux/cpumask.h>
 #include <linux/init.h>
 #include <linux/percpu.h>
+#include <linux/rcupdate.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
 
-static DEFINE_PER_CPU(struct scale_freq_data *, sft_data);
+static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
 static struct cpumask scale_freq_counters_mask;
 static bool scale_freq_invariant;
 
@@ -66,16 +67,20 @@ void topology_set_scale_freq_source(struct scale_freq_data *data,
 	if (cpumask_empty(&scale_freq_counters_mask))
 		scale_freq_invariant = topology_scale_freq_invariant();
 
+	rcu_read_lock();
+
 	for_each_cpu(cpu, cpus) {
-		sfd = per_cpu(sft_data, cpu);
+		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));
 
 		/* Use ARCH provided counters whenever possible */
 		if (!sfd || sfd->source != SCALE_FREQ_SOURCE_ARCH) {
-			per_cpu(sft_data, cpu) = data;
+			rcu_assign_pointer(per_cpu(sft_data, cpu), data);
 			cpumask_set_cpu(cpu, &scale_freq_counters_mask);
 		}
 	}
 
+	rcu_read_unlock();
+
 	update_scale_freq_invariant(true);
 }
 EXPORT_SYMBOL_GPL(topology_set_scale_freq_source);
 
@@ -86,22 +91,32 @@ void topology_clear_scale_freq_source(enum scale_freq_source source,
 	struct scale_freq_data *sfd;
 	int cpu;
 
+	rcu_read_lock();
+
 	for_each_cpu(cpu, cpus) {
-		sfd = per_cpu(sft_data, cpu);
+		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));
 
 		if (sfd && sfd->source == source) {
-			per_cpu(sft_data, cpu) = NULL;
+			rcu_assign_pointer(per_cpu(sft_data, cpu), NULL);
 			cpumask_clear_cpu(cpu, &scale_freq_counters_mask);
 		}
 	}
 
+	rcu_read_unlock();
+
+	/*
+	 * Make sure all references to previous sft_data are dropped to avoid
+	 * use-after-free races.
+	 */
+	synchronize_rcu();
+
 	update_scale_freq_invariant(false);
 }
 EXPORT_SYMBOL_GPL(topology_clear_scale_freq_source);
 
 void topology_scale_freq_tick(void)
 {
-	struct scale_freq_data *sfd = *this_cpu_ptr(&sft_data);
+	struct scale_freq_data *sfd = rcu_dereference_sched(*this_cpu_ptr(&sft_data));
 
 	if (sfd)
 		sfd->set_freq_scale();
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index ab0b740cc0f1..a934c679e6ce 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2018,8 +2018,8 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
 
 	mutex_lock(&gpd_list_lock);
 	list_add(&genpd->gpd_list_node, &gpd_list);
-	genpd_debug_add(genpd);
 	mutex_unlock(&gpd_list_lock);
+	genpd_debug_add(genpd);
 
 	return 0;
 }
@@ -2206,12 +2206,19 @@ static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
 
 static bool genpd_present(const struct generic_pm_domain *genpd)
 {
+	bool ret = false;
 	const struct generic_pm_domain *gpd;
 
-	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
-		if (gpd == genpd)
-			return true;
-	return false;
+	mutex_lock(&gpd_list_lock);
+	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
+		if (gpd == genpd) {
+			ret = true;
+			break;
+		}
+	}
+	mutex_unlock(&gpd_list_lock);
+
+	return ret;
 }
 
 /**
@@ -2222,15 +2229,13 @@ static bool genpd_present(const struct generic_pm_domain *genpd)
 int of_genpd_add_provider_simple(struct device_node *np,
 				 struct generic_pm_domain *genpd)
 {
-	int ret = -EINVAL;
+	int ret;
 
 	if (!np || !genpd)
 		return -EINVAL;
 
-	mutex_lock(&gpd_list_lock);
-
 	if (!genpd_present(genpd))
-		goto unlock;
+		return -EINVAL;
 
 	genpd->dev.of_node = np;
 
@@ -2241,7 +2246,7 @@ int of_genpd_add_provider_simple(struct device_node *np,
 		if (ret != -EPROBE_DEFER)
 			dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
 				ret);
-		goto unlock;
+		return ret;
 	}
 
 	/*
@@ -2259,16 +2264,13 @@ int of_genpd_add_provider_simple(struct device_node *np,
 			dev_pm_opp_of_remove_table(&genpd->dev);
 		}
 
-		goto unlock;
+		return ret;
 	}
 
 	genpd->provider = &np->fwnode;
 	genpd->has_provider = true;
 
-unlock:
-	mutex_unlock(&gpd_list_lock);
-
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
 
@@ -2287,8 +2289,6 @@ int of_genpd_add_provider_onecell(struct device_node *np,
 	if (!np || !data)
 		return -EINVAL;
 
-	mutex_lock(&gpd_list_lock);
-
 	if (!data->xlate)
 		data->xlate = genpd_xlate_onecell;
 
@@ -2328,8 +2328,6 @@ int of_genpd_add_provider_onecell(struct device_node *np,
 	if (ret < 0)
 		goto error;
 
-	mutex_unlock(&gpd_list_lock);
-
 	return 0;
 
 error:
@@ -2348,8 +2346,6 @@ error:
 		}
 	}
 
-	mutex_unlock(&gpd_list_lock);
-
 	return ret;
 }
 EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index f893c3c5af07..d568772152c2 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -220,16 +220,13 @@ static void initcall_debug_report(struct device *dev, ktime_t calltime,
 				  void *cb, int error)
 {
 	ktime_t rettime;
-	s64 nsecs;
 
 	if (!pm_print_times_enabled)
 		return;
 
 	rettime = ktime_get();
-	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
-
 	dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
-		 (unsigned long long)nsecs >> 10);
+		 (unsigned long long)ktime_us_delta(rettime, calltime));
 }
 
 /**
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index e65e0a43be64..a5c5f70acfc9 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -19,6 +19,16 @@ config ACPI_CPPC_CPUFREQ
 
 	  If in doubt, say N.
 
+config ACPI_CPPC_CPUFREQ_FIE
+	bool "Frequency Invariance support for CPPC cpufreq driver"
+	depends on ACPI_CPPC_CPUFREQ && GENERIC_ARCH_TOPOLOGY
+	default y
+	help
+	  This extends frequency invariance support in the CPPC cpufreq driver,
+	  by using CPPC delivered and reference performance counters.
+
+	  If in doubt, say N.
+
 config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM
 	tristate "Allwinner nvmem based SUN50I CPUFreq driver"
 	depends on ARCH_SUNXI
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 2f769b1630c5..d4c27022b9c9 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -10,14 +10,18 @@
 
 #define pr_fmt(fmt)	"CPPC Cpufreq:"	fmt
 
+#include <linux/arch_topology.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/cpu.h>
 #include <linux/cpufreq.h>
 #include <linux/dmi.h>
+#include <linux/irq_work.h>
+#include <linux/kthread.h>
 #include <linux/time.h>
 #include <linux/vmalloc.h>
+#include <uapi/linux/sched/types.h>
 
 #include <asm/unaligned.h>
 
@@ -57,6 +61,216 @@ static struct cppc_workaround_oem_info wa_info[] = {
 	}
 };
 
+#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE
+
+/* Frequency invariance support */
+struct cppc_freq_invariance {
+	int cpu;
+	struct irq_work irq_work;
+	struct kthread_work work;
+	struct cppc_perf_fb_ctrs prev_perf_fb_ctrs;
+	struct cppc_cpudata *cpu_data;
+};
+
+static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv);
+static struct kthread_worker *kworker_fie;
+
+static struct cpufreq_driver cppc_cpufreq_driver;
+static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu);
+static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
+				 struct cppc_perf_fb_ctrs *fb_ctrs_t0,
+				 struct cppc_perf_fb_ctrs *fb_ctrs_t1);
+
+/**
+ * cppc_scale_freq_workfn - CPPC arch_freq_scale updater for frequency invariance
+ * @work: The work item.
+ *
+ * The CPPC driver register itself with the topology core to provide its own
+ * implementation (cppc_scale_freq_tick()) of topology_scale_freq_tick() which
+ * gets called by the scheduler on every tick.
+ *
+ * Note that the arch specific counters have higher priority than CPPC counters,
+ * if available, though the CPPC driver doesn't need to have any special
+ * handling for that.
+ *
+ * On an invocation of cppc_scale_freq_tick(), we schedule an irq work (since we
+ * reach here from hard-irq context), which then schedules a normal work item
+ * and cppc_scale_freq_workfn() updates the per_cpu arch_freq_scale variable
+ * based on the counter updates since the last tick.
+ */
+static void cppc_scale_freq_workfn(struct kthread_work *work)
+{
+	struct cppc_freq_invariance *cppc_fi;
+	struct cppc_perf_fb_ctrs fb_ctrs = {0};
+	struct cppc_cpudata *cpu_data;
+	unsigned long local_freq_scale;
+	u64 perf;
+
+	cppc_fi = container_of(work, struct cppc_freq_invariance, work);
+	cpu_data = cppc_fi->cpu_data;
+
+	if (cppc_get_perf_ctrs(cppc_fi->cpu, &fb_ctrs)) {
+		pr_warn("%s: failed to read perf counters\n", __func__);
+		return;
+	}
+
+	perf = cppc_perf_from_fbctrs(cpu_data, &cppc_fi->prev_perf_fb_ctrs,
+				     &fb_ctrs);
+	cppc_fi->prev_perf_fb_ctrs = fb_ctrs;
+
+	perf <<= SCHED_CAPACITY_SHIFT;
+	local_freq_scale = div64_u64(perf, cpu_data->perf_caps.highest_perf);
+
+	/* This can happen due to counter's overflow */
+	if (unlikely(local_freq_scale > 1024))
+		local_freq_scale = 1024;
+
+	per_cpu(arch_freq_scale, cppc_fi->cpu) = local_freq_scale;
+}
+
+static void cppc_irq_work(struct irq_work *irq_work)
+{
+	struct cppc_freq_invariance *cppc_fi;
+
+	cppc_fi = container_of(irq_work, struct cppc_freq_invariance, irq_work);
+	kthread_queue_work(kworker_fie, &cppc_fi->work);
+}
+
+static void cppc_scale_freq_tick(void)
+{
+	struct cppc_freq_invariance *cppc_fi = &per_cpu(cppc_freq_inv, smp_processor_id());
+
+	/*
+	 * cppc_get_perf_ctrs() can potentially sleep, call that from the right
+	 * context.
+	 */
+	irq_work_queue(&cppc_fi->irq_work);
+}
+
+static struct scale_freq_data cppc_sftd = {
+	.source = SCALE_FREQ_SOURCE_CPPC,
+	.set_freq_scale = cppc_scale_freq_tick,
+};
+
+static void cppc_cpufreq_cpu_fie_init(struct cpufreq_policy *policy)
+{
+	struct cppc_freq_invariance *cppc_fi;
+	int cpu, ret;
+
+	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
+		return;
+
+	for_each_cpu(cpu, policy->cpus) {
+		cppc_fi = &per_cpu(cppc_freq_inv, cpu);
+		cppc_fi->cpu = cpu;
+		cppc_fi->cpu_data = policy->driver_data;
+		kthread_init_work(&cppc_fi->work, cppc_scale_freq_workfn);
+		init_irq_work(&cppc_fi->irq_work, cppc_irq_work);
+
+		ret = cppc_get_perf_ctrs(cpu, &cppc_fi->prev_perf_fb_ctrs);
+		if (ret) {
+			pr_warn("%s: failed to read perf counters for cpu:%d: %d\n",
+				__func__, cpu, ret);
+
+			/*
+			 * Don't abort if the CPU was offline while the driver
+			 * was getting registered.
+			 */
+			if (cpu_online(cpu))
+				return;
+		}
+	}
+
+	/* Register for freq-invariance */
+	topology_set_scale_freq_source(&cppc_sftd, policy->cpus);
+}
+
+/*
+ * We free all the resources on policy's removal and not on CPU removal as the
+ * irq-work are per-cpu and the hotplug core takes care of flushing the pending
+ * irq-works (hint: smpcfd_dying_cpu()) on CPU hotplug. Even if the kthread-work
+ * fires on another CPU after the concerned CPU is removed, it won't harm.
+ *
+ * We just need to make sure to remove them all on policy->exit().
+ */
+static void cppc_cpufreq_cpu_fie_exit(struct cpufreq_policy *policy)
+{
+	struct cppc_freq_invariance *cppc_fi;
+	int cpu;
+
+	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
+		return;
+
+	/* policy->cpus will be empty here, use related_cpus instead */
+	topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_CPPC, policy->related_cpus);
+
+	for_each_cpu(cpu, policy->related_cpus) {
+		cppc_fi = &per_cpu(cppc_freq_inv, cpu);
+		irq_work_sync(&cppc_fi->irq_work);
+		kthread_cancel_work_sync(&cppc_fi->work);
+	}
+}
+
+static void __init cppc_freq_invariance_init(void)
+{
+	struct sched_attr attr = {
+		.size		= sizeof(struct sched_attr),
+		.sched_policy	= SCHED_DEADLINE,
+		.sched_nice	= 0,
+		.sched_priority	= 0,
+		/*
+		 * Fake (unused) bandwidth; workaround to "fix"
+		 * priority inheritance.
+		 */
+		.sched_runtime	= 1000000,
+		.sched_deadline = 10000000,
+		.sched_period	= 10000000,
+	};
+	int ret;
+
+	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
+		return;
+
+	kworker_fie = kthread_create_worker(0, "cppc_fie");
+	if (IS_ERR(kworker_fie))
+		return;
+
+	ret = sched_setattr_nocheck(kworker_fie->task, &attr);
+	if (ret) {
+		pr_warn("%s: failed to set SCHED_DEADLINE: %d\n", __func__,
+			ret);
+		kthread_destroy_worker(kworker_fie);
+		return;
+	}
+}
+
+static void cppc_freq_invariance_exit(void)
+{
+	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
+		return;
+
+	kthread_destroy_worker(kworker_fie);
+	kworker_fie = NULL;
+}
+
+#else
+static inline void cppc_cpufreq_cpu_fie_init(struct cpufreq_policy *policy)
+{
+}
+
+static inline void cppc_cpufreq_cpu_fie_exit(struct cpufreq_policy *policy)
+{
+}
+
+static inline void cppc_freq_invariance_init(void)
+{
+}
+
+static inline void cppc_freq_invariance_exit(void)
+{
+}
+#endif /* CONFIG_ACPI_CPPC_CPUFREQ_FIE */
+
 /* Callback function used to retrieve the max frequency from DMI */
 static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
 {
@@ -182,27 +396,6 @@ static int cppc_verify_policy(struct cpufreq_policy_data *policy)
 	return 0;
 }
 
-static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy)
-{
-	struct cppc_cpudata *cpu_data = policy->driver_data;
-	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
-	unsigned int cpu = policy->cpu;
-	int ret;
-
-	cpu_data->perf_ctrls.desired_perf = caps->lowest_perf;
-
-	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
-	if (ret)
-		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
-			 caps->lowest_perf, cpu, ret);
-
-	/* Remove CPU node from list and free driver data for policy */
-	free_cpumask_var(cpu_data->shared_cpu_map);
-	list_del(&cpu_data->node);
-	kfree(policy->driver_data);
-	policy->driver_data = NULL;
-}
-
 /*
  * The PCC subspace describes the rate at which platform can accept commands
  * on the shared PCC channel (including READs which do not count towards freq
@@ -277,6 +470,16 @@ out:
 	return NULL;
 }
 
+static void cppc_cpufreq_put_cpu_data(struct cpufreq_policy *policy)
+{
+	struct cppc_cpudata *cpu_data = policy->driver_data;
+
+	list_del(&cpu_data->node);
+	free_cpumask_var(cpu_data->shared_cpu_map);
+	kfree(cpu_data);
+	policy->driver_data = NULL;
+}
+
 static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
 	unsigned int cpu = policy->cpu;
@@ -330,7 +533,8 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	default:
 		pr_debug("Unsupported CPU co-ord type: %d\n",
 			 policy->shared_type);
-		return -EFAULT;
+		ret = -EFAULT;
+		goto out;
 	}
 
 	/*
@@ -345,13 +549,40 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	cpu_data->perf_ctrls.desired_perf =  caps->highest_perf;
 
 	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
-	if (ret)
+	if (ret) {
 		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
 			 caps->highest_perf, cpu, ret);
+		goto out;
+	}
+
+	cppc_cpufreq_cpu_fie_init(policy);
+	return 0;
 
+out:
+	cppc_cpufreq_put_cpu_data(policy);
 	return ret;
 }
 
+static int cppc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+	struct cppc_cpudata *cpu_data = policy->driver_data;
+	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
+	unsigned int cpu = policy->cpu;
+	int ret;
+
+	cppc_cpufreq_cpu_fie_exit(policy);
+
+	cpu_data->perf_ctrls.desired_perf = caps->lowest_perf;
+
+	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
+	if (ret)
+		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
+			 caps->lowest_perf, cpu, ret);
+
+	cppc_cpufreq_put_cpu_data(policy);
+	return 0;
+}
+
 static inline u64 get_delta(u64 t1, u64 t0)
 {
 	if (t1 > t0 || t0 > ~(u32)0)
@@ -360,28 +591,25 @@ static inline u64 get_delta(u64 t1, u64 t0)
 	return (u32)t1 - (u32)t0;
 }
 
-static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu_data,
-				     struct cppc_perf_fb_ctrs fb_ctrs_t0,
-				     struct cppc_perf_fb_ctrs fb_ctrs_t1)
+static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
+				 struct cppc_perf_fb_ctrs *fb_ctrs_t0,
+				 struct cppc_perf_fb_ctrs *fb_ctrs_t1)
 {
 	u64 delta_reference, delta_delivered;
-	u64 reference_perf, delivered_perf;
+	u64 reference_perf;
 
-	reference_perf = fb_ctrs_t0.reference_perf;
+	reference_perf = fb_ctrs_t0->reference_perf;
 
-	delta_reference = get_delta(fb_ctrs_t1.reference,
-				    fb_ctrs_t0.reference);
-	delta_delivered = get_delta(fb_ctrs_t1.delivered,
-				    fb_ctrs_t0.delivered);
+	delta_reference = get_delta(fb_ctrs_t1->reference,
+				    fb_ctrs_t0->reference);
+	delta_delivered = get_delta(fb_ctrs_t1->delivered,
+				    fb_ctrs_t0->delivered);
 
-	/* Check to avoid divide-by zero */
-	if (delta_reference || delta_delivered)
-		delivered_perf = (reference_perf * delta_delivered) /
-				 delta_reference;
-	else
-		delivered_perf = cpu_data->perf_ctrls.desired_perf;
+	/* Check to avoid divide-by zero and invalid delivered_perf */
+	if (!delta_reference || !delta_delivered)
+		return cpu_data->perf_ctrls.desired_perf;
 
-	return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf);
+	return (reference_perf * delta_delivered) / delta_reference;
 }
 
 static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
@@ -389,6 +617,7 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
 	struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 	struct cppc_cpudata *cpu_data = policy->driver_data;
+	u64 delivered_perf;
 	int ret;
 
 	cpufreq_cpu_put(policy);
@@ -403,7 +632,10 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
 	if (ret)
 		return ret;
 
-	return cppc_get_rate_from_fbctrs(cpu_data, fb_ctrs_t0, fb_ctrs_t1);
+	delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0,
+					       &fb_ctrs_t1);
+
+	return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf);
 }
 
 static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
@@ -451,7 +683,7 @@ static struct cpufreq_driver cppc_cpufreq_driver = {
 	.target = cppc_cpufreq_set_target,
 	.get = cppc_cpufreq_get_rate,
 	.init = cppc_cpufreq_cpu_init,
-	.stop_cpu = cppc_cpufreq_stop_cpu,
+	.exit = cppc_cpufreq_cpu_exit,
 	.set_boost = cppc_cpufreq_set_boost,
 	.attr = cppc_cpufreq_attr,
 	.name = "cppc_cpufreq",
@@ -504,14 +736,21 @@ static void cppc_check_hisi_workaround(void)
 
 static int __init cppc_cpufreq_init(void)
 {
+	int ret;
+
 	if ((acpi_disabled) || !acpi_cpc_valid())
 		return -ENODEV;
 
 	INIT_LIST_HEAD(&cpu_data_list);
 
 	cppc_check_hisi_workaround();
+	cppc_freq_invariance_init();
+
+	ret = cpufreq_register_driver(&cppc_cpufreq_driver);
+	if (ret)
+		cppc_freq_invariance_exit();
 
-	return cpufreq_register_driver(&cppc_cpufreq_driver);
+	return ret;
 }
 
 static inline void free_cpu_data(void)
@@ -529,6 +768,7 @@ static inline void free_cpu_data(void)
 static void __exit cppc_cpufreq_exit(void)
 {
 	cpufreq_unregister_driver(&cppc_cpufreq_driver);
+	cppc_freq_invariance_exit();
 
 	free_cpu_data();
 }
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 5e07065ec22f..bef7528aecd3 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -15,7 +15,7 @@
  * Machines for which the cpufreq device is *always* created, mostly used for
  * platforms using "operating-points" (V1) property.
  */
-static const struct of_device_id whitelist[] __initconst = {
+static const struct of_device_id allowlist[] __initconst = {
 	{ .compatible = "allwinner,sun4i-a10", },
 	{ .compatible = "allwinner,sun5i-a10s", },
 	{ .compatible = "allwinner,sun5i-a13", },
@@ -100,7 +100,7 @@ static const struct of_device_id whitelist[] __initconst = {
  * Machines for which the cpufreq device is *not* created, mostly used for
  * platforms using "operating-points-v2" property.
  */
-static const struct of_device_id blacklist[] __initconst = {
+static const struct of_device_id blocklist[] __initconst = {
 	{ .compatible = "allwinner,sun50i-h6", },
 
 	{ .compatible = "arm,vexpress", },
@@ -126,6 +126,7 @@ static const struct of_device_id blacklist[] __initconst = {
 	{ .compatible = "mediatek,mt8173", },
 	{ .compatible = "mediatek,mt8176", },
 	{ .compatible = "mediatek,mt8183", },
+	{ .compatible = "mediatek,mt8365", },
 	{ .compatible = "mediatek,mt8516", },
 
 	{ .compatible = "nvidia,tegra20", },
@@ -137,6 +138,7 @@ static const struct of_device_id blacklist[] __initconst = {
 	{ .compatible = "qcom,msm8996", },
 	{ .compatible = "qcom,qcs404", },
 	{ .compatible = "qcom,sc7180", },
+	{ .compatible = "qcom,sc7280", },
 	{ .compatible = "qcom,sdm845", },
 
 	{ .compatible = "st,stih407", },
@@ -177,13 +179,13 @@ static int __init cpufreq_dt_platdev_init(void)
 	if (!np)
 		return -ENODEV;
 
-	match = of_match_node(whitelist, np);
+	match = of_match_node(allowlist, np);
 	if (match) {
 		data = match->data;
 		goto create_pdev;
 	}
 
-	if (cpu0_node_has_opp_v2_prop() && !of_match_node(blacklist, np))
+	if (cpu0_node_has_opp_v2_prop() && !of_match_node(blocklist, np))
 		goto create_pdev;
 
 	of_node_put(np);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index cbab834c37a0..45f3416988f1 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -524,6 +524,22 @@ void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
 }
 EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
 
+static unsigned int __resolve_freq(struct cpufreq_policy *policy,
+		unsigned int target_freq, unsigned int relation)
+{
+	unsigned int idx;
+
+	target_freq = clamp_val(target_freq, policy->min, policy->max);
+
+	if (!cpufreq_driver->target_index)
+		return target_freq;
+
+	idx = cpufreq_frequency_table_target(policy, target_freq, relation);
+	policy->cached_resolved_idx = idx;
+	policy->cached_target_freq = target_freq;
+	return policy->freq_table[idx].frequency;
+}
+
 /**
  * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
  * one.
@@ -538,22 +554,7 @@ EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
 unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
 					 unsigned int target_freq)
 {
-	target_freq = clamp_val(target_freq, policy->min, policy->max);
-	policy->cached_target_freq = target_freq;
-
-	if (cpufreq_driver->target_index) {
-		unsigned int idx;
-
-		idx = cpufreq_frequency_table_target(policy, target_freq,
-						     CPUFREQ_RELATION_L);
-		policy->cached_resolved_idx = idx;
-		return policy->freq_table[idx].frequency;
-	}
-
-	if (cpufreq_driver->resolve_freq)
-		return cpufreq_driver->resolve_freq(policy, target_freq);
-
-	return target_freq;
+	return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_L);
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
 
@@ -1606,9 +1607,6 @@ static int cpufreq_offline(unsigned int cpu)
 		policy->cdev = NULL;
 	}
 
-	if (cpufreq_driver->stop_cpu)
-		cpufreq_driver->stop_cpu(policy);
-
 	if (has_target())
 		cpufreq_exit_governor(policy);
 
@@ -2234,13 +2232,11 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
 			    unsigned int relation)
 {
 	unsigned int old_target_freq = target_freq;
-	int index;
 
 	if (cpufreq_disabled())
 		return -ENODEV;
 
-	/* Make sure that target_freq is within supported range */
-	target_freq = clamp_val(target_freq, policy->min, policy->max);
+	target_freq = __resolve_freq(policy, target_freq, relation);
 
 	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
 		 policy->cpu, target_freq, relation, old_target_freq);
@@ -2261,9 +2257,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
 	if (!cpufreq_driver->target_index)
 		return -EINVAL;
 
-	index = cpufreq_frequency_table_target(policy, target_freq, relation);
-
-	return __target_index(policy, index);
+	return __target_index(policy, policy->cached_resolved_idx);
 }
 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 6012964df51b..bb4549959b11 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -2532,7 +2532,7 @@ static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy)
 	return 0;
 }
 
-static int intel_pstate_cpu_offline(struct cpufreq_policy *policy)
+static int intel_cpufreq_cpu_offline(struct cpufreq_policy *policy)
 {
 	struct cpudata *cpu = all_cpu_data[policy->cpu];
 
@@ -2577,11 +2577,11 @@ static int intel_pstate_cpu_online(struct cpufreq_policy *policy)
 	return 0;
 }
 
-static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
+static int intel_pstate_cpu_offline(struct cpufreq_policy *policy)
 {
-	pr_debug("CPU %d stopping\n", policy->cpu);
-
 	intel_pstate_clear_update_util_hook(policy->cpu);
+
+	return intel_cpufreq_cpu_offline(policy);
 }
 
 static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
@@ -2654,7 +2654,6 @@ static struct cpufreq_driver intel_pstate = {
 	.resume		= intel_pstate_resume,
 	.init		= intel_pstate_cpu_init,
 	.exit		= intel_pstate_cpu_exit,
-	.stop_cpu	= intel_pstate_stop_cpu,
 	.offline	= intel_pstate_cpu_offline,
 	.online		= intel_pstate_cpu_online,
 	.update_limits	= intel_pstate_update_limits,
@@ -2956,7 +2955,7 @@ static struct cpufreq_driver intel_cpufreq = {
 	.fast_switch	= intel_cpufreq_fast_switch,
 	.init		= intel_cpufreq_cpu_init,
 	.exit		= intel_cpufreq_cpu_exit,
-	.offline	= intel_pstate_cpu_offline,
+	.offline	= intel_cpufreq_cpu_offline,
 	.online		= intel_pstate_cpu_online,
 	.suspend	= intel_pstate_suspend,
 	.resume		= intel_pstate_resume,
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index f2e491b25b07..87019d5a9547 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -537,6 +537,7 @@ static const struct of_device_id mtk_cpufreq_machines[] __initconst = {
 	{ .compatible = "mediatek,mt8173", },
 	{ .compatible = "mediatek,mt8176", },
 	{ .compatible = "mediatek,mt8183", },
+	{ .compatible = "mediatek,mt8365", },
 	{ .compatible = "mediatek,mt8516", },
 
 	{ }
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index e439b43c19eb..005600cef273 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -875,7 +875,15 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
 static int powernv_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
-	/* timer is deleted in cpufreq_cpu_stop() */
+	struct powernv_smp_call_data freq_data;
+	struct global_pstate_info *gpstates = policy->driver_data;
+
+	freq_data.pstate_id = idx_to_pstate(powernv_pstate_info.min);
+	freq_data.gpstate_id = idx_to_pstate(powernv_pstate_info.min);
+	smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1);
+	if (gpstates)
+		del_timer_sync(&gpstates->timer);
+
 	kfree(policy->driver_data);
 
 	return 0;
@@ -1007,18 +1015,6 @@ static struct notifier_block powernv_cpufreq_opal_nb = {
 	.priority	= 0,
 };
 
-static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy)
-{
-	struct powernv_smp_call_data freq_data;
-	struct global_pstate_info *gpstates = policy->driver_data;
-
-	freq_data.pstate_id = idx_to_pstate(powernv_pstate_info.min);
-	freq_data.gpstate_id = idx_to_pstate(powernv_pstate_info.min);
-	smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1);
-	if (gpstates)
-		del_timer_sync(&gpstates->timer);
-}
-
 static unsigned int powernv_fast_switch(struct cpufreq_policy *policy,
 					unsigned int target_freq)
 {
@@ -1042,7 +1038,6 @@ static struct cpufreq_driver powernv_cpufreq_driver = {
 	.target_index	= powernv_cpufreq_target_index,
 	.fast_switch	= powernv_fast_switch,
 	.get		= powernv_cpufreq_get,
-	.stop_cpu	= powernv_cpufreq_stop_cpu,
 	.attr		= powernv_cpu_freq_attr,
 };
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index c8a4364ad3c2..ec9a87ca2dbb 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -174,7 +174,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
 	nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
 	if (nr_opp <= 0) {
 		dev_err(cpu_dev, "%s: No OPPs for this device: %d\n",
-			__func__, ret);
+			__func__, nr_opp);
 
 		ret = -ENODEV;
 		goto out_free_opp;
diff --git a/drivers/cpuidle/cpuidle-qcom-spm.c b/drivers/cpuidle/cpuidle-qcom-spm.c
index adf91a6e4d7d..c0e7971da2da 100644
--- a/drivers/cpuidle/cpuidle-qcom-spm.c
+++ b/drivers/cpuidle/cpuidle-qcom-spm.c
@@ -87,6 +87,18 @@ static const struct spm_reg_data spm_reg_8974_8084_cpu = {
 	.start_index[PM_SLEEP_MODE_SPC] = 3,
 };
 
+/* SPM register data for 8226 */
+static const struct spm_reg_data spm_reg_8226_cpu = {
+	.reg_offset = spm_reg_offset_v2_1,
+	.spm_cfg = 0x0,
+	.spm_dly = 0x3C102800,
+	.seq = { 0x60, 0x03, 0x60, 0x0B, 0x0F, 0x20, 0x10, 0x80, 0x30, 0x90,
+		0x5B, 0x60, 0x03, 0x60, 0x3B, 0x76, 0x76, 0x0B, 0x94, 0x5B,
+		0x80, 0x10, 0x26, 0x30, 0x0F },
+	.start_index[PM_SLEEP_MODE_STBY] = 0,
+	.start_index[PM_SLEEP_MODE_SPC] = 5,
+};
+
 static const u8 spm_reg_offset_v1_1[SPM_REG_NR] = {
 	[SPM_REG_CFG]		= 0x08,
 	[SPM_REG_SPM_CTL]	= 0x20,
@@ -259,6 +271,8 @@ static struct spm_driver_data *spm_get_drv(struct platform_device *pdev,
 }
 
 static const struct of_device_id spm_match_table[] = {
+	{ .compatible = "qcom,msm8226-saw2-v2.1-cpu",
+	  .data = &spm_reg_8226_cpu },
 	{ .compatible = "qcom,msm8974-saw2-v2.1-cpu",
 	  .data = &spm_reg_8974_8084_cpu },
 	{ .compatible = "qcom,apq8084-saw2-v2.1-cpu",