author		Jeffy Chen <jeffy.chen@rock-chips.com>	2020-01-13 11:48:15 +0800
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2020-03-11 09:08:44 +0100
commit		b8fe128dad8f97cc9af7c55a264d1fc5ab677195 (patch)
tree		25c892594f1a7635cbefd7b614fef6651401e73e
parent		847e33867b65fdc4747a15646d1fdb94e65740a6 (diff)
arch_topology: Adjust initial CPU capacities with current freq
The CPU freqs are not supposed to change before the cpufreq policies are
properly registered, meaning that the current freqs should be used to
calculate the initial CPU capacities. Doing this helps choose the best CPU
during early boot, especially for initramfs decompression.

There are no functional changes for non-clk CPU DVFS mechanisms.

Signed-off-by: Jeffy Chen <jeffy.chen@rock-chips.com>
Link: https://lore.kernel.org/r/20200113034815.25924-1-jeffy.chen@rock-chips.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 drivers/base/arch_topology.c | 40 ++++++++++++++++++++++++++++------------
 1 file changed, 28 insertions(+), 12 deletions(-)
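For illustration, here is a minimal user-space sketch of the two-pass
normalization this patch introduces in topology_normalize_cpu_scale(): the
raw capacities and boot frequencies below are made-up values for a 2+2
big.LITTLE system, and SCHED_CAPACITY_SHIFT mirrors the kernel's value of 10.
It is only a sketch of the arithmetic, not the kernel code itself (which uses
per-CPU variables and div64_u64()).

#include <stdio.h>
#include <stdint.h>

#define SCHED_CAPACITY_SHIFT	10	/* same fixed-point shift the scheduler uses */
#define NR_CPUS			4

int main(void)
{
	/* capacity-dmips-mhz values parsed from the device tree (example data) */
	uint64_t raw_capacity[NR_CPUS] = { 578, 578, 1024, 1024 };
	/* current clk rates in kHz at early boot (example data) */
	uint64_t freq_factor[NR_CPUS] = { 408000, 408000, 816000, 816000 };
	uint64_t capacity_scale = 1;
	uint64_t capacity;
	int cpu;

	/* first pass: find the largest raw_capacity * freq product */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		capacity = raw_capacity[cpu] * freq_factor[cpu];
		if (capacity > capacity_scale)
			capacity_scale = capacity;
	}

	/* second pass: scale every CPU so the fastest one reports 1024 */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		capacity = raw_capacity[cpu] * freq_factor[cpu];
		capacity = (capacity << SCHED_CAPACITY_SHIFT) / capacity_scale;
		printf("cpu%d cpu_capacity=%llu\n", cpu,
		       (unsigned long long)capacity);
	}

	return 0;
}

With these example numbers the little cores end up at roughly 289 instead of
the 578 they would have been assigned before this patch, because at boot they
are clocked at half the frequency of the big cores.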
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 6119e11a9f95..b56c33e5b6a8 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -94,7 +94,7 @@ static void update_topology_flags_workfn(struct work_struct *work)
 	update_topology = 0;
 }
 
-static u32 capacity_scale;
+static DEFINE_PER_CPU(u32, freq_factor) = 1;
 static u32 *raw_capacity;
 
 static int free_raw_capacity(void)
@@ -108,17 +108,23 @@ static int free_raw_capacity(void)
 void topology_normalize_cpu_scale(void)
 {
 	u64 capacity;
+	u64 capacity_scale;
 	int cpu;
 
 	if (!raw_capacity)
 		return;
 
-	pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
+	capacity_scale = 1;
 	for_each_possible_cpu(cpu) {
-		pr_debug("cpu_capacity: cpu=%d raw_capacity=%u\n",
-			 cpu, raw_capacity[cpu]);
-		capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
-			/ capacity_scale;
+		capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
+		capacity_scale = max(capacity, capacity_scale);
+	}
+
+	pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
+	for_each_possible_cpu(cpu) {
+		capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
+		capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
+			capacity_scale);
 		topology_set_cpu_scale(cpu, capacity);
 		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
 			cpu, topology_get_cpu_scale(cpu));
@@ -127,6 +133,7 @@ void topology_normalize_cpu_scale(void)
 
 bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
 {
+	struct clk *cpu_clk;
 	static bool cap_parsing_failed;
 	int ret;
 	u32 cpu_capacity;
@@ -146,10 +153,22 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
 				return false;
 			}
 		}
-		capacity_scale = max(cpu_capacity, capacity_scale);
 		raw_capacity[cpu] = cpu_capacity;
 		pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
 			cpu_node, raw_capacity[cpu]);
+
+		/*
+		 * Update freq_factor for calculating early boot cpu capacities.
+		 * For non-clk CPU DVFS mechanism, there's no way to get the
+		 * frequency value now, assuming they are running at the same
+		 * frequency (by keeping the initial freq_factor value).
+		 */
+		cpu_clk = of_clk_get(cpu_node, 0);
+		if (!PTR_ERR_OR_ZERO(cpu_clk))
+			per_cpu(freq_factor, cpu) =
+				clk_get_rate(cpu_clk) / 1000;
+
+		clk_put(cpu_clk);
 	} else {
 		if (raw_capacity) {
 			pr_err("cpu_capacity: missing %pOF raw capacity\n",
@@ -188,11 +207,8 @@ init_cpu_capacity_callback(struct notifier_block *nb,
 
 	cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);
 
-	for_each_cpu(cpu, policy->related_cpus) {
-		raw_capacity[cpu] = topology_get_cpu_scale(cpu) *
-				    policy->cpuinfo.max_freq / 1000UL;
-		capacity_scale = max(raw_capacity[cpu], capacity_scale);
-	}
+	for_each_cpu(cpu, policy->related_cpus)
+		per_cpu(freq_factor, cpu) = policy->cpuinfo.max_freq / 1000;
 
 	if (cpumask_empty(cpus_to_visit)) {
 		topology_normalize_cpu_scale();