Diffstat (limited to 'drivers')
-rw-r--r--  drivers/base/power/domain.c               | 38
-rw-r--r--  drivers/base/power/opp.c                  | 28
-rw-r--r--  drivers/cpufreq/Kconfig.arm               |  1
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c              | 39
-rw-r--r--  drivers/cpufreq/cpufreq.c                 |  9
-rw-r--r--  drivers/cpufreq/intel_pstate.c            | 38
-rw-r--r--  drivers/cpuidle/coupled.c                 | 22
-rw-r--r--  drivers/cpuidle/cpuidle.h                 |  6
-rw-r--r--  drivers/cpuidle/driver.c                  |  4
-rw-r--r--  drivers/staging/board/armadillo800eva.c   |  2
-rw-r--r--  drivers/staging/board/board.c             | 36
11 files changed, 180 insertions(+), 43 deletions(-)
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 416720159e96..16550c63d611 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -213,6 +213,18 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
}
/**
+ * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
+ * @genpd: PM domain to power off.
+ *
+ * Queue up the execution of pm_genpd_poweroff() unless it's already been done
+ * before.
+ */
+static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
+{
+ queue_work(pm_wq, &genpd->power_off_work);
+}
+
+/**
* __pm_genpd_poweron - Restore power to a given PM domain and its masters.
* @genpd: PM domain to power up.
*
@@ -259,8 +271,12 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
return 0;
err:
- list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
+ list_for_each_entry_continue_reverse(link,
+ &genpd->slave_links,
+ slave_node) {
genpd_sd_counter_dec(link->master);
+ genpd_queue_power_off_work(link->master);
+ }
return ret;
}
@@ -349,18 +365,6 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
}
/**
- * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
- * @genpd: PM domait to power off.
- *
- * Queue up the execution of pm_genpd_poweroff() unless it's already been done
- * before.
- */
-static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
-{
- queue_work(pm_wq, &genpd->power_off_work);
-}
-
-/**
* pm_genpd_poweroff - Remove power from a given PM domain.
* @genpd: PM domain to power down.
*
@@ -1469,6 +1473,13 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
mutex_lock(&genpd->lock);
+ if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
+ pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
+ subdomain->name);
+ ret = -EBUSY;
+ goto out;
+ }
+
list_for_each_entry(link, &genpd->master_links, master_node) {
if (link->slave != subdomain)
continue;
@@ -1487,6 +1498,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
break;
}
+out:
mutex_unlock(&genpd->lock);
return ret;
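
Note on the subdomain-removal hunk above: pm_genpd_remove_subdomain() now
refuses with -EBUSY while the subdomain still has attached devices or slave
domains of its own, so platform teardown code has to handle that case. A
minimal sketch, assuming a hypothetical my_soc_detach_subdomain() helper
around the existing API:

    #include <linux/pm_domain.h>
    #include <linux/printk.h>

    /* Hypothetical helper: with the change above, a busy subdomain is kept
     * in place and the call fails instead of silently unlinking it. */
    static int my_soc_detach_subdomain(struct generic_pm_domain *parent,
                                       struct generic_pm_domain *sub)
    {
            int ret;

            ret = pm_genpd_remove_subdomain(parent, sub);
            if (ret == -EBUSY)
                    pr_warn("%s: subdomain %s still in use, not removed\n",
                            parent->name, sub->name);

            return ret;
    }

The __pm_genpd_poweron() error path is the complementary fix: masters that
were already referenced for the failed power-up now also get a power-off
queued when their subdomain counters are dropped.
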
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index eb254497a494..28cd75c535b0 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -341,6 +341,34 @@ unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
/**
+ * dev_pm_opp_get_suspend_opp() - Get suspend opp
+ * @dev: device for which we do this operation
+ *
+ * Return: This function returns pointer to the suspend opp if it is
+ * defined and available, otherwise it returns NULL.
+ *
+ * Locking: This function must be called under rcu_read_lock(). opp is a rcu
+ * protected pointer. The reason for the same is that the opp pointer which is
+ * returned will remain valid for use with opp_get_{voltage, freq} only while
+ * under the locked area. The pointer returned must be used prior to unlocking
+ * with rcu_read_unlock() to maintain the integrity of the pointer.
+ */
+struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
+{
+ struct device_opp *dev_opp;
+
+ opp_rcu_lockdep_assert();
+
+ dev_opp = _find_device_opp(dev);
+ if (IS_ERR(dev_opp) || !dev_opp->suspend_opp ||
+ !dev_opp->suspend_opp->available)
+ return NULL;
+
+ return dev_opp->suspend_opp;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
+
+/**
* dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
* @dev: device for which we do this operation
*
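
dev_pm_opp_get_suspend_opp() is only safe under rcu_read_lock(), and the
returned pointer must be consumed before the lock is dropped. A minimal
caller sketch (the cpufreq-dt hunk further below uses the same pattern; the
helper name here is illustrative):

    #include <linux/pm_opp.h>
    #include <linux/rcupdate.h>

    /* Read the suspend frequency, in Hz, while still inside the RCU
     * read-side critical section; 0 means no usable suspend OPP. */
    static unsigned long example_get_suspend_freq(struct device *dev)
    {
            struct dev_pm_opp *opp;
            unsigned long freq = 0;

            rcu_read_lock();
            opp = dev_pm_opp_get_suspend_opp(dev);
            if (opp)
                    freq = dev_pm_opp_get_freq(opp);
            rcu_read_unlock();

            return freq;
    }
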
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 5f498d9f1825..cd0391e46c6d 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -84,6 +84,7 @@ config ARM_KIRKWOOD_CPUFREQ
config ARM_MT8173_CPUFREQ
bool "Mediatek MT8173 CPUFreq support"
depends on ARCH_MEDIATEK && REGULATOR
+ depends on !CPU_THERMAL || THERMAL=y
select PM_OPP
help
This adds the CPUFreq driver support for Mediatek MT8173 SoC.
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index c3583cdfadbd..7c0d70e2a861 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -196,6 +196,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
struct device *cpu_dev;
struct regulator *cpu_reg;
struct clk *cpu_clk;
+ struct dev_pm_opp *suspend_opp;
unsigned long min_uV = ~0, max_uV = 0;
unsigned int transition_latency;
bool need_update = false;
@@ -239,6 +240,17 @@ static int cpufreq_init(struct cpufreq_policy *policy)
*/
of_cpumask_init_opp_table(policy->cpus);
+ /*
+ * But we need OPP table to function so if it is not there let's
+ * give platform code chance to provide it for us.
+ */
+ ret = dev_pm_opp_get_opp_count(cpu_dev);
+ if (ret <= 0) {
+ pr_debug("OPP table is not ready, deferring probe\n");
+ ret = -EPROBE_DEFER;
+ goto out_free_opp;
+ }
+
if (need_update) {
struct cpufreq_dt_platform_data *pd = cpufreq_get_driver_data();
@@ -249,24 +261,16 @@ static int cpufreq_init(struct cpufreq_policy *policy)
* OPP tables are initialized only for policy->cpu, do it for
* others as well.
*/
- set_cpus_sharing_opps(cpu_dev, policy->cpus);
+ ret = set_cpus_sharing_opps(cpu_dev, policy->cpus);
+ if (ret)
+ dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
+ __func__, ret);
of_property_read_u32(np, "clock-latency", &transition_latency);
} else {
transition_latency = dev_pm_opp_get_max_clock_latency(cpu_dev);
}
- /*
- * But we need OPP table to function so if it is not there let's
- * give platform code chance to provide it for us.
- */
- ret = dev_pm_opp_get_opp_count(cpu_dev);
- if (ret <= 0) {
- pr_debug("OPP table is not ready, deferring probe\n");
- ret = -EPROBE_DEFER;
- goto out_free_opp;
- }
-
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
ret = -ENOMEM;
@@ -300,7 +304,8 @@ static int cpufreq_init(struct cpufreq_policy *policy)
rcu_read_unlock();
tol_uV = opp_uV * priv->voltage_tolerance / 100;
- if (regulator_is_supported_voltage(cpu_reg, opp_uV,
+ if (regulator_is_supported_voltage(cpu_reg,
+ opp_uV - tol_uV,
opp_uV + tol_uV)) {
if (opp_uV < min_uV)
min_uV = opp_uV;
@@ -329,6 +334,13 @@ static int cpufreq_init(struct cpufreq_policy *policy)
policy->driver_data = priv;
policy->clk = cpu_clk;
+
+ rcu_read_lock();
+ suspend_opp = dev_pm_opp_get_suspend_opp(cpu_dev);
+ if (suspend_opp)
+ policy->suspend_freq = dev_pm_opp_get_freq(suspend_opp) / 1000;
+ rcu_read_unlock();
+
ret = cpufreq_table_validate_and_show(policy, freq_table);
if (ret) {
dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
@@ -419,6 +431,7 @@ static struct cpufreq_driver dt_cpufreq_driver = {
.ready = cpufreq_ready,
.name = "cpufreq-dt",
.attr = cpufreq_dt_attr,
+ .suspend = cpufreq_generic_suspend,
};
static int dt_cpufreq_probe(struct platform_device *pdev)
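
Two of the hunks above deserve a note. The regulator check previously used
opp_uV itself as the lower bound, ignoring the negative tolerance, so OPPs
the regulator could only satisfy slightly below their nominal voltage were
rejected; the window is now symmetric around the nominal value. A worked
sketch with hypothetical numbers (the helper name is illustrative):

    #include <linux/regulator/consumer.h>

    /* With opp_uV = 1000000 (1.00 V) and a 5 % tolerance the window checked
     * is [950000, 1050000] uV; before the fix the lower bound was 1000000. */
    static bool example_opp_voltage_ok(struct regulator *cpu_reg,
                                       unsigned long opp_uV,
                                       unsigned int tolerance_pct)
    {
            unsigned long tol_uV = opp_uV * tolerance_pct / 100;

            return regulator_is_supported_voltage(cpu_reg, opp_uV - tol_uV,
                                                  opp_uV + tol_uV) > 0;
    }

The suspend_opp lookup together with the new .suspend = cpufreq_generic_suspend
hook means the driver pins the CPUs to the suspend OPP's frequency across
system suspend whenever such an OPP is defined.
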
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b3d9368339af..6633b3fa996e 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -239,7 +239,7 @@ int cpufreq_generic_init(struct cpufreq_policy *policy,
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
/* Only for cpufreq core internal use */
-struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
+static struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
@@ -1626,8 +1626,8 @@ int cpufreq_generic_suspend(struct cpufreq_policy *policy)
int ret;
if (!policy->suspend_freq) {
- pr_err("%s: suspend_freq can't be zero\n", __func__);
- return -EINVAL;
+ pr_debug("%s: suspend_freq not defined\n", __func__);
+ return 0;
}
pr_debug("%s: Setting suspend-freq: %u\n", __func__,
@@ -2031,8 +2031,7 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
if (!try_module_get(policy->governor->owner))
return -EINVAL;
- pr_debug("__cpufreq_governor for CPU %u, event %u\n",
- policy->cpu, event);
+ pr_debug("%s: for CPU %u, event %u\n", __func__, policy->cpu, event);
mutex_lock(&cpufreq_governor_lock);
if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
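
The relaxed cpufreq_generic_suspend() lets a driver install the generic hook
unconditionally: when no suspend frequency has been set up, the callback now
logs at debug level and succeeds instead of failing with -EINVAL. A
hypothetical driver skeleton (the init/target callbacks are only declared to
keep the sketch short):

    #include <linux/cpufreq.h>

    static int example_init(struct cpufreq_policy *policy);             /* hypothetical */
    static int example_target_index(struct cpufreq_policy *policy,
                                    unsigned int index);                /* hypothetical */

    static struct cpufreq_driver example_cpufreq_driver = {
            .name           = "example-cpufreq",
            .init           = example_init,
            .verify         = cpufreq_generic_frequency_table_verify,
            .target_index   = example_target_index,
            .get            = cpufreq_generic_get,
            .suspend        = cpufreq_generic_suspend, /* safe even with no suspend OPP */
    };
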
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index cddc61939a86..3af9dd7332e6 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -260,24 +260,31 @@ static inline void update_turbo_state(void)
cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}
-#define PCT_TO_HWP(x) (x * 255 / 100)
static void intel_pstate_hwp_set(void)
{
- int min, max, cpu;
- u64 value, freq;
+ int min, hw_min, max, hw_max, cpu, range, adj_range;
+ u64 value, cap;
+
+ rdmsrl(MSR_HWP_CAPABILITIES, cap);
+ hw_min = HWP_LOWEST_PERF(cap);
+ hw_max = HWP_HIGHEST_PERF(cap);
+ range = hw_max - hw_min;
get_online_cpus();
for_each_online_cpu(cpu) {
rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
- min = PCT_TO_HWP(limits.min_perf_pct);
+ adj_range = limits.min_perf_pct * range / 100;
+ min = hw_min + adj_range;
value &= ~HWP_MIN_PERF(~0L);
value |= HWP_MIN_PERF(min);
- max = PCT_TO_HWP(limits.max_perf_pct);
+ adj_range = limits.max_perf_pct * range / 100;
+ max = hw_min + adj_range;
if (limits.no_turbo) {
- rdmsrl( MSR_HWP_CAPABILITIES, freq);
- max = HWP_GUARANTEED_PERF(freq);
+ hw_max = HWP_GUARANTEED_PERF(cap);
+ if (hw_max < max)
+ max = hw_max;
}
value &= ~HWP_MAX_PERF(~0L);
@@ -423,6 +430,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
limits.max_sysfs_pct = clamp_t(int, input, 0 , 100);
limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
+ limits.max_perf_pct = max(limits.min_policy_pct, limits.max_perf_pct);
+ limits.max_perf_pct = max(limits.min_perf_pct, limits.max_perf_pct);
limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
if (hwp_active)
@@ -442,6 +451,8 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
limits.min_sysfs_pct = clamp_t(int, input, 0 , 100);
limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
+ limits.min_perf_pct = min(limits.max_policy_pct, limits.min_perf_pct);
+ limits.min_perf_pct = min(limits.max_perf_pct, limits.min_perf_pct);
limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
if (hwp_active)
@@ -989,12 +1000,19 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
limits.min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
limits.min_policy_pct = clamp_t(int, limits.min_policy_pct, 0 , 100);
- limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
- limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
-
limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100);
+
+ /* Normalize user input to [min_policy_pct, max_policy_pct] */
+ limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
+ limits.min_perf_pct = min(limits.max_policy_pct, limits.min_perf_pct);
limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
+ limits.max_perf_pct = max(limits.min_policy_pct, limits.max_perf_pct);
+
+ /* Make sure min_perf_pct <= max_perf_pct */
+ limits.min_perf_pct = min(limits.max_perf_pct, limits.min_perf_pct);
+
+ limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
if (hwp_active)
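
The HWP rewrite stops scaling the sysfs percentages against the fixed 0..255
encoding and instead maps them onto the range the CPU actually reports in
MSR_HWP_CAPABILITIES. A worked example with hypothetical capability values:

    /* With hw_min = 1 and hw_max = 35 (range = 34), a 50 % limit maps to
     * 1 + 34 * 50 / 100 = 18, whereas the old PCT_TO_HWP(50) always produced
     * 127 regardless of what the part supports. */
    static int example_pct_to_hwp_level(int hw_min, int hw_max, int pct)
    {
            int range = hw_max - hw_min;

            return hw_min + range * pct / 100;
    }

The added clamping in store_min_perf_pct(), store_max_perf_pct() and
intel_pstate_set_policy() keeps min_perf_pct <= max_perf_pct and confines
both within the policy limits, so the two sysfs knobs can no longer be driven
past each other.
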
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index 1523e2d745eb..344058f8501a 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -187,6 +187,28 @@ bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state)
}
/**
+ * cpuidle_coupled_state_verify - check if the coupled states are correctly set.
+ * @drv: struct cpuidle_driver for the platform
+ *
+ * Returns 0 for valid state values, a negative error code otherwise:
+ * * -EINVAL if any coupled state(safe_state_index) is wrongly set.
+ */
+int cpuidle_coupled_state_verify(struct cpuidle_driver *drv)
+{
+ int i;
+
+ for (i = drv->state_count - 1; i >= 0; i--) {
+ if (cpuidle_state_is_coupled(drv, i) &&
+ (drv->safe_state_index == i ||
+ drv->safe_state_index < 0 ||
+ drv->safe_state_index >= drv->state_count))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
* cpuidle_coupled_set_ready - mark a cpu as ready
* @coupled: the struct coupled that contains the current cpu
*/
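
cpuidle_coupled_state_verify() makes registration fail early when a driver
with coupled states has safe_state_index pointing at a coupled state or
outside the valid range. An illustrative, purely hypothetical driver layout
that passes the check (the enter callbacks are only declared here):

    #include <linux/cpuidle.h>

    static int example_enter_wfi(struct cpuidle_device *dev,
                                 struct cpuidle_driver *drv, int index);     /* hypothetical */
    static int example_enter_coupled(struct cpuidle_device *dev,
                                     struct cpuidle_driver *drv, int index); /* hypothetical */

    static struct cpuidle_driver example_idle_driver = {
            .name             = "example_idle",
            .states = {
                    [0] = {
                            .name   = "WFI",
                            .enter  = example_enter_wfi,
                    },
                    [1] = {
                            .name   = "C-coupled",
                            .flags  = CPUIDLE_FLAG_COUPLED,
                            .enter  = example_enter_coupled,
                    },
            },
            .state_count      = 2,
            .safe_state_index = 0, /* must index a non-coupled, in-range state */
    };

Setting safe_state_index to 1 (the coupled state itself) or to any value
outside [0, state_count) now makes __cpuidle_register_driver() return
-EINVAL, as wired up in the driver.c hunk below.
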
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
index 178c5ad3d568..f87f399b0540 100644
--- a/drivers/cpuidle/cpuidle.h
+++ b/drivers/cpuidle/cpuidle.h
@@ -35,6 +35,7 @@ extern void cpuidle_remove_sysfs(struct cpuidle_device *dev);
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state);
+int cpuidle_coupled_state_verify(struct cpuidle_driver *drv);
int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int next_state);
int cpuidle_coupled_register_device(struct cpuidle_device *dev);
@@ -46,6 +47,11 @@ bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state)
return false;
}
+static inline int cpuidle_coupled_state_verify(struct cpuidle_driver *drv)
+{
+ return 0;
+}
+
static inline int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int next_state)
{
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 5db147859b90..389ade4572be 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -227,6 +227,10 @@ static int __cpuidle_register_driver(struct cpuidle_driver *drv)
if (!drv || !drv->state_count)
return -EINVAL;
+ ret = cpuidle_coupled_state_verify(drv);
+ if (ret)
+ return ret;
+
if (cpuidle_disabled())
return -ENODEV;
diff --git a/drivers/staging/board/armadillo800eva.c b/drivers/staging/board/armadillo800eva.c
index 81df77bd55cc..9c41652ee908 100644
--- a/drivers/staging/board/armadillo800eva.c
+++ b/drivers/staging/board/armadillo800eva.c
@@ -91,7 +91,7 @@ static const struct board_staging_dev armadillo800eva_devices[] __initconst = {
.pdev = &lcdc0_device,
.clocks = lcdc0_clocks,
.nclocks = ARRAY_SIZE(lcdc0_clocks),
- .domain = "a4lc",
+ .domain = "/system-controller@e6180000/pm-domains/c5/a4lc@1",
},
};
diff --git a/drivers/staging/board/board.c b/drivers/staging/board/board.c
index 29d456e29f38..3eb5eb8f069c 100644
--- a/drivers/staging/board/board.c
+++ b/drivers/staging/board/board.c
@@ -135,6 +135,40 @@ int __init board_staging_register_clock(const struct board_staging_clk *bsc)
return error;
}
+#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
+static int board_staging_add_dev_domain(struct platform_device *pdev,
+ const char *domain)
+{
+ struct of_phandle_args pd_args;
+ struct generic_pm_domain *pd;
+ struct device_node *np;
+
+ np = of_find_node_by_path(domain);
+ if (!np) {
+ pr_err("Cannot find domain node %s\n", domain);
+ return -ENOENT;
+ }
+
+ pd_args.np = np;
+ pd_args.args_count = 0;
+ pd = of_genpd_get_from_provider(&pd_args);
+ if (IS_ERR(pd)) {
+ pr_err("Cannot find genpd %s (%ld)\n", domain, PTR_ERR(pd));
+ return PTR_ERR(pd);
+
+ }
+ pr_debug("Found genpd %s for device %s\n", pd->name, pdev->name);
+
+ return pm_genpd_add_device(pd, &pdev->dev);
+}
+#else
+static inline int board_staging_add_dev_domain(struct platform_device *pdev,
+ const char *domain)
+{
+ return 0;
+}
+#endif
+
int __init board_staging_register_device(const struct board_staging_dev *dev)
{
struct platform_device *pdev = dev->pdev;
@@ -161,7 +195,7 @@ int __init board_staging_register_device(const struct board_staging_dev *dev)
}
if (dev->domain)
- __pm_genpd_name_add_device(dev->domain, &pdev->dev, NULL);
+ board_staging_add_dev_domain(pdev, dev->domain);
return error;
}
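
board_staging_add_dev_domain() resolves the domain string with
of_find_node_by_path() and expects that node to be a genpd provider taking
no specifier cells (pd_args.args_count is 0), which is why the
armadillo800eva entry above switched from the short genpd name to the full
device-tree path. A hypothetical board table entry following the new
convention (the device is illustrative; the path is the one used above):

    static struct platform_device example_pdev;   /* hypothetical device */

    /* Sketch only: the .domain path must match a power-domain provider node
     * present in the running device tree. */
    static const struct board_staging_dev example_devices[] __initconst = {
            {
                    .pdev   = &example_pdev,
                    .domain = "/system-controller@e6180000/pm-domains/c5/a4lc@1",
            },
    };
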