path: root/drivers/cpufreq
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/Kconfig.arm           |  25
-rw-r--r--  drivers/cpufreq/Makefile              |   1
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c        |   8
-rw-r--r--  drivers/cpufreq/arm_big_little.c      |   2
-rw-r--r--  drivers/cpufreq/armada-37xx-cpufreq.c | 100
-rw-r--r--  drivers/cpufreq/bmips-cpufreq.c       |   2
-rw-r--r--  drivers/cpufreq/brcmstb-avs-cpufreq.c | 325
-rw-r--r--  drivers/cpufreq/cppc_cpufreq.c        | 129
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c  |   5
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c          |  10
-rw-r--r--  drivers/cpufreq/cpufreq-dt.h          |   5
-rw-r--r--  drivers/cpufreq/cpufreq.c             |  78
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c    |  14
-rw-r--r--  drivers/cpufreq/ia64-acpi-cpufreq.c   |   4
-rw-r--r--  drivers/cpufreq/imx6q-cpufreq.c       |  32
-rw-r--r--  drivers/cpufreq/intel_pstate.c        | 227
-rw-r--r--  drivers/cpufreq/longhaul.c            |   4
-rw-r--r--  drivers/cpufreq/powernv-cpufreq.c     |  14
-rw-r--r--  drivers/cpufreq/pxa3xx-cpufreq.c      |   2
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-kryo.c   | 212
-rw-r--r--  drivers/cpufreq/s3c2440-cpufreq.c     |   2
-rw-r--r--  drivers/cpufreq/s3c24xx-cpufreq.c     |   2
-rw-r--r--  drivers/cpufreq/scmi-cpufreq.c        |   4
-rw-r--r--  drivers/cpufreq/sfi-cpufreq.c         |   4
-rw-r--r--  drivers/cpufreq/spear-cpufreq.c       |   2
-rw-r--r--  drivers/cpufreq/speedstep-lib.c       |   2
-rw-r--r--  drivers/cpufreq/tegra20-cpufreq.c     | 200
-rw-r--r--  drivers/cpufreq/ti-cpufreq.c          |   7
28 files changed, 874 insertions(+), 548 deletions(-)
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 7f56fe5183f2..52f5f1a2040c 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -20,7 +20,7 @@ config ACPI_CPPC_CPUFREQ
config ARM_ARMADA_37XX_CPUFREQ
tristate "Armada 37xx CPUFreq support"
- depends on ARCH_MVEBU
+ depends on ARCH_MVEBU && CPUFREQ_DT
help
This adds the CPUFreq driver support for Marvell Armada 37xx SoCs.
The Armada 37xx PMU supports 4 frequency and VDD levels.
@@ -71,16 +71,6 @@ config ARM_BRCMSTB_AVS_CPUFREQ
Say Y, if you have a Broadcom SoC with AVS support for DFS or DVFS.
-config ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
- bool "Broadcom STB AVS CPUfreq driver sysfs debug capability"
- depends on ARM_BRCMSTB_AVS_CPUFREQ
- help
- Enabling this option turns on debug support via sysfs under
- /sys/kernel/debug/brcmstb-avs-cpufreq. It is possible to read all and
- write some AVS mailbox registers through sysfs entries.
-
- If in doubt, say N.
-
config ARM_EXYNOS5440_CPUFREQ
tristate "SAMSUNG EXYNOS5440"
depends on SOC_EXYNOS5440
@@ -134,6 +124,17 @@ config ARM_OMAP2PLUS_CPUFREQ
depends on ARCH_OMAP2PLUS
default ARCH_OMAP2PLUS
+config ARM_QCOM_CPUFREQ_KRYO
+ tristate "Qualcomm Kryo based CPUFreq"
+ depends on ARM64
+ depends on QCOM_QFPROM
+ depends on QCOM_SMEM
+ select PM_OPP
+ help
+ This adds the CPUFreq driver for Qualcomm Kryo SoC based boards.
+
+ If in doubt, say N.
+
config ARM_S3C_CPUFREQ
bool
help
@@ -274,7 +275,7 @@ config ARM_TANGO_CPUFREQ
default y
config ARM_TEGRA20_CPUFREQ
- bool "Tegra20 CPUFreq support"
+ tristate "Tegra20 CPUFreq support"
depends on ARCH_TEGRA
default y
help
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 8d24ade3bd02..fb4a2ecac43b 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -65,6 +65,7 @@ obj-$(CONFIG_MACH_MVEBU_V7) += mvebu-cpufreq.o
obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o
obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o
+obj-$(CONFIG_ARM_QCOM_CPUFREQ_KRYO) += qcom-cpufreq-kryo.o
obj-$(CONFIG_ARM_S3C2410_CPUFREQ) += s3c2410-cpufreq.o
obj-$(CONFIG_ARM_S3C2412_CPUFREQ) += s3c2412-cpufreq.o
obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 9449657d72f0..b61f4ec43e06 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -465,8 +465,8 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
return result;
}
-unsigned int acpi_cpufreq_fast_switch(struct cpufreq_policy *policy,
- unsigned int target_freq)
+static unsigned int acpi_cpufreq_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq)
{
struct acpi_cpufreq_data *data = policy->driver_data;
struct acpi_processor_performance *perf;
@@ -759,8 +759,8 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
goto err_unreg;
}
- freq_table = kzalloc(sizeof(*freq_table) *
- (perf->state_count+1), GFP_KERNEL);
+ freq_table = kcalloc(perf->state_count + 1, sizeof(*freq_table),
+ GFP_KERNEL);
if (!freq_table) {
result = -ENOMEM;
goto err_unreg;
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index 1d7ef5fc1977..cf62a1f64dd7 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -280,7 +280,7 @@ static int merge_cluster_tables(void)
for (i = 0; i < MAX_CLUSTERS; i++)
count += get_table_count(freq_table[i]);
- table = kzalloc(sizeof(*table) * count, GFP_KERNEL);
+ table = kcalloc(count, sizeof(*table), GFP_KERNEL);
if (!table)
return -ENOMEM;
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
index 72a2975499db..739da90ff3f6 100644
--- a/drivers/cpufreq/armada-37xx-cpufreq.c
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -23,6 +23,8 @@
#include <linux/regmap.h>
#include <linux/slab.h>
+#include "cpufreq-dt.h"
+
/* Power management in North Bridge register set */
#define ARMADA_37XX_NB_L0L1 0x18
#define ARMADA_37XX_NB_L2L3 0x1C
@@ -56,6 +58,16 @@
*/
#define LOAD_LEVEL_NR 4
+struct armada37xx_cpufreq_state {
+ struct regmap *regmap;
+ u32 nb_l0l1;
+ u32 nb_l2l3;
+ u32 nb_dyn_mod;
+ u32 nb_cpu_load;
+};
+
+static struct armada37xx_cpufreq_state *armada37xx_cpufreq_state;
+
struct armada_37xx_dvfs {
u32 cpu_freq_max;
u8 divider[LOAD_LEVEL_NR];
@@ -136,7 +148,7 @@ static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
clk_set_parent(clk, parent);
}
-static void __init armada37xx_cpufreq_disable_dvfs(struct regmap *base)
+static void armada37xx_cpufreq_disable_dvfs(struct regmap *base)
{
unsigned int reg = ARMADA_37XX_NB_DYN_MOD,
mask = ARMADA_37XX_NB_DFS_EN;
@@ -162,10 +174,47 @@ static void __init armada37xx_cpufreq_enable_dvfs(struct regmap *base)
regmap_update_bits(base, reg, mask, mask);
}
+static int armada37xx_cpufreq_suspend(struct cpufreq_policy *policy)
+{
+ struct armada37xx_cpufreq_state *state = armada37xx_cpufreq_state;
+
+ regmap_read(state->regmap, ARMADA_37XX_NB_L0L1, &state->nb_l0l1);
+ regmap_read(state->regmap, ARMADA_37XX_NB_L2L3, &state->nb_l2l3);
+ regmap_read(state->regmap, ARMADA_37XX_NB_CPU_LOAD,
+ &state->nb_cpu_load);
+ regmap_read(state->regmap, ARMADA_37XX_NB_DYN_MOD, &state->nb_dyn_mod);
+
+ return 0;
+}
+
+static int armada37xx_cpufreq_resume(struct cpufreq_policy *policy)
+{
+ struct armada37xx_cpufreq_state *state = armada37xx_cpufreq_state;
+
+ /* Ensure DVFS is disabled otherwise the following registers are RO */
+ armada37xx_cpufreq_disable_dvfs(state->regmap);
+
+ regmap_write(state->regmap, ARMADA_37XX_NB_L0L1, state->nb_l0l1);
+ regmap_write(state->regmap, ARMADA_37XX_NB_L2L3, state->nb_l2l3);
+ regmap_write(state->regmap, ARMADA_37XX_NB_CPU_LOAD,
+ state->nb_cpu_load);
+
+ /*
+ * NB_DYN_MOD register is the one that actually enable back DVFS if it
+ * was enabled before the suspend operation. This must be done last
+ * otherwise other registers are not writable.
+ */
+ regmap_write(state->regmap, ARMADA_37XX_NB_DYN_MOD, state->nb_dyn_mod);
+
+ return 0;
+}
+
static int __init armada37xx_cpufreq_driver_init(void)
{
+ struct cpufreq_dt_platform_data pdata;
struct armada_37xx_dvfs *dvfs;
struct platform_device *pdev;
+ unsigned long freq;
unsigned int cur_frequency;
struct regmap *nb_pm_base;
struct device *cpu_dev;
@@ -207,33 +256,58 @@ static int __init armada37xx_cpufreq_driver_init(void)
}
dvfs = armada_37xx_cpu_freq_info_get(cur_frequency);
- if (!dvfs)
+ if (!dvfs) {
+ clk_put(clk);
return -EINVAL;
+ }
+
+ armada37xx_cpufreq_state = kmalloc(sizeof(*armada37xx_cpufreq_state),
+ GFP_KERNEL);
+ if (!armada37xx_cpufreq_state) {
+ clk_put(clk);
+ return -ENOMEM;
+ }
+
+ armada37xx_cpufreq_state->regmap = nb_pm_base;
armada37xx_cpufreq_dvfs_setup(nb_pm_base, clk, dvfs->divider);
clk_put(clk);
for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR;
load_lvl++) {
- unsigned long freq = cur_frequency / dvfs->divider[load_lvl];
+ freq = cur_frequency / dvfs->divider[load_lvl];
ret = dev_pm_opp_add(cpu_dev, freq, 0);
- if (ret) {
- /* clean-up the already added opp before leaving */
- while (load_lvl-- > ARMADA_37XX_DVFS_LOAD_0) {
- freq = cur_frequency / dvfs->divider[load_lvl];
- dev_pm_opp_remove(cpu_dev, freq);
- }
- return ret;
- }
+ if (ret)
+ goto remove_opp;
}
/* Now that everything is setup, enable the DVFS at hardware level */
armada37xx_cpufreq_enable_dvfs(nb_pm_base);
- pdev = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
+ pdata.suspend = armada37xx_cpufreq_suspend;
+ pdata.resume = armada37xx_cpufreq_resume;
+
+ pdev = platform_device_register_data(NULL, "cpufreq-dt", -1, &pdata,
+ sizeof(pdata));
+ ret = PTR_ERR_OR_ZERO(pdev);
+ if (ret)
+ goto disable_dvfs;
+
+ return 0;
+
+disable_dvfs:
+ armada37xx_cpufreq_disable_dvfs(nb_pm_base);
+remove_opp:
+ /* clean-up the already added opp before leaving */
+ while (load_lvl-- > ARMADA_37XX_DVFS_LOAD_0) {
+ freq = cur_frequency / dvfs->divider[load_lvl];
+ dev_pm_opp_remove(cpu_dev, freq);
+ }
+
+ kfree(armada37xx_cpufreq_state);
- return PTR_ERR_OR_ZERO(pdev);
+ return ret;
}
/* late_initcall, to guarantee the driver is loaded after A37xx clock driver */
late_initcall(armada37xx_cpufreq_driver_init);
diff --git a/drivers/cpufreq/bmips-cpufreq.c b/drivers/cpufreq/bmips-cpufreq.c
index 1653151b77df..56a4ebbf00e0 100644
--- a/drivers/cpufreq/bmips-cpufreq.c
+++ b/drivers/cpufreq/bmips-cpufreq.c
@@ -71,7 +71,7 @@ bmips_cpufreq_get_freq_table(const struct cpufreq_policy *policy)
cpu_freq = htp_freq_to_cpu_freq(priv->clk_mult);
- table = kmalloc((priv->max_freqs + 1) * sizeof(*table), GFP_KERNEL);
+ table = kmalloc_array(priv->max_freqs + 1, sizeof(*table), GFP_KERNEL);
if (!table)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 6cdac1aaf23c..e6f9cbe5835f 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -49,13 +49,6 @@
#include <linux/platform_device.h>
#include <linux/semaphore.h>
-#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
-#include <linux/ctype.h>
-#include <linux/debugfs.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#endif
-
/* Max number of arguments AVS calls take */
#define AVS_MAX_CMD_ARGS 4
/*
@@ -182,88 +175,11 @@ struct private_data {
void __iomem *base;
void __iomem *avs_intr_base;
struct device *dev;
-#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
- struct dentry *debugfs;
-#endif
struct completion done;
struct semaphore sem;
struct pmap pmap;
};
-#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
-
-enum debugfs_format {
- DEBUGFS_NORMAL,
- DEBUGFS_FLOAT,
- DEBUGFS_REV,
-};
-
-struct debugfs_data {
- struct debugfs_entry *entry;
- struct private_data *priv;
-};
-
-struct debugfs_entry {
- char *name;
- u32 offset;
- fmode_t mode;
- enum debugfs_format format;
-};
-
-#define DEBUGFS_ENTRY(name, mode, format) { \
- #name, AVS_MBOX_##name, mode, format \
-}
-
-/*
- * These are used for debugfs only. Otherwise we use AVS_MBOX_PARAM() directly.
- */
-#define AVS_MBOX_PARAM1 AVS_MBOX_PARAM(0)
-#define AVS_MBOX_PARAM2 AVS_MBOX_PARAM(1)
-#define AVS_MBOX_PARAM3 AVS_MBOX_PARAM(2)
-#define AVS_MBOX_PARAM4 AVS_MBOX_PARAM(3)
-
-/*
- * This table stores the name, access permissions and offset for each hardware
- * register and is used to generate debugfs entries.
- */
-static struct debugfs_entry debugfs_entries[] = {
- DEBUGFS_ENTRY(COMMAND, S_IWUSR, DEBUGFS_NORMAL),
- DEBUGFS_ENTRY(STATUS, S_IWUSR, DEBUGFS_NORMAL),
- DEBUGFS_ENTRY(VOLTAGE0, 0, DEBUGFS_FLOAT),
- DEBUGFS_ENTRY(TEMP0, 0, DEBUGFS_FLOAT),
- DEBUGFS_ENTRY(PV0, 0, DEBUGFS_FLOAT),
- DEBUGFS_ENTRY(MV0, 0, DEBUGFS_FLOAT),
- DEBUGFS_ENTRY(PARAM1, S_IWUSR, DEBUGFS_NORMAL),
- DEBUGFS_ENTRY(PARAM2, S_IWUSR, DEBUGFS_NORMAL),
- DEBUGFS_ENTRY(PARAM3, S_IWUSR, DEBUGFS_NORMAL),
- DEBUGFS_ENTRY(PARAM4, S_IWUSR, DEBUGFS_NORMAL),
- DEBUGFS_ENTRY(REVISION, 0, DEBUGFS_REV),
- DEBUGFS_ENTRY(PSTATE, 0, DEBUGFS_NORMAL),
- DEBUGFS_ENTRY(HEARTBEAT, 0, DEBUGFS_NORMAL),
- DEBUGFS_ENTRY(MAGIC, S_IWUSR, DEBUGFS_NORMAL),
- DEBUGFS_ENTRY(SIGMA_HVT, 0, DEBUGFS_NORMAL),
- DEBUGFS_ENTRY(SIGMA_SVT, 0, DEBUGFS_NORMAL),
- DEBUGFS_ENTRY(VOLTAGE1, 0, DEBUGFS_FLOAT),
- DEBUGFS_ENTRY(TEMP1, 0, DEBUGFS_FLOAT),
- DEBUGFS_ENTRY(PV1, 0, DEBUGFS_FLOAT),
- DEBUGFS_ENTRY(MV1, 0, DEBUGFS_FLOAT),
- DEBUGFS_ENTRY(FREQUENCY, 0, DEBUGFS_NORMAL),
-};
-
-static int brcm_avs_target_index(struct cpufreq_policy *, unsigned int);
-
-static char *__strtolower(char *s)
-{
- char *p;
-
- for (p = s; *p; p++)
- *p = tolower(*p);
-
- return s;
-}
-
-#endif /* CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG */
-
static void __iomem *__map_region(const char *name)
{
struct device_node *np;
@@ -494,7 +410,7 @@ brcm_avs_get_freq_table(struct device *dev, struct private_data *priv)
if (ret)
return ERR_PTR(ret);
- table = devm_kzalloc(dev, (AVS_PSTATE_MAX + 1) * sizeof(*table),
+ table = devm_kcalloc(dev, AVS_PSTATE_MAX + 1, sizeof(*table),
GFP_KERNEL);
if (!table)
return ERR_PTR(-ENOMEM);
@@ -516,238 +432,6 @@ brcm_avs_get_freq_table(struct device *dev, struct private_data *priv)
return table;
}
-#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
-
-#define MANT(x) (unsigned int)(abs((x)) / 1000)
-#define FRAC(x) (unsigned int)(abs((x)) - abs((x)) / 1000 * 1000)
-
-static int brcm_avs_debug_show(struct seq_file *s, void *data)
-{
- struct debugfs_data *dbgfs = s->private;
- void __iomem *base;
- u32 val, offset;
-
- if (!dbgfs) {
- seq_puts(s, "No device pointer\n");
- return 0;
- }
-
- base = dbgfs->priv->base;
- offset = dbgfs->entry->offset;
- val = readl(base + offset);
- switch (dbgfs->entry->format) {
- case DEBUGFS_NORMAL:
- seq_printf(s, "%u\n", val);
- break;
- case DEBUGFS_FLOAT:
- seq_printf(s, "%d.%03d\n", MANT(val), FRAC(val));
- break;
- case DEBUGFS_REV:
- seq_printf(s, "%c.%c.%c.%c\n", (val >> 24 & 0xff),
- (val >> 16 & 0xff), (val >> 8 & 0xff),
- val & 0xff);
- break;
- }
- seq_printf(s, "0x%08x\n", val);
-
- return 0;
-}
-
-#undef MANT
-#undef FRAC
-
-static ssize_t brcm_avs_seq_write(struct file *file, const char __user *buf,
- size_t size, loff_t *ppos)
-{
- struct seq_file *s = file->private_data;
- struct debugfs_data *dbgfs = s->private;
- struct private_data *priv = dbgfs->priv;
- void __iomem *base, *avs_intr_base;
- bool use_issue_command = false;
- unsigned long val, offset;
- char str[128];
- int ret;
- char *str_ptr = str;
-
- if (size >= sizeof(str))
- return -E2BIG;
-
- memset(str, 0, sizeof(str));
- ret = copy_from_user(str, buf, size);
- if (ret)
- return ret;
-
- base = priv->base;
- avs_intr_base = priv->avs_intr_base;
- offset = dbgfs->entry->offset;
- /*
- * Special case writing to "command" entry only: if the string starts
- * with a 'c', we use the driver's __issue_avs_command() function.
- * Otherwise, we perform a raw write. This should allow testing of raw
- * access as well as using the higher level function. (Raw access
- * doesn't clear the firmware return status after issuing the command.)
- */
- if (str_ptr[0] == 'c' && offset == AVS_MBOX_COMMAND) {
- use_issue_command = true;
- str_ptr++;
- }
- if (kstrtoul(str_ptr, 0, &val) != 0)
- return -EINVAL;
-
- /*
- * Setting the P-state is a special case. We need to update the CPU
- * frequency we report.
- */
- if (val == AVS_CMD_SET_PSTATE) {
- struct cpufreq_policy *policy;
- unsigned int pstate;
-
- policy = cpufreq_cpu_get(smp_processor_id());
- /* Read back the P-state we are about to set */
- pstate = readl(base + AVS_MBOX_PARAM(0));
- if (use_issue_command) {
- ret = brcm_avs_target_index(policy, pstate);
- return ret ? ret : size;
- }
- policy->cur = policy->freq_table[pstate].frequency;
- }
-
- if (use_issue_command) {
- ret = __issue_avs_command(priv, val, false, NULL);
- } else {
- /* Locking here is not perfect, but is only for debug. */
- ret = down_interruptible(&priv->sem);
- if (ret)
- return ret;
-
- writel(val, base + offset);
- /* We have to wake up the firmware to process a command. */
- if (offset == AVS_MBOX_COMMAND)
- writel(AVS_CPU_L2_INT_MASK,
- avs_intr_base + AVS_CPU_L2_SET0);
- up(&priv->sem);
- }
-
- return ret ? ret : size;
-}
-
-static struct debugfs_entry *__find_debugfs_entry(const char *name)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(debugfs_entries); i++)
- if (strcasecmp(debugfs_entries[i].name, name) == 0)
- return &debugfs_entries[i];
-
- return NULL;
-}
-
-static int brcm_avs_debug_open(struct inode *inode, struct file *file)
-{
- struct debugfs_data *data;
- fmode_t fmode;
- int ret;
-
- /*
- * seq_open(), which is called by single_open(), clears "write" access.
- * We need write access to some files, so we preserve our access mode
- * and restore it.
- */
- fmode = file->f_mode;
- /*
- * Check access permissions even for root. We don't want to be writing
- * to read-only registers. Access for regular users has already been
- * checked by the VFS layer.
- */
- if ((fmode & FMODE_WRITER) && !(inode->i_mode & S_IWUSR))
- return -EACCES;
-
- data = kmalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
- /*
- * We use the same file system operations for all our debug files. To
- * produce specific output, we look up the file name upon opening a
- * debugfs entry and map it to a memory offset. This offset is then used
- * in the generic "show" function to read a specific register.
- */
- data->entry = __find_debugfs_entry(file->f_path.dentry->d_iname);
- data->priv = inode->i_private;
-
- ret = single_open(file, brcm_avs_debug_show, data);
- if (ret)
- kfree(data);
- file->f_mode = fmode;
-
- return ret;
-}
-
-static int brcm_avs_debug_release(struct inode *inode, struct file *file)
-{
- struct seq_file *seq_priv = file->private_data;
- struct debugfs_data *data = seq_priv->private;
-
- kfree(data);
- return single_release(inode, file);
-}
-
-static const struct file_operations brcm_avs_debug_ops = {
- .open = brcm_avs_debug_open,
- .read = seq_read,
- .write = brcm_avs_seq_write,
- .llseek = seq_lseek,
- .release = brcm_avs_debug_release,
-};
-
-static void brcm_avs_cpufreq_debug_init(struct platform_device *pdev)
-{
- struct private_data *priv = platform_get_drvdata(pdev);
- struct dentry *dir;
- int i;
-
- if (!priv)
- return;
-
- dir = debugfs_create_dir(BRCM_AVS_CPUFREQ_NAME, NULL);
- if (IS_ERR_OR_NULL(dir))
- return;
- priv->debugfs = dir;
-
- for (i = 0; i < ARRAY_SIZE(debugfs_entries); i++) {
- /*
- * The DEBUGFS_ENTRY macro generates uppercase strings. We
- * convert them to lowercase before creating the debugfs
- * entries.
- */
- char *entry = __strtolower(debugfs_entries[i].name);
- fmode_t mode = debugfs_entries[i].mode;
-
- if (!debugfs_create_file(entry, S_IFREG | S_IRUGO | mode,
- dir, priv, &brcm_avs_debug_ops)) {
- priv->debugfs = NULL;
- debugfs_remove_recursive(dir);
- break;
- }
- }
-}
-
-static void brcm_avs_cpufreq_debug_exit(struct platform_device *pdev)
-{
- struct private_data *priv = platform_get_drvdata(pdev);
-
- if (priv && priv->debugfs) {
- debugfs_remove_recursive(priv->debugfs);
- priv->debugfs = NULL;
- }
-}
-
-#else
-
-static void brcm_avs_cpufreq_debug_init(struct platform_device *pdev) {}
-static void brcm_avs_cpufreq_debug_exit(struct platform_device *pdev) {}
-
-#endif /* CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG */
-
/*
* To ensure the right firmware is running we need to
* - check the MAGIC matches what we expect
@@ -1016,11 +700,8 @@ static int brcm_avs_cpufreq_probe(struct platform_device *pdev)
return ret;
brcm_avs_driver.driver_data = pdev;
- ret = cpufreq_register_driver(&brcm_avs_driver);
- if (!ret)
- brcm_avs_cpufreq_debug_init(pdev);
- return ret;
+ return cpufreq_register_driver(&brcm_avs_driver);
}
static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
@@ -1032,8 +713,6 @@ static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
if (ret)
return ret;
- brcm_avs_cpufreq_debug_exit(pdev);
-
priv = platform_get_drvdata(pdev);
iounmap(priv->base);
iounmap(priv->avs_intr_base);
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index bc5fc1630876..a9d3eec32795 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -42,9 +42,6 @@
*/
static struct cppc_cpudata **all_cpu_data;
-/* Capture the max KHz from DMI */
-static u64 cppc_dmi_max_khz;
-
/* Callback function used to retrieve the max frequency from DMI */
static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
{
@@ -75,6 +72,64 @@ static u64 cppc_get_dmi_max_khz(void)
return (1000 * mhz);
}
+/*
+ * If CPPC lowest_freq and nominal_freq registers are exposed then we can
+ * use them to convert perf to freq and vice versa.
+ *
+ * If the perf/freq point lies between Nominal and Lowest, we can treat
+ * (Low perf, Low freq) and (Nom perf, Nom freq) as 2D coordinates of a line
+ * and extrapolate the rest.
+ * For perf/freq > Nominal, we use the ratio perf:freq at Nominal for conversion.
+ */
+static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu,
+ unsigned int perf)
+{
+ static u64 max_khz;
+ struct cppc_perf_caps *caps = &cpu->perf_caps;
+ u64 mul, div;
+
+ if (caps->lowest_freq && caps->nominal_freq) {
+ if (perf >= caps->nominal_perf) {
+ mul = caps->nominal_freq;
+ div = caps->nominal_perf;
+ } else {
+ mul = caps->nominal_freq - caps->lowest_freq;
+ div = caps->nominal_perf - caps->lowest_perf;
+ }
+ } else {
+ if (!max_khz)
+ max_khz = cppc_get_dmi_max_khz();
+ mul = max_khz;
+ div = cpu->perf_caps.highest_perf;
+ }
+ return (u64)perf * mul / div;
+}
+
+static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu,
+ unsigned int freq)
+{
+ static u64 max_khz;
+ struct cppc_perf_caps *caps = &cpu->perf_caps;
+ u64 mul, div;
+
+ if (caps->lowest_freq && caps->nominal_freq) {
+ if (freq >= caps->nominal_freq) {
+ mul = caps->nominal_perf;
+ div = caps->nominal_freq;
+ } else {
+ mul = caps->lowest_perf;
+ div = caps->lowest_freq;
+ }
+ } else {
+ if (!max_khz)
+ max_khz = cppc_get_dmi_max_khz();
+ mul = cpu->perf_caps.highest_perf;
+ div = max_khz;
+ }
+
+ return (u64)freq * mul / div;
+}
+
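For a concrete sense of the mapping, here is a minimal user-space sketch, with hypothetical capability values, that mirrors the branch structure of cppc_cpufreq_perf_to_khz() above:

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical capabilities: lowest = (perf 10, 300000 kHz),
 * nominal = (perf 50, 1000000 kHz). Below nominal the helper scales
 * perf by the slope between the two points; at or above nominal it
 * scales by the nominal perf:freq ratio.
 */
static uint64_t perf_to_khz(uint64_t perf)
{
	const uint64_t lowest_perf = 10, lowest_freq = 300000;
	const uint64_t nominal_perf = 50, nominal_freq = 1000000;
	uint64_t mul, div;

	if (perf >= nominal_perf) {
		mul = nominal_freq;
		div = nominal_perf;
	} else {
		mul = nominal_freq - lowest_freq;
		div = nominal_perf - lowest_perf;
	}
	return perf * mul / div;
}

int main(void)
{
	/* 30 * (700000 / 40) = 525000 kHz */
	printf("perf 30 -> %llu kHz\n",
	       (unsigned long long)perf_to_khz(30));
	return 0;
}
```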
static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
@@ -86,7 +141,7 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
cpu = all_cpu_data[policy->cpu];
- desired_perf = (u64)target_freq * cpu->perf_caps.highest_perf / cppc_dmi_max_khz;
+ desired_perf = cppc_cpufreq_khz_to_perf(cpu, target_freq);
/* Return if it is exactly the same perf */
if (desired_perf == cpu->perf_ctrls.desired_perf)
return ret;
@@ -126,6 +181,49 @@ static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy)
cpu->perf_caps.lowest_perf, cpu_num, ret);
}
+/*
+ * The PCC subspace describes the rate at which the platform can accept
+ * commands on the shared PCC channel (including READs, which do not count
+ * towards freq transition requests), so ideally we need to use the PCC
+ * values as a fallback if we don't have a platform-specific transition_delay_us
+ */
+#ifdef CONFIG_ARM64
+#include <asm/cputype.h>
+
+static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu)
+{
+ unsigned long implementor = read_cpuid_implementor();
+ unsigned long part_num = read_cpuid_part_number();
+ unsigned int delay_us = 0;
+
+ switch (implementor) {
+ case ARM_CPU_IMP_QCOM:
+ switch (part_num) {
+ case QCOM_CPU_PART_FALKOR_V1:
+ case QCOM_CPU_PART_FALKOR:
+ delay_us = 10000;
+ break;
+ default:
+ delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
+ break;
+ }
+ break;
+ default:
+ delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
+ break;
+ }
+
+ return delay_us;
+}
+
+#else
+
+static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu)
+{
+ return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
+}
+#endif
+
static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
struct cppc_cpudata *cpu;
@@ -143,27 +241,26 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
return ret;
}
- cppc_dmi_max_khz = cppc_get_dmi_max_khz();
+ /* Convert the lowest and nominal freq from MHz to kHz */
+ cpu->perf_caps.lowest_freq *= 1000;
+ cpu->perf_caps.nominal_freq *= 1000;
/*
* Set min to lowest nonlinear perf to avoid any efficiency penalty (see
* Section 8.4.7.1.1.5 of ACPI 6.1 spec)
*/
- policy->min = cpu->perf_caps.lowest_nonlinear_perf * cppc_dmi_max_khz /
- cpu->perf_caps.highest_perf;
- policy->max = cppc_dmi_max_khz;
+ policy->min = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_nonlinear_perf);
+ policy->max = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.highest_perf);
/*
* Set cpuinfo.min_freq to Lowest to make the full range of performance
* available if userspace wants to use any perf between lowest & lowest
* nonlinear perf
*/
- policy->cpuinfo.min_freq = cpu->perf_caps.lowest_perf * cppc_dmi_max_khz /
- cpu->perf_caps.highest_perf;
- policy->cpuinfo.max_freq = cppc_dmi_max_khz;
+ policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_perf);
+ policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.highest_perf);
- policy->transition_delay_us = cppc_get_transition_latency(cpu_num) /
- NSEC_PER_USEC;
+ policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu_num);
policy->shared_type = cpu->shared_type;
if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
@@ -187,7 +284,8 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpu->cur_policy = policy;
/* Set policy->cur to max now. The governors will adjust later. */
- policy->cur = cppc_dmi_max_khz;
+ policy->cur = cppc_cpufreq_perf_to_khz(cpu,
+ cpu->perf_caps.highest_perf);
cpu->perf_ctrls.desired_perf = cpu->perf_caps.highest_perf;
ret = cppc_set_perf(cpu_num, &cpu->perf_ctrls);
@@ -215,7 +313,8 @@ static int __init cppc_cpufreq_init(void)
if (acpi_disabled)
return -ENODEV;
- all_cpu_data = kzalloc(sizeof(void *) * num_possible_cpus(), GFP_KERNEL);
+ all_cpu_data = kcalloc(num_possible_cpus(), sizeof(void *),
+ GFP_KERNEL);
if (!all_cpu_data)
return -ENOMEM;
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 3b585e4bfac5..fe14c57de6ca 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -66,8 +66,6 @@ static const struct of_device_id whitelist[] __initconst = {
{ .compatible = "renesas,r8a7792", },
{ .compatible = "renesas,r8a7793", },
{ .compatible = "renesas,r8a7794", },
- { .compatible = "renesas,r8a7795", },
- { .compatible = "renesas,r8a7796", },
{ .compatible = "renesas,sh73a0", },
{ .compatible = "rockchip,rk2928", },
@@ -118,6 +116,9 @@ static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "nvidia,tegra124", },
+ { .compatible = "qcom,apq8096", },
+ { .compatible = "qcom,msm8996", },
+
{ .compatible = "st,stih407", },
{ .compatible = "st,stih410", },
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 190ea0dccb79..0a9ebf00be46 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -346,8 +346,14 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
if (ret)
return ret;
- if (data && data->have_governor_per_policy)
- dt_cpufreq_driver.flags |= CPUFREQ_HAVE_GOVERNOR_PER_POLICY;
+ if (data) {
+ if (data->have_governor_per_policy)
+ dt_cpufreq_driver.flags |= CPUFREQ_HAVE_GOVERNOR_PER_POLICY;
+
+ dt_cpufreq_driver.resume = data->resume;
+ if (data->suspend)
+ dt_cpufreq_driver.suspend = data->suspend;
+ }
ret = cpufreq_register_driver(&dt_cpufreq_driver);
if (ret)
diff --git a/drivers/cpufreq/cpufreq-dt.h b/drivers/cpufreq/cpufreq-dt.h
index 54d774e46c43..d5aeea13433e 100644
--- a/drivers/cpufreq/cpufreq-dt.h
+++ b/drivers/cpufreq/cpufreq-dt.h
@@ -12,8 +12,13 @@
#include <linux/types.h>
+struct cpufreq_policy;
+
struct cpufreq_dt_platform_data {
bool have_governor_per_policy;
+
+ int (*suspend)(struct cpufreq_policy *policy);
+ int (*resume)(struct cpufreq_policy *policy);
};
#endif /* __CPUFREQ_DT_H__ */
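A minimal sketch of how a platform driver would hand these callbacks to cpufreq-dt, following the same pattern as the Armada 37xx change above; the suspend/resume bodies and names are hypothetical placeholders:

```c
#include <linux/err.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include "cpufreq-dt.h"

static int my_plat_suspend(struct cpufreq_policy *policy)
{
	/* save platform DVFS registers here */
	return 0;
}

static int my_plat_resume(struct cpufreq_policy *policy)
{
	/* restore platform DVFS registers here */
	return 0;
}

static struct cpufreq_dt_platform_data my_pdata = {
	.suspend = my_plat_suspend,
	.resume  = my_plat_resume,
};

static int __init my_plat_cpufreq_init(void)
{
	struct platform_device *pdev;

	/* cpufreq-dt copies the pdata and calls the hooks on PM transitions */
	pdev = platform_device_register_data(NULL, "cpufreq-dt", -1,
					     &my_pdata, sizeof(my_pdata));
	return PTR_ERR_OR_ZERO(pdev);
}
late_initcall(my_plat_cpufreq_init);
```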
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 075d18f6ba7a..b0dfd3222013 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -89,16 +89,7 @@ static void cpufreq_governor_limits(struct cpufreq_policy *policy);
* The mutex locks both lists.
*/
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
-static struct srcu_notifier_head cpufreq_transition_notifier_list;
-
-static bool init_cpufreq_transition_notifier_list_called;
-static int __init init_cpufreq_transition_notifier_list(void)
-{
- srcu_init_notifier_head(&cpufreq_transition_notifier_list);
- init_cpufreq_transition_notifier_list_called = true;
- return 0;
-}
-pure_initcall(init_cpufreq_transition_notifier_list);
+SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list);
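For context, listeners attach to this chain through the existing cpufreq_register_notifier() API; a sketch of a hypothetical transition listener:

```c
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/printk.h>

/* Hypothetical listener: logs every completed frequency change. */
static int demo_transition_cb(struct notifier_block *nb,
			      unsigned long state, void *data)
{
	struct cpufreq_freqs *freqs = data;

	/* Called once before (PRECHANGE) and once after (POSTCHANGE) */
	if (state == CPUFREQ_POSTCHANGE)
		pr_info("cpu%u: %u kHz -> %u kHz\n",
			freqs->cpu, freqs->old, freqs->new);

	return NOTIFY_OK;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_transition_cb,
};

static int __init demo_init(void)
{
	return cpufreq_register_notifier(&demo_nb,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
module_init(demo_init);
```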
static int off __read_mostly;
static int cpufreq_disabled(void)
@@ -300,8 +291,19 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
#endif
}
-static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
- struct cpufreq_freqs *freqs, unsigned int state)
+/**
+ * cpufreq_notify_transition - Notify frequency transition and adjust_jiffies.
+ * @policy: cpufreq policy the frequency transition applies to.
+ * @freqs: details of the frequency update.
+ * @state: set to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
+ *
+ * This function calls the transition notifiers and the "adjust_jiffies"
+ * function. It is called twice on all CPU frequency changes that have
+ * external effects.
+ */
+static void cpufreq_notify_transition(struct cpufreq_policy *policy,
+ struct cpufreq_freqs *freqs,
+ unsigned int state)
{
BUG_ON(irqs_disabled());
@@ -313,54 +315,44 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
state, freqs->new);
switch (state) {
-
case CPUFREQ_PRECHANGE:
- /* detect if the driver reported a value as "old frequency"
+ /*
+ * Detect if the driver reported a value as "old frequency"
* which is not equal to what the cpufreq core thinks is
* "old frequency".
*/
if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
- if ((policy) && (policy->cpu == freqs->cpu) &&
- (policy->cur) && (policy->cur != freqs->old)) {
+ if (policy->cur && (policy->cur != freqs->old)) {
pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
freqs->old, policy->cur);
freqs->old = policy->cur;
}
}
- srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
- CPUFREQ_PRECHANGE, freqs);
+
+ for_each_cpu(freqs->cpu, policy->cpus) {
+ srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
+ CPUFREQ_PRECHANGE, freqs);
+ }
+
adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
break;
case CPUFREQ_POSTCHANGE:
adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
- pr_debug("FREQ: %lu - CPU: %lu\n",
- (unsigned long)freqs->new, (unsigned long)freqs->cpu);
- trace_cpu_frequency(freqs->new, freqs->cpu);
+ pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
+ cpumask_pr_args(policy->cpus));
+
+ for_each_cpu(freqs->cpu, policy->cpus) {
+ trace_cpu_frequency(freqs->new, freqs->cpu);
+ srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
+ CPUFREQ_POSTCHANGE, freqs);
+ }
+
cpufreq_stats_record_transition(policy, freqs->new);
- srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
- CPUFREQ_POSTCHANGE, freqs);
- if (likely(policy) && likely(policy->cpu == freqs->cpu))
- policy->cur = freqs->new;
- break;
+ policy->cur = freqs->new;
}
}
-/**
- * cpufreq_notify_transition - call notifier chain and adjust_jiffies
- * on frequency transition.
- *
- * This function calls the transition notifiers and the "adjust_jiffies"
- * function. It is called twice on all CPU frequency changes that have
- * external effects.
- */
-static void cpufreq_notify_transition(struct cpufreq_policy *policy,
- struct cpufreq_freqs *freqs, unsigned int state)
-{
- for_each_cpu(freqs->cpu, policy->cpus)
- __cpufreq_notify_transition(policy, freqs, state);
-}
-
/* Do post notifications when there are chances that transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs, int transition_failed)
@@ -696,6 +688,8 @@ static ssize_t store_##file_name \
struct cpufreq_policy new_policy; \
\
memcpy(&new_policy, policy, sizeof(*policy)); \
+ new_policy.min = policy->user_policy.min; \
+ new_policy.max = policy->user_policy.max; \
\
ret = sscanf(buf, "%u", &new_policy.object); \
if (ret != 1) \
@@ -1764,8 +1758,6 @@ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
if (cpufreq_disabled())
return -EINVAL;
- WARN_ON(!init_cpufreq_transition_notifier_list_called);
-
switch (list) {
case CPUFREQ_TRANSITION_NOTIFIER:
mutex_lock(&cpufreq_fast_switch_lock);
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index ca38229b045a..1d50e97d49f1 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -165,7 +165,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
* calls, so the previous load value can be used then.
*/
load = j_cdbs->prev_load;
- } else if (unlikely(time_elapsed > 2 * sampling_rate &&
+ } else if (unlikely((int)idle_time > 2 * sampling_rate &&
j_cdbs->prev_load)) {
/*
* If the CPU had gone completely idle and a task has
@@ -185,10 +185,8 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
* clear prev_load to guarantee that the load will be
* computed again next time.
*
- * Detecting this situation is easy: the governor's
- * utilization update handler would not have run during
- * CPU-idle periods. Hence, an unusually large
- * 'time_elapsed' (as compared to the sampling rate)
+ * Detecting this situation is easy: an unusually large
+ * 'idle_time' (as compared to the sampling rate)
* indicates this scenario.
*/
load = j_cdbs->prev_load;
@@ -217,8 +215,8 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
j_cdbs->prev_load = load;
}
- if (time_elapsed > 2 * sampling_rate) {
- unsigned int periods = time_elapsed / sampling_rate;
+ if (unlikely((int)idle_time > 2 * sampling_rate)) {
+ unsigned int periods = idle_time / sampling_rate;
if (periods < idle_periods)
idle_periods = periods;
@@ -278,7 +276,7 @@ static void dbs_update_util_handler(struct update_util_data *data, u64 time,
struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
u64 delta_ns, lst;
- if (!cpufreq_can_do_remote_dvfs(policy_dbs->policy))
+ if (!cpufreq_this_cpu_can_update(policy_dbs->policy))
return;
/*
diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c
index 7974a2fdb760..dd5440d3372d 100644
--- a/drivers/cpufreq/ia64-acpi-cpufreq.c
+++ b/drivers/cpufreq/ia64-acpi-cpufreq.c
@@ -241,8 +241,8 @@ acpi_cpufreq_cpu_init (
}
/* alloc freq_table */
- freq_table = kzalloc(sizeof(*freq_table) *
- (data->acpi_data.state_count + 1),
+ freq_table = kcalloc(data->acpi_data.state_count + 1,
+ sizeof(*freq_table),
GFP_KERNEL);
if (!freq_table) {
result = -ENOMEM;
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 83cf631fc9bc..8b3c2a79ad6c 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -266,6 +266,8 @@ put_node:
}
#define OCOTP_CFG3_6UL_SPEED_696MHZ 0x2
+#define OCOTP_CFG3_6ULL_SPEED_792MHZ 0x2
+#define OCOTP_CFG3_6ULL_SPEED_900MHZ 0x3
static void imx6ul_opp_check_speed_grading(struct device *dev)
{
@@ -287,16 +289,30 @@ static void imx6ul_opp_check_speed_grading(struct device *dev)
* Speed GRADING[1:0] defines the max speed of ARM:
* 2b'00: Reserved;
* 2b'01: 528000000Hz;
- * 2b'10: 696000000Hz;
- * 2b'11: Reserved;
+ * 2b'10: 696000000Hz on i.MX6UL, 792000000Hz on i.MX6ULL;
+ * 2b'11: 900000000Hz on i.MX6ULL only;
* We need to set the max speed of ARM according to fuse map.
*/
val = readl_relaxed(base + OCOTP_CFG3);
val >>= OCOTP_CFG3_SPEED_SHIFT;
val &= 0x3;
- if (val != OCOTP_CFG3_6UL_SPEED_696MHZ)
- if (dev_pm_opp_disable(dev, 696000000))
- dev_warn(dev, "failed to disable 696MHz OPP\n");
+
+ if (of_machine_is_compatible("fsl,imx6ul")) {
+ if (val != OCOTP_CFG3_6UL_SPEED_696MHZ)
+ if (dev_pm_opp_disable(dev, 696000000))
+ dev_warn(dev, "failed to disable 696MHz OPP\n");
+ }
+
+ if (of_machine_is_compatible("fsl,imx6ull")) {
+ if (val != OCOTP_CFG3_6ULL_SPEED_792MHZ)
+ if (dev_pm_opp_disable(dev, 792000000))
+ dev_warn(dev, "failed to disable 792MHz OPP\n");
+
+ if (val != OCOTP_CFG3_6ULL_SPEED_900MHZ)
+ if (dev_pm_opp_disable(dev, 900000000))
+ dev_warn(dev, "failed to disable 900MHz OPP\n");
+ }
+
iounmap(base);
put_node:
of_node_put(np);
@@ -356,7 +372,8 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
goto put_reg;
}
- if (of_machine_is_compatible("fsl,imx6ul"))
+ if (of_machine_is_compatible("fsl,imx6ul") ||
+ of_machine_is_compatible("fsl,imx6ull"))
imx6ul_opp_check_speed_grading(cpu_dev);
else
imx6q_opp_check_speed_grading(cpu_dev);
@@ -377,7 +394,8 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
}
/* Make imx6_soc_volt array's size same as arm opp number */
- imx6_soc_volt = devm_kzalloc(cpu_dev, sizeof(*imx6_soc_volt) * num, GFP_KERNEL);
+ imx6_soc_volt = devm_kcalloc(cpu_dev, num, sizeof(*imx6_soc_volt),
+ GFP_KERNEL);
if (imx6_soc_volt == NULL) {
ret = -ENOMEM;
goto free_freq_table;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 17e566afbb41..1de5ec8d5ea3 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -221,6 +221,11 @@ struct global_params {
* preference/bias
* @epp_saved: Saved EPP/EPB during system suspend or CPU offline
* operation
+ * @hwp_req_cached: Cached value of the last HWP Request MSR
+ * @hwp_cap_cached: Cached value of the last HWP Capabilities MSR
+ * @last_io_update: Last time when IO wake flag was set
+ * @sched_flags: Store scheduler flags for possible cross CPU update
+ * @hwp_boost_min: Last HWP boosted min performance
*
* This structure stores per CPU instance data for all CPUs.
*/
@@ -253,6 +258,11 @@ struct cpudata {
s16 epp_policy;
s16 epp_default;
s16 epp_saved;
+ u64 hwp_req_cached;
+ u64 hwp_cap_cached;
+ u64 last_io_update;
+ unsigned int sched_flags;
+ u32 hwp_boost_min;
};
static struct cpudata **all_cpu_data;
@@ -285,6 +295,7 @@ static struct pstate_funcs pstate_funcs __read_mostly;
static int hwp_active __read_mostly;
static bool per_cpu_limits __read_mostly;
+static bool hwp_boost __read_mostly;
static struct cpufreq_driver *intel_pstate_driver __read_mostly;
@@ -689,6 +700,7 @@ static void intel_pstate_get_hwp_max(unsigned int cpu, int *phy_max,
u64 cap;
rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
+ WRITE_ONCE(all_cpu_data[cpu]->hwp_cap_cached, cap);
if (global.no_turbo)
*current_max = HWP_GUARANTEED_PERF(cap);
else
@@ -763,6 +775,7 @@ update_epp:
intel_pstate_set_epb(cpu, epp);
}
skip_epp:
+ WRITE_ONCE(cpu_data->hwp_req_cached, value);
wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
}
@@ -1020,6 +1033,30 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
return count;
}
+static ssize_t show_hwp_dynamic_boost(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", hwp_boost);
+}
+
+static ssize_t store_hwp_dynamic_boost(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &input);
+ if (ret)
+ return ret;
+
+ mutex_lock(&intel_pstate_driver_lock);
+ hwp_boost = !!input;
+ intel_pstate_update_policies();
+ mutex_unlock(&intel_pstate_driver_lock);
+
+ return count;
+}
+
show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);
@@ -1029,6 +1066,7 @@ define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);
+define_one_global_rw(hwp_dynamic_boost);
static struct attribute *intel_pstate_attributes[] = {
&status.attr,
@@ -1069,6 +1107,11 @@ static void __init intel_pstate_sysfs_expose_params(void)
rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr);
WARN_ON(rc);
+ if (hwp_active) {
+ rc = sysfs_create_file(intel_pstate_kobject,
+ &hwp_dynamic_boost.attr);
+ WARN_ON(rc);
+ }
}
/************************** sysfs end ************************/
@@ -1381,6 +1424,116 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
intel_pstate_set_min_pstate(cpu);
}
+/*
+ * A long hold time keeps the high perf limits active for a long time,
+ * which negatively impacts perf/watt for some workloads,
+ * like SPECpower. 3ms is based on experiments on some
+ * workloads.
+ */
+static int hwp_boost_hold_time_ns = 3 * NSEC_PER_MSEC;
+
+static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu)
+{
+ u64 hwp_req = READ_ONCE(cpu->hwp_req_cached);
+ u32 max_limit = (hwp_req & 0xff00) >> 8;
+ u32 min_limit = (hwp_req & 0xff);
+ u32 boost_level1;
+
+ /*
+ * Cases to consider (User changes via sysfs or boot time):
+ * If, P0 (Turbo max) = P1 (Guaranteed max) = min:
+ * No boost, return.
+ * If, P0 (Turbo max) > P1 (Guaranteed max) = min:
+ * Should result in one level boost only for P0.
+ * If, P0 (Turbo max) = P1 (Guaranteed max) > min:
+ * Should result in two level boost:
+ * (min + p1)/2 and P1.
+ * If, P0 (Turbo max) > P1 (Guaranteed max) > min:
+ * Should result in three level boost:
+ * (min + p1)/2, P1 and P0.
+ */
+
+ /* If max and min are equal or already at max, nothing to boost */
+ if (max_limit == min_limit || cpu->hwp_boost_min >= max_limit)
+ return;
+
+ if (!cpu->hwp_boost_min)
+ cpu->hwp_boost_min = min_limit;
+
+ /* level at the halfway mark between min and guaranteed */
+ boost_level1 = (HWP_GUARANTEED_PERF(cpu->hwp_cap_cached) + min_limit) >> 1;
+
+ if (cpu->hwp_boost_min < boost_level1)
+ cpu->hwp_boost_min = boost_level1;
+ else if (cpu->hwp_boost_min < HWP_GUARANTEED_PERF(cpu->hwp_cap_cached))
+ cpu->hwp_boost_min = HWP_GUARANTEED_PERF(cpu->hwp_cap_cached);
+ else if (cpu->hwp_boost_min == HWP_GUARANTEED_PERF(cpu->hwp_cap_cached) &&
+ max_limit != HWP_GUARANTEED_PERF(cpu->hwp_cap_cached))
+ cpu->hwp_boost_min = max_limit;
+ else
+ return;
+
+ hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min;
+ wrmsrl(MSR_HWP_REQUEST, hwp_req);
+ cpu->last_update = cpu->sample.time;
+}
+
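As an illustration of the ladder described in the comment above, a small stand-alone sketch with hypothetical limits (min = 8, guaranteed P1 = 20, turbo max P0 = 24) walks the boost levels 14, 20, 24:

```c
#include <stdio.h>

/*
 * Hypothetical HWP limits: min = 8, guaranteed (P1) = 20, turbo max
 * (P0) = 24. Replays the decision ladder of intel_pstate_hwp_boost_up().
 */
static unsigned int boost_step(unsigned int boost_min)
{
	const unsigned int min_limit = 8, guaranteed = 20, max_limit = 24;
	unsigned int level1 = (guaranteed + min_limit) >> 1;	/* 14 */

	if (!boost_min)
		boost_min = min_limit;

	if (boost_min < level1)
		return level1;
	if (boost_min < guaranteed)
		return guaranteed;
	if (boost_min == guaranteed && max_limit != guaranteed)
		return max_limit;
	return boost_min;	/* already fully boosted */
}

int main(void)
{
	unsigned int b = 0;
	int i;

	for (i = 0; i < 4; i++) {
		b = boost_step(b);
		printf("boost level after IO wake %d: %u\n", i + 1, b);
	}
	return 0;	/* prints 14, 20, 24, 24 */
}
```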
+static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu)
+{
+ if (cpu->hwp_boost_min) {
+ bool expired;
+
+ /* Check if we are idle for hold time to boost down */
+ expired = time_after64(cpu->sample.time, cpu->last_update +
+ hwp_boost_hold_time_ns);
+ if (expired) {
+ wrmsrl(MSR_HWP_REQUEST, cpu->hwp_req_cached);
+ cpu->hwp_boost_min = 0;
+ }
+ }
+ cpu->last_update = cpu->sample.time;
+}
+
+static inline void intel_pstate_update_util_hwp_local(struct cpudata *cpu,
+ u64 time)
+{
+ cpu->sample.time = time;
+
+ if (cpu->sched_flags & SCHED_CPUFREQ_IOWAIT) {
+ bool do_io = false;
+
+ cpu->sched_flags = 0;
+ /*
+ * Set iowait_boost flag and update time. Since the IO WAIT flag
+ * is set all the time, we can't conclude that there is some
+ * IO-bound activity scheduled on this CPU from just one
+ * occurrence. If we receive at least two in two consecutive
+ * ticks, then we treat it as a boost candidate.
+ */
+ if (time_before64(time, cpu->last_io_update + 2 * TICK_NSEC))
+ do_io = true;
+
+ cpu->last_io_update = time;
+
+ if (do_io)
+ intel_pstate_hwp_boost_up(cpu);
+
+ } else {
+ intel_pstate_hwp_boost_down(cpu);
+ }
+}
+
+static inline void intel_pstate_update_util_hwp(struct update_util_data *data,
+ u64 time, unsigned int flags)
+{
+ struct cpudata *cpu = container_of(data, struct cpudata, update_util);
+
+ cpu->sched_flags |= flags;
+
+ if (smp_processor_id() == cpu->cpu)
+ intel_pstate_update_util_hwp_local(cpu, time);
+}
+
static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
{
struct sample *sample = &cpu->sample;
@@ -1641,6 +1794,12 @@ static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
{}
};
+static const struct x86_cpu_id intel_pstate_hwp_boost_ids[] = {
+ ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs),
+ ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, core_funcs),
+ {}
+};
+
static int intel_pstate_init_cpu(unsigned int cpunum)
{
struct cpudata *cpu;
@@ -1671,6 +1830,10 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
intel_pstate_disable_ee(cpunum);
intel_pstate_hwp_enable(cpu);
+
+ id = x86_match_cpu(intel_pstate_hwp_boost_ids);
+ if (id)
+ hwp_boost = true;
}
intel_pstate_get_cpu_pstates(cpu);
@@ -1684,7 +1847,7 @@ static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
{
struct cpudata *cpu = all_cpu_data[cpu_num];
- if (hwp_active)
+ if (hwp_active && !hwp_boost)
return;
if (cpu->update_util_set)
@@ -1693,7 +1856,9 @@ static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
/* Prevent intel_pstate_update_util() from using stale data. */
cpu->sample.time = 0;
cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
- intel_pstate_update_util);
+ (hwp_active ?
+ intel_pstate_update_util_hwp :
+ intel_pstate_update_util));
cpu->update_util_set = true;
}
@@ -1805,8 +1970,16 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
intel_pstate_set_update_util_hook(policy->cpu);
}
- if (hwp_active)
+ if (hwp_active) {
+ /*
+ * When hwp_boost was active before and dynamically it
+ * was turned off, in that case we need to clear the
+ * update util hook.
+ */
+ if (!hwp_boost)
+ intel_pstate_clear_update_util_hook(policy->cpu);
intel_pstate_hwp_set(policy->cpu);
+ }
mutex_unlock(&intel_pstate_limits_lock);
@@ -1939,13 +2112,51 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
return 0;
}
+/* Use of trace in passive mode:
+ *
+ * In passive mode the trace core_busy field (also known as the
+ * performance field, and labelled as such on the graphs; also known as
+ * core_avg_perf) is not needed and so is re-assigned to indicate if the
+ * driver call was via the normal or fast switch path. Various graphs
+ * output from the intel_pstate_tracer.py utility that include core_busy
+ * (or performance or core_avg_perf) have a fixed y-axis from 0 to 100%,
+ * so we use 10 to indicate the normal path through the driver, and
+ * 90 to indicate the fast switch path through the driver.
+ * The scaled_busy field is not used, and is set to 0.
+ */
+
+#define INTEL_PSTATE_TRACE_TARGET 10
+#define INTEL_PSTATE_TRACE_FAST_SWITCH 90
+
+static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, int old_pstate)
+{
+ struct sample *sample;
+
+ if (!trace_pstate_sample_enabled())
+ return;
+
+ if (!intel_pstate_sample(cpu, ktime_get()))
+ return;
+
+ sample = &cpu->sample;
+ trace_pstate_sample(trace_type,
+ 0,
+ old_pstate,
+ cpu->pstate.current_pstate,
+ sample->mperf,
+ sample->aperf,
+ sample->tsc,
+ get_avg_frequency(cpu),
+ fp_toint(cpu->iowait_boost * 100));
+}
+
static int intel_cpufreq_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
struct cpudata *cpu = all_cpu_data[policy->cpu];
struct cpufreq_freqs freqs;
- int target_pstate;
+ int target_pstate, old_pstate;
update_turbo_state();
@@ -1965,12 +2176,14 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
break;
}
target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
+ old_pstate = cpu->pstate.current_pstate;
if (target_pstate != cpu->pstate.current_pstate) {
cpu->pstate.current_pstate = target_pstate;
wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL,
pstate_funcs.get_val(cpu, target_pstate));
}
freqs.new = target_pstate * cpu->pstate.scaling;
+ intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_TARGET, old_pstate);
cpufreq_freq_transition_end(policy, &freqs, false);
return 0;
@@ -1980,13 +2193,15 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct cpudata *cpu = all_cpu_data[policy->cpu];
- int target_pstate;
+ int target_pstate, old_pstate;
update_turbo_state();
target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
+ old_pstate = cpu->pstate.current_pstate;
intel_pstate_update_pstate(cpu, target_pstate);
+ intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate);
return target_pstate * cpu->pstate.scaling;
}
@@ -2297,7 +2512,7 @@ hwp_cpu_matched:
pr_info("Intel P-state driver initializing\n");
- all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
+ all_cpu_data = vzalloc(array_size(sizeof(void *), num_possible_cpus()));
if (!all_cpu_data)
return -ENOMEM;
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 61a4c5b08219..279bd9e9fa95 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -474,8 +474,8 @@ static int longhaul_get_ranges(void)
return -EINVAL;
}
- longhaul_table = kzalloc((numscales + 1) * sizeof(*longhaul_table),
- GFP_KERNEL);
+ longhaul_table = kcalloc(numscales + 1, sizeof(*longhaul_table),
+ GFP_KERNEL);
if (!longhaul_table)
return -ENOMEM;
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 0591874856d3..54edaec1e608 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -679,6 +679,16 @@ void gpstate_timer_handler(struct timer_list *t)
if (!spin_trylock(&gpstates->gpstate_lock))
return;
+ /*
+ * If the timer has migrated to a different CPU then bring
+ * it back to one of the policy->cpus.
+ */
+ if (!cpumask_test_cpu(raw_smp_processor_id(), policy->cpus)) {
+ gpstates->timer.expires = jiffies + msecs_to_jiffies(1);
+ add_timer_on(&gpstates->timer, cpumask_first(policy->cpus));
+ spin_unlock(&gpstates->gpstate_lock);
+ return;
+ }
/*
* If PMCR was last updated using fast_switch then
@@ -718,10 +728,8 @@ void gpstate_timer_handler(struct timer_list *t)
if (gpstate_idx != gpstates->last_lpstate_idx)
queue_gpstate_timer(gpstates);
+ set_pstate(&freq_data);
spin_unlock(&gpstates->gpstate_lock);
-
- /* Timer may get migrated to a different cpu on cpu hot unplug */
- smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);
}
/*
diff --git a/drivers/cpufreq/pxa3xx-cpufreq.c b/drivers/cpufreq/pxa3xx-cpufreq.c
index 7acc7fa4536d..9daa2cc318bb 100644
--- a/drivers/cpufreq/pxa3xx-cpufreq.c
+++ b/drivers/cpufreq/pxa3xx-cpufreq.c
@@ -93,7 +93,7 @@ static int setup_freqs_table(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table;
int i;
- table = kzalloc((num + 1) * sizeof(*table), GFP_KERNEL);
+ table = kcalloc(num + 1, sizeof(*table), GFP_KERNEL);
if (table == NULL)
return -ENOMEM;
diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c
new file mode 100644
index 000000000000..d049fe4b80c4
--- /dev/null
+++ b/drivers/cpufreq/qcom-cpufreq-kryo.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+/*
+ * In certain Qualcomm SoCs like apq8096 and msm8996 that have Kryo processors,
+ * the CPU frequency subset and voltage value of each OPP vary
+ * based on the silicon variant in use. The Qualcomm Process Voltage Scaling
+ * Tables define the voltage and frequency of each OPP based on the
+ * combination of the msm-id in SMEM and the speedbin blown into the efuse.
+ * The qcom-cpufreq-kryo driver reads the msm-id and efuse value from the SoC
+ * to provide the OPP framework with the required information.
+ * This is used to determine the voltage and frequency of each OPP in the
+ * operating-points-v2 table when it is parsed by the OPP framework.
+ */
+
+#include <linux/cpu.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/smem.h>
+
+#define MSM_ID_SMEM 137
+
+enum _msm_id {
+ MSM8996V3 = 0xF6ul,
+ APQ8096V3 = 0x123ul,
+ MSM8996SG = 0x131ul,
+ APQ8096SG = 0x138ul,
+};
+
+enum _msm8996_version {
+ MSM8996_V3,
+ MSM8996_SG,
+ NUM_OF_MSM8996_VERSIONS,
+};
+
+static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void)
+{
+ size_t len;
+ u32 *msm_id;
+ enum _msm8996_version version;
+
+ msm_id = qcom_smem_get(QCOM_SMEM_HOST_ANY, MSM_ID_SMEM, &len);
+ if (IS_ERR(msm_id))
+ return NUM_OF_MSM8996_VERSIONS;
+
+ /* The first 4 bytes are the format; the actual msm-id follows them */
+ msm_id++;
+
+ switch ((enum _msm_id)*msm_id) {
+ case MSM8996V3:
+ case APQ8096V3:
+ version = MSM8996_V3;
+ break;
+ case MSM8996SG:
+ case APQ8096SG:
+ version = MSM8996_SG;
+ break;
+ default:
+ version = NUM_OF_MSM8996_VERSIONS;
+ }
+
+ return version;
+}
+
+static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
+{
+ struct opp_table *opp_tables[NR_CPUS] = {0};
+ struct platform_device *cpufreq_dt_pdev;
+ enum _msm8996_version msm8996_version;
+ struct nvmem_cell *speedbin_nvmem;
+ struct device_node *np;
+ struct device *cpu_dev;
+ unsigned cpu;
+ u8 *speedbin;
+ u32 versions;
+ size_t len;
+ int ret;
+
+ cpu_dev = get_cpu_device(0);
+ if (NULL == cpu_dev)
+ return -ENODEV;
+
+ msm8996_version = qcom_cpufreq_kryo_get_msm_id();
+ if (NUM_OF_MSM8996_VERSIONS == msm8996_version) {
+ dev_err(cpu_dev, "Not Snapdragon 820/821!");
+ return -ENODEV;
+ }
+
+ np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
+ if (IS_ERR(np))
+ return PTR_ERR(np);
+
+ ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu");
+ if (!ret) {
+ of_node_put(np);
+ return -ENOENT;
+ }
+
+ speedbin_nvmem = of_nvmem_cell_get(np, NULL);
+ of_node_put(np);
+ if (IS_ERR(speedbin_nvmem)) {
+ dev_err(cpu_dev, "Could not get nvmem cell: %ld\n",
+ PTR_ERR(speedbin_nvmem));
+ return PTR_ERR(speedbin_nvmem);
+ }
+
+ speedbin = nvmem_cell_read(speedbin_nvmem, &len);
+ nvmem_cell_put(speedbin_nvmem);
+ if (IS_ERR(speedbin))
+ return PTR_ERR(speedbin);
+
+ switch (msm8996_version) {
+ case MSM8996_V3:
+ versions = 1 << (unsigned int)(*speedbin);
+ break;
+ case MSM8996_SG:
+ versions = 1 << ((unsigned int)(*speedbin) + 4);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ for_each_possible_cpu(cpu) {
+ cpu_dev = get_cpu_device(cpu);
+ if (NULL == cpu_dev) {
+ ret = -ENODEV;
+ goto free_opp;
+ }
+
+ opp_tables[cpu] = dev_pm_opp_set_supported_hw(cpu_dev,
+ &versions, 1);
+ if (IS_ERR(opp_tables[cpu])) {
+ ret = PTR_ERR(opp_tables[cpu]);
+ dev_err(cpu_dev, "Failed to set supported hardware\n");
+ goto free_opp;
+ }
+ }
+
+ cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
+ NULL, 0);
+ if (!IS_ERR(cpufreq_dt_pdev))
+ return 0;
+
+ ret = PTR_ERR(cpufreq_dt_pdev);
+ dev_err(cpu_dev, "Failed to register platform device\n");
+
+free_opp:
+ for_each_possible_cpu(cpu) {
+ if (IS_ERR_OR_NULL(opp_tables[cpu]))
+ break;
+ dev_pm_opp_put_supported_hw(opp_tables[cpu]);
+ }
+
+ return ret;
+}
+
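A small sketch, with hypothetical speedbin values, of how the probe above folds the SoC version and efuse speedbin into the single bitmask passed to dev_pm_opp_set_supported_hw(), which the OPP framework matches against each OPP's opp-supported-hw property:

```c
#include <stdio.h>

enum msm8996_version { MSM8996_V3, MSM8996_SG };

/*
 * Mirrors the switch in qcom_cpufreq_kryo_probe(): V3 parts use bits
 * 0-3, SG parts use bits 4-7, and the blown speedbin selects the bit
 * within that nibble.
 */
static unsigned int speedbin_to_versions(enum msm8996_version v,
					 unsigned char speedbin)
{
	return v == MSM8996_V3 ? 1u << speedbin : 1u << (speedbin + 4);
}

int main(void)
{
	/* Hypothetical fuse read: an SG part with speedbin 1 -> 0x20 */
	printf("versions = 0x%x\n", speedbin_to_versions(MSM8996_SG, 1));
	return 0;
}
```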
+static struct platform_driver qcom_cpufreq_kryo_driver = {
+ .probe = qcom_cpufreq_kryo_probe,
+ .driver = {
+ .name = "qcom-cpufreq-kryo",
+ },
+};
+
+static const struct of_device_id qcom_cpufreq_kryo_match_list[] __initconst = {
+ { .compatible = "qcom,apq8096", },
+ { .compatible = "qcom,msm8996", },
+};
+
+/*
+ * Since the driver depends on smem and nvmem drivers, which may
+ * return EPROBE_DEFER, all the real activity is done in the probe,
+ * which may be defered as well. The init here is only registering
+ * the driver and the platform device.
+ */
+static int __init qcom_cpufreq_kryo_init(void)
+{
+ struct device_node *np = of_find_node_by_path("/");
+ const struct of_device_id *match;
+ int ret;
+
+ if (!np)
+ return -ENODEV;
+
+ match = of_match_node(qcom_cpufreq_kryo_match_list, np);
+ of_node_put(np);
+ if (!match)
+ return -ENODEV;
+
+ ret = platform_driver_register(&qcom_cpufreq_kryo_driver);
+ if (unlikely(ret < 0))
+ return ret;
+
+ ret = PTR_ERR_OR_ZERO(platform_device_register_simple(
+ "qcom-cpufreq-kryo", -1, NULL, 0));
+ if (0 == ret)
+ return 0;
+
+ platform_driver_unregister(&qcom_cpufreq_kryo_driver);
+ return ret;
+}
+module_init(qcom_cpufreq_kryo_init);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Kryo CPUfreq driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/s3c2440-cpufreq.c b/drivers/cpufreq/s3c2440-cpufreq.c
index d0d75b65ddd6..d2f67b7a20dd 100644
--- a/drivers/cpufreq/s3c2440-cpufreq.c
+++ b/drivers/cpufreq/s3c2440-cpufreq.c
@@ -143,7 +143,7 @@ static void s3c2440_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
{
unsigned long clkdiv, camdiv;
- s3c_freq_dbg("%s: divsiors: h=%d, p=%d\n", __func__,
+ s3c_freq_dbg("%s: divisors: h=%d, p=%d\n", __func__,
cfg->divs.h_divisor, cfg->divs.p_divisor);
clkdiv = __raw_readl(S3C2410_CLKDIVN);
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index 909bd6e27639..3b291a2b0cb3 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -562,7 +562,7 @@ static int s3c_cpufreq_build_freq(void)
size = cpu_cur.info->calc_freqtable(&cpu_cur, NULL, 0);
size++;
- ftab = kzalloc(sizeof(*ftab) * size, GFP_KERNEL);
+ ftab = kcalloc(size, sizeof(*ftab), GFP_KERNEL);
if (!ftab)
return -ENOMEM;
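
This hunk, like the sfi and spear hunks below, converts kzalloc(n * size) into kcalloc(n, size). The gain is overflow safety rather than new behavior; a short sketch of the failure mode the conversion closes, with a hypothetical helper name:

	#include <linux/slab.h>

	/*
	 * Sketch: if n is externally influenced, n * entry_size can wrap and
	 * silently yield a short buffer. kcalloc() validates the
	 * multiplication and returns NULL on overflow instead.
	 */
	static void *alloc_table(size_t n, size_t entry_size)
	{
		return kcalloc(n, entry_size, GFP_KERNEL);
	}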
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index b4dbc77459b6..50b1551ba894 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -117,7 +117,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
return -ENODEV;
}
- ret = handle->perf_ops->add_opps_to_device(handle, cpu_dev);
+ ret = handle->perf_ops->device_opps_add(handle, cpu_dev);
if (ret) {
dev_warn(cpu_dev, "failed to add opps to the device\n");
return ret;
@@ -164,7 +164,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
/* SCMI allows DVFS request for any domain from any CPU */
policy->dvfs_possible_from_any_cpu = true;
- latency = handle->perf_ops->get_transition_latency(handle, cpu_dev);
+ latency = handle->perf_ops->transition_latency_get(handle, cpu_dev);
if (!latency)
latency = CPUFREQ_ETERNAL;
diff --git a/drivers/cpufreq/sfi-cpufreq.c b/drivers/cpufreq/sfi-cpufreq.c
index 9767afe05da2..978770432b13 100644
--- a/drivers/cpufreq/sfi-cpufreq.c
+++ b/drivers/cpufreq/sfi-cpufreq.c
@@ -95,8 +95,8 @@ static int __init sfi_cpufreq_init(void)
if (ret)
return ret;
- freq_table = kzalloc(sizeof(*freq_table) *
- (num_freq_table_entries + 1), GFP_KERNEL);
+ freq_table = kcalloc(num_freq_table_entries + 1, sizeof(*freq_table),
+ GFP_KERNEL);
if (!freq_table) {
ret = -ENOMEM;
goto err_free_array;
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 195f27f9c1cb..4074e2615522 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -195,7 +195,7 @@ static int spear_cpufreq_probe(struct platform_device *pdev)
cnt = prop->length / sizeof(u32);
val = prop->value;
- freq_tbl = kzalloc(sizeof(*freq_tbl) * (cnt + 1), GFP_KERNEL);
+ freq_tbl = kcalloc(cnt + 1, sizeof(*freq_tbl), GFP_KERNEL);
if (!freq_tbl) {
ret = -ENOMEM;
goto out_put_node;
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
index e3a9962ee410..cabb6f48eb77 100644
--- a/drivers/cpufreq/speedstep-lib.c
+++ b/drivers/cpufreq/speedstep-lib.c
@@ -252,7 +252,7 @@ EXPORT_SYMBOL_GPL(speedstep_get_frequency);
*********************************************************************/
/* Keep in sync with the x86_cpu_id tables in the different modules */
-unsigned int speedstep_detect_processor(void)
+enum speedstep_processor speedstep_detect_processor(void)
{
struct cpuinfo_x86 *c = &cpu_data(0);
u32 ebx, msr_lo, msr_hi;
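
Returning enum speedstep_processor instead of unsigned int changes nothing at runtime, but it lets the compiler type-check callers. A sketch, assuming one of the enum constants from speedstep-lib.h:

	/*
	 * Sketch: with an enum return type, -Wswitch (part of -Wall) flags a
	 * caller whose switch does not handle every processor value.
	 */
	static const char *cpu_name(enum speedstep_processor proc)
	{
		switch (proc) {
		case SPEEDSTEP_CPU_PM:
			return "Pentium M";
		/* an omitted case now draws a compiler warning */
		}
		return "unknown";
	}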
diff --git a/drivers/cpufreq/tegra20-cpufreq.c b/drivers/cpufreq/tegra20-cpufreq.c
index 2bd62845e9d5..05f57dcd5215 100644
--- a/drivers/cpufreq/tegra20-cpufreq.c
+++ b/drivers/cpufreq/tegra20-cpufreq.c
@@ -16,16 +16,13 @@
*
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/sched.h>
+#include <linux/clk.h>
#include <linux/cpufreq.h>
-#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
static struct cpufreq_frequency_table freq_table[] = {
{ .frequency = 216000 },
@@ -39,25 +36,27 @@ static struct cpufreq_frequency_table freq_table[] = {
{ .frequency = CPUFREQ_TABLE_END },
};
-#define NUM_CPUS 2
-
-static struct clk *cpu_clk;
-static struct clk *pll_x_clk;
-static struct clk *pll_p_clk;
-static struct clk *emc_clk;
-static bool pll_x_prepared;
+struct tegra20_cpufreq {
+ struct device *dev;
+ struct cpufreq_driver driver;
+ struct clk *cpu_clk;
+ struct clk *pll_x_clk;
+ struct clk *pll_p_clk;
+ bool pll_x_prepared;
+};
static unsigned int tegra_get_intermediate(struct cpufreq_policy *policy,
unsigned int index)
{
- unsigned int ifreq = clk_get_rate(pll_p_clk) / 1000;
+ struct tegra20_cpufreq *cpufreq = cpufreq_get_driver_data();
+ unsigned int ifreq = clk_get_rate(cpufreq->pll_p_clk) / 1000;
/*
* Don't switch to intermediate freq if:
* - we are already at it, i.e. policy->cur == ifreq
* - index corresponds to ifreq
*/
- if ((freq_table[index].frequency == ifreq) || (policy->cur == ifreq))
+ if (freq_table[index].frequency == ifreq || policy->cur == ifreq)
return 0;
return ifreq;
@@ -66,6 +65,7 @@ static unsigned int tegra_get_intermediate(struct cpufreq_policy *policy,
static int tegra_target_intermediate(struct cpufreq_policy *policy,
unsigned int index)
{
+ struct tegra20_cpufreq *cpufreq = cpufreq_get_driver_data();
int ret;
/*
@@ -78,47 +78,37 @@ static int tegra_target_intermediate(struct cpufreq_policy *policy,
* Also, we wouldn't be using pll_x anymore and must not take extra
* reference to it, as it can be disabled now to save some power.
*/
- clk_prepare_enable(pll_x_clk);
+ clk_prepare_enable(cpufreq->pll_x_clk);
- ret = clk_set_parent(cpu_clk, pll_p_clk);
+ ret = clk_set_parent(cpufreq->cpu_clk, cpufreq->pll_p_clk);
if (ret)
- clk_disable_unprepare(pll_x_clk);
+ clk_disable_unprepare(cpufreq->pll_x_clk);
else
- pll_x_prepared = true;
+ cpufreq->pll_x_prepared = true;
return ret;
}
static int tegra_target(struct cpufreq_policy *policy, unsigned int index)
{
+ struct tegra20_cpufreq *cpufreq = cpufreq_get_driver_data();
unsigned long rate = freq_table[index].frequency;
- unsigned int ifreq = clk_get_rate(pll_p_clk) / 1000;
- int ret = 0;
-
- /*
- * Vote on memory bus frequency based on cpu frequency
- * This sets the minimum frequency, display or avp may request higher
- */
- if (rate >= 816000)
- clk_set_rate(emc_clk, 600000000); /* cpu 816 MHz, emc max */
- else if (rate >= 456000)
- clk_set_rate(emc_clk, 300000000); /* cpu 456 MHz, emc 150Mhz */
- else
- clk_set_rate(emc_clk, 100000000); /* emc 50Mhz */
+ unsigned int ifreq = clk_get_rate(cpufreq->pll_p_clk) / 1000;
+ int ret;
/*
* target freq == pll_p, don't need to take extra reference to pll_x_clk
* as it isn't used anymore.
*/
if (rate == ifreq)
- return clk_set_parent(cpu_clk, pll_p_clk);
+ return clk_set_parent(cpufreq->cpu_clk, cpufreq->pll_p_clk);
- ret = clk_set_rate(pll_x_clk, rate * 1000);
+ ret = clk_set_rate(cpufreq->pll_x_clk, rate * 1000);
/* Restore to earlier frequency on error, i.e. pll_x */
if (ret)
- pr_err("Failed to change pll_x to %lu\n", rate);
+ dev_err(cpufreq->dev, "Failed to change pll_x to %lu\n", rate);
- ret = clk_set_parent(cpu_clk, pll_x_clk);
+ ret = clk_set_parent(cpufreq->cpu_clk, cpufreq->pll_x_clk);
/* This shouldn't fail while changing or restoring */
WARN_ON(ret);
@@ -126,9 +116,9 @@ static int tegra_target(struct cpufreq_policy *policy, unsigned int index)
* Drop count to pll_x clock only if we switched to intermediate freq
* earlier while transitioning to a target frequency.
*/
- if (pll_x_prepared) {
- clk_disable_unprepare(pll_x_clk);
- pll_x_prepared = false;
+ if (cpufreq->pll_x_prepared) {
+ clk_disable_unprepare(cpufreq->pll_x_clk);
+ cpufreq->pll_x_prepared = false;
}
return ret;
@@ -136,81 +126,111 @@ static int tegra_target(struct cpufreq_policy *policy, unsigned int index)
static int tegra_cpu_init(struct cpufreq_policy *policy)
{
+ struct tegra20_cpufreq *cpufreq = cpufreq_get_driver_data();
int ret;
- if (policy->cpu >= NUM_CPUS)
- return -EINVAL;
-
- clk_prepare_enable(emc_clk);
- clk_prepare_enable(cpu_clk);
+ clk_prepare_enable(cpufreq->cpu_clk);
/* FIXME: what's the actual transition time? */
ret = cpufreq_generic_init(policy, freq_table, 300 * 1000);
if (ret) {
- clk_disable_unprepare(cpu_clk);
- clk_disable_unprepare(emc_clk);
+ clk_disable_unprepare(cpufreq->cpu_clk);
return ret;
}
- policy->clk = cpu_clk;
+ policy->clk = cpufreq->cpu_clk;
policy->suspend_freq = freq_table[0].frequency;
return 0;
}
static int tegra_cpu_exit(struct cpufreq_policy *policy)
{
- clk_disable_unprepare(cpu_clk);
- clk_disable_unprepare(emc_clk);
+ struct tegra20_cpufreq *cpufreq = cpufreq_get_driver_data();
+
+ clk_disable_unprepare(cpufreq->cpu_clk);
return 0;
}
-static struct cpufreq_driver tegra_cpufreq_driver = {
- .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
- .verify = cpufreq_generic_frequency_table_verify,
- .get_intermediate = tegra_get_intermediate,
- .target_intermediate = tegra_target_intermediate,
- .target_index = tegra_target,
- .get = cpufreq_generic_get,
- .init = tegra_cpu_init,
- .exit = tegra_cpu_exit,
- .name = "tegra",
- .attr = cpufreq_generic_attr,
- .suspend = cpufreq_generic_suspend,
-};
-
-static int __init tegra_cpufreq_init(void)
+static int tegra20_cpufreq_probe(struct platform_device *pdev)
{
- cpu_clk = clk_get_sys(NULL, "cclk");
- if (IS_ERR(cpu_clk))
- return PTR_ERR(cpu_clk);
-
- pll_x_clk = clk_get_sys(NULL, "pll_x");
- if (IS_ERR(pll_x_clk))
- return PTR_ERR(pll_x_clk);
-
- pll_p_clk = clk_get_sys(NULL, "pll_p");
- if (IS_ERR(pll_p_clk))
- return PTR_ERR(pll_p_clk);
-
- emc_clk = clk_get_sys("cpu", "emc");
- if (IS_ERR(emc_clk)) {
- clk_put(cpu_clk);
- return PTR_ERR(emc_clk);
+ struct tegra20_cpufreq *cpufreq;
+ int err;
+
+ cpufreq = devm_kzalloc(&pdev->dev, sizeof(*cpufreq), GFP_KERNEL);
+ if (!cpufreq)
+ return -ENOMEM;
+
+ cpufreq->cpu_clk = clk_get_sys(NULL, "cclk");
+ if (IS_ERR(cpufreq->cpu_clk))
+ return PTR_ERR(cpufreq->cpu_clk);
+
+ cpufreq->pll_x_clk = clk_get_sys(NULL, "pll_x");
+ if (IS_ERR(cpufreq->pll_x_clk)) {
+ err = PTR_ERR(cpufreq->pll_x_clk);
+ goto put_cpu;
+ }
+
+ cpufreq->pll_p_clk = clk_get_sys(NULL, "pll_p");
+ if (IS_ERR(cpufreq->pll_p_clk)) {
+ err = PTR_ERR(cpufreq->pll_p_clk);
+ goto put_pll_x;
}
- return cpufreq_register_driver(&tegra_cpufreq_driver);
+ cpufreq->dev = &pdev->dev;
+ cpufreq->driver.get = cpufreq_generic_get;
+ cpufreq->driver.attr = cpufreq_generic_attr;
+ cpufreq->driver.init = tegra_cpu_init;
+ cpufreq->driver.exit = tegra_cpu_exit;
+ cpufreq->driver.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK;
+ cpufreq->driver.verify = cpufreq_generic_frequency_table_verify;
+ cpufreq->driver.suspend = cpufreq_generic_suspend;
+ cpufreq->driver.driver_data = cpufreq;
+ cpufreq->driver.target_index = tegra_target;
+ cpufreq->driver.get_intermediate = tegra_get_intermediate;
+ cpufreq->driver.target_intermediate = tegra_target_intermediate;
+ snprintf(cpufreq->driver.name, CPUFREQ_NAME_LEN, "tegra");
+
+ err = cpufreq_register_driver(&cpufreq->driver);
+ if (err)
+ goto put_pll_p;
+
+ platform_set_drvdata(pdev, cpufreq);
+
+ return 0;
+
+put_pll_p:
+ clk_put(cpufreq->pll_p_clk);
+put_pll_x:
+ clk_put(cpufreq->pll_x_clk);
+put_cpu:
+ clk_put(cpufreq->cpu_clk);
+
+ return err;
}
-static void __exit tegra_cpufreq_exit(void)
+static int tegra20_cpufreq_remove(struct platform_device *pdev)
{
- cpufreq_unregister_driver(&tegra_cpufreq_driver);
- clk_put(emc_clk);
- clk_put(cpu_clk);
+ struct tegra20_cpufreq *cpufreq = platform_get_drvdata(pdev);
+
+ cpufreq_unregister_driver(&cpufreq->driver);
+
+ clk_put(cpufreq->pll_p_clk);
+ clk_put(cpufreq->pll_x_clk);
+ clk_put(cpufreq->cpu_clk);
+
+ return 0;
}
+static struct platform_driver tegra20_cpufreq_driver = {
+ .probe = tegra20_cpufreq_probe,
+ .remove = tegra20_cpufreq_remove,
+ .driver = {
+ .name = "tegra20-cpufreq",
+ },
+};
+module_platform_driver(tegra20_cpufreq_driver);
+MODULE_ALIAS("platform:tegra20-cpufreq");
MODULE_AUTHOR("Colin Cross <ccross@android.com>");
-MODULE_DESCRIPTION("cpufreq driver for Nvidia Tegra2");
+MODULE_DESCRIPTION("NVIDIA Tegra20 cpufreq driver");
MODULE_LICENSE("GPL");
-module_init(tegra_cpufreq_init);
-module_exit(tegra_cpufreq_exit);
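
The tegra20 conversion replaces file-scope clock globals with a per-device structure that embeds struct cpufreq_driver, and the callbacks recover it through cpufreq_get_driver_data(). A minimal sketch of that pattern, with hypothetical names:

	#include <linux/clk.h>
	#include <linux/cpufreq.h>

	/* Per-device state, reachable from cpufreq callbacks via .driver_data. */
	struct my_cpufreq {
		struct cpufreq_driver driver;	/* driver.driver_data points back here */
		struct clk *cpu_clk;
	};

	static int my_init(struct cpufreq_policy *policy)
	{
		struct my_cpufreq *priv = cpufreq_get_driver_data();

		policy->clk = priv->cpu_clk;
		return 0;
	}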
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index 6ba709b6f095..3f0e2a14895a 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -217,7 +217,7 @@ static int ti_cpufreq_probe(struct platform_device *pdev)
if (!match)
return -ENODEV;
- opp_data = kzalloc(sizeof(*opp_data), GFP_KERNEL);
+ opp_data = devm_kzalloc(&pdev->dev, sizeof(*opp_data), GFP_KERNEL);
if (!opp_data)
return -ENOMEM;
@@ -226,8 +226,7 @@ static int ti_cpufreq_probe(struct platform_device *pdev)
opp_data->cpu_dev = get_cpu_device(0);
if (!opp_data->cpu_dev) {
pr_err("%s: Failed to get device for CPU0\n", __func__);
- ret = ENODEV;
- goto free_opp_data;
+ return -ENODEV;
}
opp_data->opp_node = dev_pm_opp_of_get_opp_desc_node(opp_data->cpu_dev);
@@ -285,8 +284,6 @@ register_cpufreq_dt:
fail_put_node:
of_node_put(opp_data->opp_node);
-free_opp_data:
- kfree(opp_data);
return ret;
}
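
The ti-cpufreq hunks swap kzalloc() for devm_kzalloc(), which ties the allocation's lifetime to the device and lets the free_opp_data error label go away. A sketch of the simplification, with hypothetical names:

	#include <linux/device.h>
	#include <linux/platform_device.h>

	struct my_data { int dummy; };

	static int my_probe(struct platform_device *pdev)
	{
		struct my_data *d;

		d = devm_kzalloc(&pdev->dev, sizeof(*d), GFP_KERNEL);
		if (!d)
			return -ENOMEM;

		/*
		 * No matching kfree() needed on early returns: the devres core
		 * frees d automatically on probe failure or device removal.
		 */
		return 0;
	}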