From 8bf1ac723639c4260f76df0e45ee23aa35a23067 Mon Sep 17 00:00:00 2001
From: Viresh Kumar
Date: Tue, 23 Oct 2012 01:23:33 +0200
Subject: cpufreq / core: Fix typo in comment describing show_bios_limit()

show_bios_limit is mistakenly written as show_scaling_driver in a comment
describing the purpose of the show_bios_limit() routine. Fix it.

Signed-off-by: Viresh Kumar
Signed-off-by: Rafael J. Wysocki
---
 drivers/cpufreq/cpufreq.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index fb8a5279c5d8..021973b7dc56 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -581,7 +581,7 @@ static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
 }

 /**
- * show_scaling_driver - show the current cpufreq HW/BIOS limitation
+ * show_bios_limit - show the current cpufreq HW/BIOS limitation
  */
 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
 {
-- cgit v1.2.3

From 4b972f0b04eaae645b22d99479b9aea43c3d64e7 Mon Sep 17 00:00:00 2001
From: Viresh Kumar
Date: Tue, 23 Oct 2012 01:23:43 +0200
Subject: cpufreq / core: Fix printing of governor and driver name

Arrays for governor and driver names are of size CPUFREQ_NAME_LEN, i.e. 16:
15 bytes for the name and 1 for the trailing '\0'. When the cpufreq driver
prints these names (for sysfs), it includes '\n' or ' ' in the fmt string
but still passes the length as CPUFREQ_NAME_LEN.

If a driver or governor name uses all 15 bytes allocated to it, the
trailing '\n' or ' ' will never be printed. And so commands like:

root@linaro-developer# cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_driver

will print something like:

cpufreq_foodrvroot@linaro-developer#

Fix this by increasing the print length by one character.

Signed-off-by: Viresh Kumar
Signed-off-by: Rafael J. Wysocki
---
 drivers/cpufreq/cpufreq.c | 6 +++---
 include/linux/cpufreq.h | 2 ++
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 021973b7dc56..db6e337ad337 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -445,7 +445,7 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
 	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
 		return sprintf(buf, "performance\n");
 	else if (policy->governor)
-		return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n",
+		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
 				policy->governor->name);
 	return -EINVAL;
 }
@@ -491,7 +491,7 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
  */
 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
 {
-	return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
+	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
 }

 /**
@@ -512,7 +512,7 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
 		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) - (CPUFREQ_NAME_LEN + 2)))
 			goto out;
-		i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
+		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
 	}
 out:
 	i += sprintf(&buf[i], "\n");

diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index b60f6ba01d0c..fc4b78510151 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -22,6 +22,8 @@
 #include

 #define CPUFREQ_NAME_LEN 16
+/* Print length for names. Extra 1 space for accommodating '\n' in prints */
+#define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1)

 /*********************************************************************
-- cgit v1.2.3
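The off-by-one above comes from snprintf()-style size semantics: scnprintf(),
like snprintf(), reserves one byte of the passed size for the trailing '\0',
so a size of CPUFREQ_NAME_LEN (16) can emit at most 15 characters. A minimal
userspace sketch of the same behaviour, using standard snprintf() as a
stand-in for the kernel's scnprintf() and a hypothetical 15-character name
(both are assumptions for illustration, not taken from the patch):

#include <stdio.h>
#include <string.h>

#define CPUFREQ_NAME_LEN	16			/* 15 name bytes + '\0' */
#define CPUFREQ_NAME_PLEN	(CPUFREQ_NAME_LEN + 1)	/* one more for '\n' */

int main(void)
{
	const char *name = "governor15chars";	/* hypothetical 15-char name */
	char buf[CPUFREQ_NAME_PLEN];

	/* Old behaviour: at most 15 bytes may be written, so '\n' is dropped */
	snprintf(buf, CPUFREQ_NAME_LEN, "%s\n", name);
	printf("size %2d -> %zu chars, newline %s\n", CPUFREQ_NAME_LEN,
	       strlen(buf), strchr(buf, '\n') ? "kept" : "lost");

	/* Fixed behaviour: 16 payload bytes fit the full name plus '\n' */
	snprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", name);
	printf("size %2d -> %zu chars, newline %s\n", CPUFREQ_NAME_PLEN,
	       strlen(buf), strchr(buf, '\n') ? "kept" : "lost");
	return 0;
}

With a name shorter than 15 characters both sizes behave identically, which
is why the bug only shows up for names that fill the array.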
From 2aacdfff9c6958723aa5076003247933cefc32ea Mon Sep 17 00:00:00 2001
From: Viresh Kumar
Date: Tue, 23 Oct 2012 01:28:05 +0200
Subject: cpufreq: Move common part from governors to separate file, v2

Multiple cpufreq governors have defined similar get_cpu_idle_time_***()
routines. These routines should be moved to a common place so that all
governors can use them, so move them to cpufreq_governor.c, which is a
better place for keeping them.

Signed-off-by: Viresh Kumar
Signed-off-by: Rafael J. Wysocki
---
 drivers/cpufreq/Makefile | 4 +--
 drivers/cpufreq/cpufreq_conservative.c | 34 ----------------------
 drivers/cpufreq/cpufreq_governor.c | 52 ++++++++++++++++++++++++++++++++++
 drivers/cpufreq/cpufreq_ondemand.c | 34 ----------------------
 include/linux/cpufreq.h | 5 ++++
 5 files changed, 59 insertions(+), 70 deletions(-)
 create mode 100644 drivers/cpufreq/cpufreq_governor.c

diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 1bc90e1306d8..5b1413e47216 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -7,8 +7,8 @@ obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o
 obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o
 obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o
 obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o
-obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o
-obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
+obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o cpufreq_governor.o
+obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o cpufreq_governor.o

 # CPUfreq cross-arch helpers
 obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o

diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index a152af7e1991..181abad07266 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -95,40 +95,6 @@ static struct dbs_tuners {
 	.freq_step = 5,
 };

-static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
-{
-	u64 idle_time;
-	u64 cur_wall_time;
-	u64 busy_time;
-
-	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
-
-	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
-	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
-	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
-	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
-	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
-	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
-
-	idle_time = cur_wall_time - busy_time;
-	if (wall)
-		*wall = jiffies_to_usecs(cur_wall_time);
-
-	return jiffies_to_usecs(idle_time);
-}
-
-static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
-{
-	u64 idle_time = get_cpu_idle_time_us(cpu, NULL);
-
-	if (idle_time == -1ULL)
-		return get_cpu_idle_time_jiffy(cpu, wall);
-	else
-		idle_time += get_cpu_iowait_time_us(cpu, wall);
-
-	return idle_time;
-}
-
 /* keep track of frequency transitions */
 static int
 dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 		     void *data)

diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
new file mode 100644
index 000000000000..0001071cdcdb
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -0,0 +1,52 @@
+/*
+ * drivers/cpufreq/cpufreq_governor.c
+ *
+ * CPUFREQ governors common code
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+/*
+ * Code picked from earlier governor implementations
+ */
+static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
+{
+	u64 idle_time;
+	u64 cur_wall_time;
+	u64 busy_time;
+
+	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+
+	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
+
+	idle_time = cur_wall_time - busy_time;
+	if (wall)
+		*wall = jiffies_to_usecs(cur_wall_time);
+
+	return jiffies_to_usecs(idle_time);
+}
+
+cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
+{
+	u64 idle_time = get_cpu_idle_time_us(cpu, NULL);
+
+	if (idle_time == -1ULL)
+		return get_cpu_idle_time_jiffy(cpu, wall);
+	else
+		idle_time += get_cpu_iowait_time_us(cpu, wall);
+
+	return idle_time;
+}
+EXPORT_SYMBOL_GPL(get_cpu_idle_time);

diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 396322f2a83f..d7f774bb49dd 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -119,40 +119,6 @@ static struct dbs_tuners {
 	.powersave_bias = 0,
 };

-static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
-{
-	u64 idle_time;
-	u64 cur_wall_time;
-	u64 busy_time;
-
-	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
-
-	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
-	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
-	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
-	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
-	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
-	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
-
-	idle_time = cur_wall_time - busy_time;
-	if (wall)
-		*wall = jiffies_to_usecs(cur_wall_time);
-
-	return jiffies_to_usecs(idle_time);
-}
-
-static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
-{
-	u64 idle_time = get_cpu_idle_time_us(cpu, NULL);
-
-	if (idle_time == -1ULL)
-		return get_cpu_idle_time_jiffy(cpu, wall);
-	else
-		idle_time += get_cpu_iowait_time_us(cpu, wall);
-
-	return idle_time;
-}
-
 static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall)
 {
 	u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);

diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index fc4b78510151..d03c21993052 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -11,6 +11,7 @@
 #ifndef _LINUX_CPUFREQ_H
 #define _LINUX_CPUFREQ_H

+#include
 #include
 #include
 #include
@@ -407,5 +408,9 @@ void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
 void cpufreq_frequency_table_put_attr(unsigned int cpu);

+/*********************************************************************
+ *                         Governor Helpers                          *
+ *********************************************************************/
+cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall);

 #endif /* _LINUX_CPUFREQ_H */
-- cgit v1.2.3
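The helper consolidated above keeps the two-level scheme both governors
relied on: prefer the tickless (NO_HZ) idle statistics when the kernel
provides them (get_cpu_idle_time_us() returns -1ULL when it cannot), and
only fall back to jiffies-based bookkeeping otherwise. A simplified,
standalone sketch of that control flow; the *_stub functions are stand-ins
for the kernel's accounting helpers, not real API, and the numbers are
made up:

#include <stdint.h>
#include <stdio.h>

/* Pretend NO_HZ is disabled: the usec-based query reports "unavailable" */
static uint64_t idle_time_us_stub(unsigned int cpu, uint64_t *wall)
{
	(void)cpu; (void)wall;
	return (uint64_t)-1;		/* like get_cpu_idle_time_us */
}

static uint64_t iowait_time_us_stub(unsigned int cpu, uint64_t *wall)
{
	(void)cpu;
	if (wall)
		*wall = 5000000;
	return 100000;			/* like get_cpu_iowait_time_us */
}

static uint64_t idle_time_jiffy_stub(unsigned int cpu, uint64_t *wall)
{
	(void)cpu;
	if (wall)
		*wall = 5000000;
	return 3000000;			/* like get_cpu_idle_time_jiffy */
}

/* Same shape as the consolidated get_cpu_idle_time() */
static uint64_t cpu_idle_time(unsigned int cpu, uint64_t *wall)
{
	uint64_t idle_time = idle_time_us_stub(cpu, NULL);

	if (idle_time == (uint64_t)-1)	/* NO_HZ accounting unavailable */
		return idle_time_jiffy_stub(cpu, wall);

	/* NO_HZ path: count iowait as idle; the call also updates *wall */
	return idle_time + iowait_time_us_stub(cpu, wall);
}

int main(void)
{
	uint64_t wall;
	printf("idle: %llu us\n", (unsigned long long)cpu_idle_time(0, &wall));
	return 0;
}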
From db7011516cbfc3d867b77721f77258d36cfbf705 Mon Sep 17 00:00:00 2001
From: Viresh Kumar
Date: Tue, 23 Oct 2012 01:29:03 +0200
Subject: cpufreq: Improve debug prints
With debug options on, it is difficult to locate cpufreq core's debug
prints. Fix this by prefixing debug prints with KBUILD_MODNAME.

Signed-off-by: Viresh Kumar
Signed-off-by: Rafael J. Wysocki
---
 drivers/cpufreq/cpufreq.c | 2 ++
 drivers/cpufreq/cpufreq_performance.c | 2 ++
 drivers/cpufreq/cpufreq_powersave.c | 2 ++
 drivers/cpufreq/cpufreq_userspace.c | 2 ++
 drivers/cpufreq/freq_table.c | 2 ++
 5 files changed, 10 insertions(+)

diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index db6e337ad337..85df5387bc61 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -15,6 +15,8 @@
  *
  */

+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include
 #include
 #include

diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c
index f13a8a9af6a1..ceee06849b91 100644
--- a/drivers/cpufreq/cpufreq_performance.c
+++ b/drivers/cpufreq/cpufreq_performance.c
@@ -10,6 +10,8 @@
  *
  */

+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include
 #include
 #include

diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c
index 4c2eb512f2bc..2d948a171155 100644
--- a/drivers/cpufreq/cpufreq_powersave.c
+++ b/drivers/cpufreq/cpufreq_powersave.c
@@ -10,6 +10,8 @@
  *
  */

+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include
 #include
 #include

diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index bedac1aa9be3..c8c3d293cc57 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -11,6 +11,8 @@
  *
  */

+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include
 #include
 #include

diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 90431cb92804..49cda256efb2 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -9,6 +9,8 @@
  *
  */

+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include
 #include
 #include
-- cgit v1.2.3

From 8636fd280e970696be62c8495a5aacb5f3b6237d Mon Sep 17 00:00:00 2001
From: Andreas Schwab
Date: Wed, 24 Oct 2012 22:16:34 +0200
Subject: cpufreq: fix jiffies/cputime mixup in conservative/ondemand governors

The function get_cpu_idle_time_jiffy in both the conservative and
ondemand governors uses jiffies_to_usecs to convert a cputime value to
usecs, which gives the wrong value on architectures where cputime and
jiffies use different units.

This only matters if NO_HZ is disabled, since otherwise
get_cpu_idle_time_us should already return a valid value, and
get_cpu_idle_time_jiffy isn't actually called.

Signed-off-by: Andreas Schwab
Signed-off-by: Rafael J. Wysocki
---
 drivers/cpufreq/cpufreq_governor.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 0001071cdcdb..679842a8d34a 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -33,9 +33,9 @@ static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)

 	idle_time = cur_wall_time - busy_time;
 	if (wall)
-		*wall = jiffies_to_usecs(cur_wall_time);
+		*wall = cputime_to_usecs(cur_wall_time);

-	return jiffies_to_usecs(idle_time);
+	return cputime_to_usecs(idle_time);
 }

 cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
-- cgit v1.2.3
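The bug fixed above is purely a unit mismatch: on configurations where
cputime happens to be measured in jiffies, jiffies_to_usecs() gave the right
answer by coincidence, but where cputime has its own finer granularity the
value is scaled wrongly. A small illustration under assumed units (HZ=100
and a cputime granularity of one microsecond; both are assumptions chosen
for the example, not values from the patch):

#include <stdio.h>

#define HZ 100					/* assumed tick rate */

/* Converts a value that is genuinely a jiffy count */
static unsigned long long jiffies_to_usecs_demo(unsigned long long j)
{
	return j * (1000000ULL / HZ);		/* 1 jiffy == 10000 us here */
}

/* Converts a value measured in cputime units (1 us each, by assumption) */
static unsigned long long cputime_to_usecs_demo(unsigned long long ct)
{
	return ct;
}

int main(void)
{
	unsigned long long cputime_val = 2500000;	/* 2.5 s of cputime */

	/* Wrong: interprets a cputime value as a jiffy count (off by 10^4) */
	printf("jiffies_to_usecs: %llu us\n", jiffies_to_usecs_demo(cputime_val));
	/* Right: converts according to cputime's actual granularity */
	printf("cputime_to_usecs: %llu us\n", cputime_to_usecs_demo(cputime_val));
	return 0;
}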
From 0676f7f2e7d2adec11f40320ca43a8897b8ef906 Mon Sep 17 00:00:00 2001
From: Viresh Kumar
Date: Wed, 24 Oct 2012 23:39:48 +0200
Subject: cpufreq: return early from __cpufreq_driver_getavg()

There is no need to do cpufreq_cpu_get() and cpufreq_cpu_put() for
drivers that don't support the getavg() routine.

Signed-off-by: Viresh Kumar
Signed-off-by: Rafael J. Wysocki
---
 drivers/cpufreq/cpufreq.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 85df5387bc61..f552d5fe0f8f 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1511,12 +1511,14 @@ int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
 {
 	int ret = 0;

+	if (!(cpu_online(cpu) && cpufreq_driver->getavg))
+		return 0;
+
 	policy = cpufreq_cpu_get(policy->cpu);
 	if (!policy)
 		return -EINVAL;

-	if (cpu_online(cpu) && cpufreq_driver->getavg)
-		ret = cpufreq_driver->getavg(policy, cpu);
+	ret = cpufreq_driver->getavg(policy, cpu);

 	cpufreq_cpu_put(policy);
 	return ret;
-- cgit v1.2.3

From 4471a34f9a1f2da220272e823bdb8e8fa83a7661 Mon Sep 17 00:00:00 2001
From: Viresh Kumar
Date: Fri, 26 Oct 2012 00:47:42 +0200
Subject: cpufreq: governors: remove redundant code

The ondemand governor was written first, and the conservative governor
was then derived from it. The conservative governor reused a lot of the
ondemand code, but as a copy rather than as shared routines, which
created redundancy that is difficult to manage.

This patch is an attempt to move the common parts of both governors to
the cpufreq_governor.c file to overcome the above-mentioned issues.

This shouldn't change anything from a functionality point of view.

Signed-off-by: Viresh Kumar
Signed-off-by: Rafael J. Wysocki
---
 drivers/cpufreq/cpufreq_conservative.c | 548 ++++++++------------------
 drivers/cpufreq/cpufreq_governor.c | 276 ++++++++++++-
 drivers/cpufreq/cpufreq_governor.h | 177 +++++++++
 drivers/cpufreq/cpufreq_ondemand.c | 698 +++++++++++----------------------
 include/linux/cpufreq.h | 6 -
 5 files changed, 832 insertions(+), 873 deletions(-)
 create mode 100644 drivers/cpufreq/cpufreq_governor.h

diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 181abad07266..64ef737e7e72 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -11,83 +11,30 @@
 * published by the Free Software Foundation. 
*/ -#include -#include -#include #include -#include -#include +#include +#include #include +#include +#include #include -#include -#include -#include -#include +#include +#include +#include +#include -/* - * dbs is used in this file as a shortform for demandbased switching - * It helps to keep variable names smaller, simpler - */ +#include "cpufreq_governor.h" +/* Conservative governor macors */ #define DEF_FREQUENCY_UP_THRESHOLD (80) #define DEF_FREQUENCY_DOWN_THRESHOLD (20) - -/* - * The polling frequency of this governor depends on the capability of - * the processor. Default polling frequency is 1000 times the transition - * latency of the processor. The governor will work on any processor with - * transition latency <= 10mS, using appropriate sampling - * rate. - * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) - * this governor will not work. - * All times here are in uS. - */ -#define MIN_SAMPLING_RATE_RATIO (2) - -static unsigned int min_sampling_rate; - -#define LATENCY_MULTIPLIER (1000) -#define MIN_LATENCY_MULTIPLIER (100) #define DEF_SAMPLING_DOWN_FACTOR (1) #define MAX_SAMPLING_DOWN_FACTOR (10) -#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) - -static void do_dbs_timer(struct work_struct *work); - -struct cpu_dbs_info_s { - cputime64_t prev_cpu_idle; - cputime64_t prev_cpu_wall; - cputime64_t prev_cpu_nice; - struct cpufreq_policy *cur_policy; - struct delayed_work work; - unsigned int down_skip; - unsigned int requested_freq; - int cpu; - unsigned int enable:1; - /* - * percpu mutex that serializes governor limit change with - * do_dbs_timer invocation. We do not want do_dbs_timer to run - * when user is changing the governor or limits. - */ - struct mutex timer_mutex; -}; -static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info); -static unsigned int dbs_enable; /* number of CPUs using this policy */ +static struct dbs_data cs_dbs_data; +static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info); -/* - * dbs_mutex protects dbs_enable in governor start/stop. - */ -static DEFINE_MUTEX(dbs_mutex); - -static struct dbs_tuners { - unsigned int sampling_rate; - unsigned int sampling_down_factor; - unsigned int up_threshold; - unsigned int down_threshold; - unsigned int ignore_nice; - unsigned int freq_step; -} dbs_tuners_ins = { +static struct cs_dbs_tuners cs_tuners = { .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD, .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, @@ -95,61 +42,121 @@ static struct dbs_tuners { .freq_step = 5, }; -/* keep track of frequency transitions */ -static int -dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, - void *data) +/* + * Every sampling_rate, we check, if current idle time is less than 20% + * (default), then we try to increase frequency Every sampling_rate * + * sampling_down_factor, we check, if current idle time is more than 80%, then + * we try to decrease frequency + * + * Any frequency increase takes it to the maximum frequency. 
Frequency reduction + * happens at minimum steps of 5% (default) of maximum frequency + */ +static void cs_check_cpu(int cpu, unsigned int load) { - struct cpufreq_freqs *freq = data; - struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info, - freq->cpu); + struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu); + struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy; + unsigned int freq_target; + + /* + * break out if we 'cannot' reduce the speed as the user might + * want freq_step to be zero + */ + if (cs_tuners.freq_step == 0) + return; + + /* Check for frequency increase */ + if (load > cs_tuners.up_threshold) { + dbs_info->down_skip = 0; + + /* if we are already at full speed then break out early */ + if (dbs_info->requested_freq == policy->max) + return; + + freq_target = (cs_tuners.freq_step * policy->max) / 100; + + /* max freq cannot be less than 100. But who knows.... */ + if (unlikely(freq_target == 0)) + freq_target = 5; + + dbs_info->requested_freq += freq_target; + if (dbs_info->requested_freq > policy->max) + dbs_info->requested_freq = policy->max; + __cpufreq_driver_target(policy, dbs_info->requested_freq, + CPUFREQ_RELATION_H); + return; + } + + /* + * The optimal frequency is the frequency that is the lowest that can + * support the current CPU usage without triggering the up policy. To be + * safe, we focus 10 points under the threshold. + */ + if (load < (cs_tuners.down_threshold - 10)) { + freq_target = (cs_tuners.freq_step * policy->max) / 100; + + dbs_info->requested_freq -= freq_target; + if (dbs_info->requested_freq < policy->min) + dbs_info->requested_freq = policy->min; + + /* + * if we cannot reduce the frequency anymore, break out early + */ + if (policy->cur == policy->min) + return; + + __cpufreq_driver_target(policy, dbs_info->requested_freq, + CPUFREQ_RELATION_H); + return; + } +} + +static void cs_dbs_timer(struct work_struct *work) +{ + struct cs_cpu_dbs_info_s *dbs_info = container_of(work, + struct cs_cpu_dbs_info_s, cdbs.work.work); + unsigned int cpu = dbs_info->cdbs.cpu; + int delay = delay_for_sampling_rate(cs_tuners.sampling_rate); + + mutex_lock(&dbs_info->cdbs.timer_mutex); + + dbs_check_cpu(&cs_dbs_data, cpu); + + schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, delay); + mutex_unlock(&dbs_info->cdbs.timer_mutex); +} + +static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, + void *data) +{ + struct cpufreq_freqs *freq = data; + struct cs_cpu_dbs_info_s *dbs_info = + &per_cpu(cs_cpu_dbs_info, freq->cpu); struct cpufreq_policy *policy; - if (!this_dbs_info->enable) + if (!dbs_info->enable) return 0; - policy = this_dbs_info->cur_policy; + policy = dbs_info->cdbs.cur_policy; /* - * we only care if our internally tracked freq moves outside - * the 'valid' ranges of freqency available to us otherwise - * we do not change it + * we only care if our internally tracked freq moves outside the 'valid' + * ranges of freqency available to us otherwise we do not change it */ - if (this_dbs_info->requested_freq > policy->max - || this_dbs_info->requested_freq < policy->min) - this_dbs_info->requested_freq = freq->new; + if (dbs_info->requested_freq > policy->max + || dbs_info->requested_freq < policy->min) + dbs_info->requested_freq = freq->new; return 0; } -static struct notifier_block dbs_cpufreq_notifier_block = { - .notifier_call = dbs_cpufreq_notifier -}; - /************************** sysfs interface ************************/ static ssize_t show_sampling_rate_min(struct kobject *kobj, struct attribute 
*attr, char *buf) { - return sprintf(buf, "%u\n", min_sampling_rate); + return sprintf(buf, "%u\n", cs_dbs_data.min_sampling_rate); } -define_one_global_ro(sampling_rate_min); - -/* cpufreq_conservative Governor Tunables */ -#define show_one(file_name, object) \ -static ssize_t show_##file_name \ -(struct kobject *kobj, struct attribute *attr, char *buf) \ -{ \ - return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ -} -show_one(sampling_rate, sampling_rate); -show_one(sampling_down_factor, sampling_down_factor); -show_one(up_threshold, up_threshold); -show_one(down_threshold, down_threshold); -show_one(ignore_nice_load, ignore_nice); -show_one(freq_step, freq_step); - static ssize_t store_sampling_down_factor(struct kobject *a, struct attribute *b, const char *buf, size_t count) @@ -161,7 +168,7 @@ static ssize_t store_sampling_down_factor(struct kobject *a, if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) return -EINVAL; - dbs_tuners_ins.sampling_down_factor = input; + cs_tuners.sampling_down_factor = input; return count; } @@ -175,7 +182,7 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, if (ret != 1) return -EINVAL; - dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); + cs_tuners.sampling_rate = max(input, cs_dbs_data.min_sampling_rate); return count; } @@ -186,11 +193,10 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, int ret; ret = sscanf(buf, "%u", &input); - if (ret != 1 || input > 100 || - input <= dbs_tuners_ins.down_threshold) + if (ret != 1 || input > 100 || input <= cs_tuners.down_threshold) return -EINVAL; - dbs_tuners_ins.up_threshold = input; + cs_tuners.up_threshold = input; return count; } @@ -203,21 +209,19 @@ static ssize_t store_down_threshold(struct kobject *a, struct attribute *b, /* cannot be lower than 11 otherwise freq will not fall */ if (ret != 1 || input < 11 || input > 100 || - input >= dbs_tuners_ins.up_threshold) + input >= cs_tuners.up_threshold) return -EINVAL; - dbs_tuners_ins.down_threshold = input; + cs_tuners.down_threshold = input; return count; } static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, const char *buf, size_t count) { - unsigned int input; + unsigned int input, j; int ret; - unsigned int j; - ret = sscanf(buf, "%u", &input); if (ret != 1) return -EINVAL; @@ -225,19 +229,20 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, if (input > 1) input = 1; - if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */ + if (input == cs_tuners.ignore_nice) /* nothing to do */ return count; - dbs_tuners_ins.ignore_nice = input; + cs_tuners.ignore_nice = input; /* we need to re-evaluate prev_cpu_idle */ for_each_online_cpu(j) { - struct cpu_dbs_info_s *dbs_info; + struct cs_cpu_dbs_info_s *dbs_info; dbs_info = &per_cpu(cs_cpu_dbs_info, j); - dbs_info->prev_cpu_idle = get_cpu_idle_time(j, - &dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) - dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j, + &dbs_info->cdbs.prev_cpu_wall); + if (cs_tuners.ignore_nice) + dbs_info->cdbs.prev_cpu_nice = + kcpustat_cpu(j).cpustat[CPUTIME_NICE]; } return count; } @@ -255,18 +260,28 @@ static ssize_t store_freq_step(struct kobject *a, struct attribute *b, if (input > 100) input = 100; - /* no need to test here if freq_step is zero as the user might actually - * want this, they would be crazy though :) */ - dbs_tuners_ins.freq_step = input; + /* + * no need 
to test here if freq_step is zero as the user might actually + * want this, they would be crazy though :) + */ + cs_tuners.freq_step = input; return count; } +show_one(cs, sampling_rate, sampling_rate); +show_one(cs, sampling_down_factor, sampling_down_factor); +show_one(cs, up_threshold, up_threshold); +show_one(cs, down_threshold, down_threshold); +show_one(cs, ignore_nice_load, ignore_nice); +show_one(cs, freq_step, freq_step); + define_one_global_rw(sampling_rate); define_one_global_rw(sampling_down_factor); define_one_global_rw(up_threshold); define_one_global_rw(down_threshold); define_one_global_rw(ignore_nice_load); define_one_global_rw(freq_step); +define_one_global_ro(sampling_rate_min); static struct attribute *dbs_attributes[] = { &sampling_rate_min.attr, @@ -279,283 +294,38 @@ static struct attribute *dbs_attributes[] = { NULL }; -static struct attribute_group dbs_attr_group = { +static struct attribute_group cs_attr_group = { .attrs = dbs_attributes, .name = "conservative", }; /************************** sysfs end ************************/ -static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) -{ - unsigned int load = 0; - unsigned int max_load = 0; - unsigned int freq_target; - - struct cpufreq_policy *policy; - unsigned int j; - - policy = this_dbs_info->cur_policy; - - /* - * Every sampling_rate, we check, if current idle time is less - * than 20% (default), then we try to increase frequency - * Every sampling_rate*sampling_down_factor, we check, if current - * idle time is more than 80%, then we try to decrease frequency - * - * Any frequency increase takes it to the maximum frequency. - * Frequency reduction happens at minimum steps of - * 5% (default) of maximum frequency - */ - - /* Get Absolute Load */ - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; - cputime64_t cur_wall_time, cur_idle_time; - unsigned int idle_time, wall_time; - - j_dbs_info = &per_cpu(cs_cpu_dbs_info, j); - - cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); - - wall_time = (unsigned int) - (cur_wall_time - j_dbs_info->prev_cpu_wall); - j_dbs_info->prev_cpu_wall = cur_wall_time; - - idle_time = (unsigned int) - (cur_idle_time - j_dbs_info->prev_cpu_idle); - j_dbs_info->prev_cpu_idle = cur_idle_time; - - if (dbs_tuners_ins.ignore_nice) { - u64 cur_nice; - unsigned long cur_nice_jiffies; - - cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - - j_dbs_info->prev_cpu_nice; - /* - * Assumption: nice time between sampling periods will - * be less than 2^32 jiffies for 32 bit sys - */ - cur_nice_jiffies = (unsigned long) - cputime64_to_jiffies64(cur_nice); - - j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - idle_time += jiffies_to_usecs(cur_nice_jiffies); - } - - if (unlikely(!wall_time || wall_time < idle_time)) - continue; - - load = 100 * (wall_time - idle_time) / wall_time; - - if (load > max_load) - max_load = load; - } +define_get_cpu_dbs_routines(cs_cpu_dbs_info); - /* - * break out if we 'cannot' reduce the speed as the user might - * want freq_step to be zero - */ - if (dbs_tuners_ins.freq_step == 0) - return; - - /* Check for frequency increase */ - if (max_load > dbs_tuners_ins.up_threshold) { - this_dbs_info->down_skip = 0; - - /* if we are already at full speed then break out early */ - if (this_dbs_info->requested_freq == policy->max) - return; - - freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; - - /* max freq cannot be less than 100. But who knows.... 
*/ - if (unlikely(freq_target == 0)) - freq_target = 5; - - this_dbs_info->requested_freq += freq_target; - if (this_dbs_info->requested_freq > policy->max) - this_dbs_info->requested_freq = policy->max; - - __cpufreq_driver_target(policy, this_dbs_info->requested_freq, - CPUFREQ_RELATION_H); - return; - } - - /* - * The optimal frequency is the frequency that is the lowest that - * can support the current CPU usage without triggering the up - * policy. To be safe, we focus 10 points under the threshold. - */ - if (max_load < (dbs_tuners_ins.down_threshold - 10)) { - freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; - - this_dbs_info->requested_freq -= freq_target; - if (this_dbs_info->requested_freq < policy->min) - this_dbs_info->requested_freq = policy->min; - - /* - * if we cannot reduce the frequency anymore, break out early - */ - if (policy->cur == policy->min) - return; - - __cpufreq_driver_target(policy, this_dbs_info->requested_freq, - CPUFREQ_RELATION_H); - return; - } -} - -static void do_dbs_timer(struct work_struct *work) -{ - struct cpu_dbs_info_s *dbs_info = - container_of(work, struct cpu_dbs_info_s, work.work); - unsigned int cpu = dbs_info->cpu; - - /* We want all CPUs to do sampling nearly on same jiffy */ - int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); - - delay -= jiffies % delay; - - mutex_lock(&dbs_info->timer_mutex); - - dbs_check_cpu(dbs_info); - - schedule_delayed_work_on(cpu, &dbs_info->work, delay); - mutex_unlock(&dbs_info->timer_mutex); -} - -static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) -{ - /* We want all CPUs to do sampling nearly on same jiffy */ - int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); - delay -= jiffies % delay; +static struct notifier_block cs_cpufreq_notifier_block = { + .notifier_call = dbs_cpufreq_notifier, +}; - dbs_info->enable = 1; - INIT_DEFERRABLE_WORK(&dbs_info->work, do_dbs_timer); - schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay); -} +static struct cs_ops cs_ops = { + .notifier_block = &cs_cpufreq_notifier_block, +}; -static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) -{ - dbs_info->enable = 0; - cancel_delayed_work_sync(&dbs_info->work); -} +static struct dbs_data cs_dbs_data = { + .governor = GOV_CONSERVATIVE, + .attr_group = &cs_attr_group, + .tuners = &cs_tuners, + .get_cpu_cdbs = get_cpu_cdbs, + .get_cpu_dbs_info_s = get_cpu_dbs_info_s, + .gov_dbs_timer = cs_dbs_timer, + .gov_check_cpu = cs_check_cpu, + .gov_ops = &cs_ops, +}; -static int cpufreq_governor_dbs(struct cpufreq_policy *policy, +static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event) { - unsigned int cpu = policy->cpu; - struct cpu_dbs_info_s *this_dbs_info; - unsigned int j; - int rc; - - this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu); - - switch (event) { - case CPUFREQ_GOV_START: - if ((!cpu_online(cpu)) || (!policy->cur)) - return -EINVAL; - - mutex_lock(&dbs_mutex); - - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; - j_dbs_info = &per_cpu(cs_cpu_dbs_info, j); - j_dbs_info->cur_policy = policy; - - j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, - &j_dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) - j_dbs_info->prev_cpu_nice = - kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - } - this_dbs_info->cpu = cpu; - this_dbs_info->down_skip = 0; - this_dbs_info->requested_freq = policy->cur; - - mutex_init(&this_dbs_info->timer_mutex); - dbs_enable++; - /* - * Start the timerschedule work, when this governor - * is used for 
first time - */ - if (dbs_enable == 1) { - unsigned int latency; - /* policy latency is in nS. Convert it to uS first */ - latency = policy->cpuinfo.transition_latency / 1000; - if (latency == 0) - latency = 1; - - rc = sysfs_create_group(cpufreq_global_kobject, - &dbs_attr_group); - if (rc) { - mutex_unlock(&dbs_mutex); - return rc; - } - - /* - * conservative does not implement micro like ondemand - * governor, thus we are bound to jiffes/HZ - */ - min_sampling_rate = - MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); - /* Bring kernel and HW constraints together */ - min_sampling_rate = max(min_sampling_rate, - MIN_LATENCY_MULTIPLIER * latency); - dbs_tuners_ins.sampling_rate = - max(min_sampling_rate, - latency * LATENCY_MULTIPLIER); - - cpufreq_register_notifier( - &dbs_cpufreq_notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); - } - mutex_unlock(&dbs_mutex); - - dbs_timer_init(this_dbs_info); - - break; - - case CPUFREQ_GOV_STOP: - dbs_timer_exit(this_dbs_info); - - mutex_lock(&dbs_mutex); - dbs_enable--; - mutex_destroy(&this_dbs_info->timer_mutex); - - /* - * Stop the timerschedule work, when this governor - * is used for first time - */ - if (dbs_enable == 0) - cpufreq_unregister_notifier( - &dbs_cpufreq_notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); - - mutex_unlock(&dbs_mutex); - if (!dbs_enable) - sysfs_remove_group(cpufreq_global_kobject, - &dbs_attr_group); - - break; - - case CPUFREQ_GOV_LIMITS: - mutex_lock(&this_dbs_info->timer_mutex); - if (policy->max < this_dbs_info->cur_policy->cur) - __cpufreq_driver_target( - this_dbs_info->cur_policy, - policy->max, CPUFREQ_RELATION_H); - else if (policy->min > this_dbs_info->cur_policy->cur) - __cpufreq_driver_target( - this_dbs_info->cur_policy, - policy->min, CPUFREQ_RELATION_L); - dbs_check_cpu(this_dbs_info); - mutex_unlock(&this_dbs_info->timer_mutex); - - break; - } - return 0; + return cpufreq_governor_dbs(&cs_dbs_data, policy, event); } #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE @@ -563,13 +333,14 @@ static #endif struct cpufreq_governor cpufreq_gov_conservative = { .name = "conservative", - .governor = cpufreq_governor_dbs, + .governor = cs_cpufreq_governor_dbs, .max_transition_latency = TRANSITION_LATENCY_LIMIT, .owner = THIS_MODULE, }; static int __init cpufreq_gov_dbs_init(void) { + mutex_init(&cs_dbs_data.mutex); return cpufreq_register_governor(&cpufreq_gov_conservative); } @@ -578,7 +349,6 @@ static void __exit cpufreq_gov_dbs_exit(void) cpufreq_unregister_governor(&cpufreq_gov_conservative); } - MODULE_AUTHOR("Alexander Clouter "); MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for " "Low Latency Frequency Transition capable processors " diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 679842a8d34a..5ea2c829a796 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -3,19 +3,31 @@ * * CPUFREQ governors common code * + * Copyright (C) 2001 Russell King + * (C) 2003 Venkatesh Pallipadi . + * (C) 2003 Jun Nakajima + * (C) 2009 Alexander Clouter + * (c) 2012 Viresh Kumar + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include +#include +#include #include #include +#include #include #include -/* - * Code picked from earlier governer implementations - */ +#include + +#include "cpufreq_governor.h" + static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) { u64 idle_time; @@ -33,9 +45,9 @@ static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) idle_time = cur_wall_time - busy_time; if (wall) - *wall = cputime_to_usecs(cur_wall_time); + *wall = jiffies_to_usecs(cur_wall_time); - return cputime_to_usecs(idle_time); + return jiffies_to_usecs(idle_time); } cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) @@ -50,3 +62,257 @@ cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) return idle_time; } EXPORT_SYMBOL_GPL(get_cpu_idle_time); + +void dbs_check_cpu(struct dbs_data *dbs_data, int cpu) +{ + struct cpu_dbs_common_info *cdbs = dbs_data->get_cpu_cdbs(cpu); + struct od_dbs_tuners *od_tuners = dbs_data->tuners; + struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; + struct cpufreq_policy *policy; + unsigned int max_load = 0; + unsigned int ignore_nice; + unsigned int j; + + if (dbs_data->governor == GOV_ONDEMAND) + ignore_nice = od_tuners->ignore_nice; + else + ignore_nice = cs_tuners->ignore_nice; + + policy = cdbs->cur_policy; + + /* Get Absolute Load (in terms of freq for ondemand gov) */ + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_common_info *j_cdbs; + cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; + unsigned int idle_time, wall_time, iowait_time; + unsigned int load; + + j_cdbs = dbs_data->get_cpu_cdbs(j); + + cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); + + wall_time = (unsigned int) + (cur_wall_time - j_cdbs->prev_cpu_wall); + j_cdbs->prev_cpu_wall = cur_wall_time; + + idle_time = (unsigned int) + (cur_idle_time - j_cdbs->prev_cpu_idle); + j_cdbs->prev_cpu_idle = cur_idle_time; + + if (ignore_nice) { + u64 cur_nice; + unsigned long cur_nice_jiffies; + + cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - + cdbs->prev_cpu_nice; + /* + * Assumption: nice time between sampling periods will + * be less than 2^32 jiffies for 32 bit sys + */ + cur_nice_jiffies = (unsigned long) + cputime64_to_jiffies64(cur_nice); + + cdbs->prev_cpu_nice = + kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + idle_time += jiffies_to_usecs(cur_nice_jiffies); + } + + if (dbs_data->governor == GOV_ONDEMAND) { + struct od_cpu_dbs_info_s *od_j_dbs_info = + dbs_data->get_cpu_dbs_info_s(cpu); + + cur_iowait_time = get_cpu_iowait_time_us(j, + &cur_wall_time); + if (cur_iowait_time == -1ULL) + cur_iowait_time = 0; + + iowait_time = (unsigned int) (cur_iowait_time - + od_j_dbs_info->prev_cpu_iowait); + od_j_dbs_info->prev_cpu_iowait = cur_iowait_time; + + /* + * For the purpose of ondemand, waiting for disk IO is + * an indication that you're performance critical, and + * not that the system is actually idle. So subtract the + * iowait time from the cpu idle time. 
+ */ + if (od_tuners->io_is_busy && idle_time >= iowait_time) + idle_time -= iowait_time; + } + + if (unlikely(!wall_time || wall_time < idle_time)) + continue; + + load = 100 * (wall_time - idle_time) / wall_time; + + if (dbs_data->governor == GOV_ONDEMAND) { + int freq_avg = __cpufreq_driver_getavg(policy, j); + if (freq_avg <= 0) + freq_avg = policy->cur; + + load *= freq_avg; + } + + if (load > max_load) + max_load = load; + } + + dbs_data->gov_check_cpu(cpu, max_load); +} +EXPORT_SYMBOL_GPL(dbs_check_cpu); + +static inline void dbs_timer_init(struct dbs_data *dbs_data, + struct cpu_dbs_common_info *cdbs, unsigned int sampling_rate) +{ + int delay = delay_for_sampling_rate(sampling_rate); + + INIT_DEFERRABLE_WORK(&cdbs->work, dbs_data->gov_dbs_timer); + schedule_delayed_work_on(cdbs->cpu, &cdbs->work, delay); +} + +static inline void dbs_timer_exit(struct cpu_dbs_common_info *cdbs) +{ + cancel_delayed_work_sync(&cdbs->work); +} + +int cpufreq_governor_dbs(struct dbs_data *dbs_data, + struct cpufreq_policy *policy, unsigned int event) +{ + struct od_cpu_dbs_info_s *od_dbs_info = NULL; + struct cs_cpu_dbs_info_s *cs_dbs_info = NULL; + struct od_dbs_tuners *od_tuners = dbs_data->tuners; + struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; + struct cpu_dbs_common_info *cpu_cdbs; + unsigned int *sampling_rate, latency, ignore_nice, j, cpu = policy->cpu; + int rc; + + cpu_cdbs = dbs_data->get_cpu_cdbs(cpu); + + if (dbs_data->governor == GOV_CONSERVATIVE) { + cs_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu); + sampling_rate = &cs_tuners->sampling_rate; + ignore_nice = cs_tuners->ignore_nice; + } else { + od_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu); + sampling_rate = &od_tuners->sampling_rate; + ignore_nice = od_tuners->ignore_nice; + } + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!policy->cur)) + return -EINVAL; + + mutex_lock(&dbs_data->mutex); + + dbs_data->enable++; + cpu_cdbs->cpu = cpu; + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_common_info *j_cdbs; + j_cdbs = dbs_data->get_cpu_cdbs(j); + + j_cdbs->cur_policy = policy; + j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, + &j_cdbs->prev_cpu_wall); + if (ignore_nice) + j_cdbs->prev_cpu_nice = + kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + } + + /* + * Start the timerschedule work, when this governor is used for + * first time + */ + if (dbs_data->enable != 1) + goto second_time; + + rc = sysfs_create_group(cpufreq_global_kobject, + dbs_data->attr_group); + if (rc) { + mutex_unlock(&dbs_data->mutex); + return rc; + } + + /* policy latency is in nS. 
Convert it to uS first */ + latency = policy->cpuinfo.transition_latency / 1000; + if (latency == 0) + latency = 1; + + /* + * conservative does not implement micro like ondemand + * governor, thus we are bound to jiffes/HZ + */ + if (dbs_data->governor == GOV_CONSERVATIVE) { + struct cs_ops *ops = dbs_data->gov_ops; + + cpufreq_register_notifier(ops->notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + + dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO * + jiffies_to_usecs(10); + } else { + struct od_ops *ops = dbs_data->gov_ops; + + od_tuners->io_is_busy = ops->io_busy(); + } + + /* Bring kernel and HW constraints together */ + dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate, + MIN_LATENCY_MULTIPLIER * latency); + *sampling_rate = max(dbs_data->min_sampling_rate, latency * + LATENCY_MULTIPLIER); + +second_time: + if (dbs_data->governor == GOV_CONSERVATIVE) { + cs_dbs_info->down_skip = 0; + cs_dbs_info->enable = 1; + cs_dbs_info->requested_freq = policy->cur; + } else { + struct od_ops *ops = dbs_data->gov_ops; + od_dbs_info->rate_mult = 1; + od_dbs_info->sample_type = OD_NORMAL_SAMPLE; + ops->powersave_bias_init_cpu(cpu); + } + mutex_unlock(&dbs_data->mutex); + + mutex_init(&cpu_cdbs->timer_mutex); + dbs_timer_init(dbs_data, cpu_cdbs, *sampling_rate); + break; + + case CPUFREQ_GOV_STOP: + if (dbs_data->governor == GOV_CONSERVATIVE) + cs_dbs_info->enable = 0; + + dbs_timer_exit(cpu_cdbs); + + mutex_lock(&dbs_data->mutex); + mutex_destroy(&cpu_cdbs->timer_mutex); + dbs_data->enable--; + if (!dbs_data->enable) { + struct cs_ops *ops = dbs_data->gov_ops; + + sysfs_remove_group(cpufreq_global_kobject, + dbs_data->attr_group); + if (dbs_data->governor == GOV_CONSERVATIVE) + cpufreq_unregister_notifier(ops->notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + } + mutex_unlock(&dbs_data->mutex); + + break; + + case CPUFREQ_GOV_LIMITS: + mutex_lock(&cpu_cdbs->timer_mutex); + if (policy->max < cpu_cdbs->cur_policy->cur) + __cpufreq_driver_target(cpu_cdbs->cur_policy, + policy->max, CPUFREQ_RELATION_H); + else if (policy->min > cpu_cdbs->cur_policy->cur) + __cpufreq_driver_target(cpu_cdbs->cur_policy, + policy->min, CPUFREQ_RELATION_L); + dbs_check_cpu(dbs_data, cpu); + mutex_unlock(&cpu_cdbs->timer_mutex); + break; + } + return 0; +} +EXPORT_SYMBOL_GPL(cpufreq_governor_dbs); diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h new file mode 100644 index 000000000000..34e14adfc3f9 --- /dev/null +++ b/drivers/cpufreq/cpufreq_governor.h @@ -0,0 +1,177 @@ +/* + * drivers/cpufreq/cpufreq_governor.h + * + * Header file for CPUFreq governors common code + * + * Copyright (C) 2001 Russell King + * (C) 2003 Venkatesh Pallipadi . + * (C) 2003 Jun Nakajima + * (C) 2009 Alexander Clouter + * (c) 2012 Viresh Kumar + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _CPUFREQ_GOVERNER_H +#define _CPUFREQ_GOVERNER_H + +#include +#include +#include +#include +#include +#include + +/* + * The polling frequency depends on the capability of the processor. Default + * polling frequency is 1000 times the transition latency of the processor. The + * governor will work on any processor with transition latency <= 10mS, using + * appropriate sampling rate. + * + * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) + * this governor will not work. All times here are in uS. 
+ */ +#define MIN_SAMPLING_RATE_RATIO (2) +#define LATENCY_MULTIPLIER (1000) +#define MIN_LATENCY_MULTIPLIER (100) +#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) + +/* Ondemand Sampling types */ +enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE}; + +/* Macro creating sysfs show routines */ +#define show_one(_gov, file_name, object) \ +static ssize_t show_##file_name \ +(struct kobject *kobj, struct attribute *attr, char *buf) \ +{ \ + return sprintf(buf, "%u\n", _gov##_tuners.object); \ +} + +#define define_get_cpu_dbs_routines(_dbs_info) \ +static struct cpu_dbs_common_info *get_cpu_cdbs(int cpu) \ +{ \ + return &per_cpu(_dbs_info, cpu).cdbs; \ +} \ + \ +static void *get_cpu_dbs_info_s(int cpu) \ +{ \ + return &per_cpu(_dbs_info, cpu); \ +} + +/* + * Abbreviations: + * dbs: used as a shortform for demand based switching It helps to keep variable + * names smaller, simpler + * cdbs: common dbs + * on_*: On-demand governor + * cs_*: Conservative governor + */ + +/* Per cpu structures */ +struct cpu_dbs_common_info { + int cpu; + cputime64_t prev_cpu_idle; + cputime64_t prev_cpu_wall; + cputime64_t prev_cpu_nice; + struct cpufreq_policy *cur_policy; + struct delayed_work work; + /* + * percpu mutex that serializes governor limit change with gov_dbs_timer + * invocation. We do not want gov_dbs_timer to run when user is changing + * the governor or limits. + */ + struct mutex timer_mutex; +}; + +struct od_cpu_dbs_info_s { + struct cpu_dbs_common_info cdbs; + cputime64_t prev_cpu_iowait; + struct cpufreq_frequency_table *freq_table; + unsigned int freq_lo; + unsigned int freq_lo_jiffies; + unsigned int freq_hi_jiffies; + unsigned int rate_mult; + unsigned int sample_type:1; +}; + +struct cs_cpu_dbs_info_s { + struct cpu_dbs_common_info cdbs; + unsigned int down_skip; + unsigned int requested_freq; + unsigned int enable:1; +}; + +/* Governers sysfs tunables */ +struct od_dbs_tuners { + unsigned int ignore_nice; + unsigned int sampling_rate; + unsigned int sampling_down_factor; + unsigned int up_threshold; + unsigned int down_differential; + unsigned int powersave_bias; + unsigned int io_is_busy; +}; + +struct cs_dbs_tuners { + unsigned int ignore_nice; + unsigned int sampling_rate; + unsigned int sampling_down_factor; + unsigned int up_threshold; + unsigned int down_threshold; + unsigned int freq_step; +}; + +/* Per Governer data */ +struct dbs_data { + /* Common across governors */ + #define GOV_ONDEMAND 0 + #define GOV_CONSERVATIVE 1 + int governor; + unsigned int min_sampling_rate; + unsigned int enable; /* number of CPUs using this policy */ + struct attribute_group *attr_group; + void *tuners; + + /* dbs_mutex protects dbs_enable in governor start/stop */ + struct mutex mutex; + + struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu); + void *(*get_cpu_dbs_info_s)(int cpu); + void (*gov_dbs_timer)(struct work_struct *work); + void (*gov_check_cpu)(int cpu, unsigned int load); + + /* Governor specific ops, see below */ + void *gov_ops; +}; + +/* Governor specific ops, will be passed to dbs_data->gov_ops */ +struct od_ops { + int (*io_busy)(void); + void (*powersave_bias_init_cpu)(int cpu); + unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy, + unsigned int freq_next, unsigned int relation); + void (*freq_increase)(struct cpufreq_policy *p, unsigned int freq); +}; + +struct cs_ops { + struct notifier_block *notifier_block; +}; + +static inline int delay_for_sampling_rate(unsigned int sampling_rate) +{ + int delay = usecs_to_jiffies(sampling_rate); + + /* We want all CPUs to do 
sampling nearly on same jiffy */ + if (num_online_cpus() > 1) + delay -= jiffies % delay; + + return delay; +} + +cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall); +void dbs_check_cpu(struct dbs_data *dbs_data, int cpu); +int cpufreq_governor_dbs(struct dbs_data *dbs_data, + struct cpufreq_policy *policy, unsigned int event); +#endif /* _CPUFREQ_GOVERNER_H */ diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index d7f774bb49dd..bdaab9206303 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -10,24 +10,23 @@ * published by the Free Software Foundation. */ -#include -#include -#include +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include -#include -#include +#include +#include #include +#include +#include #include -#include +#include +#include #include -#include -#include +#include -/* - * dbs is used in this file as a shortform for demandbased switching - * It helps to keep variable names smaller, simpler - */ +#include "cpufreq_governor.h" +/* On-demand governor macors */ #define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10) #define DEF_FREQUENCY_UP_THRESHOLD (80) #define DEF_SAMPLING_DOWN_FACTOR (1) @@ -38,80 +37,10 @@ #define MIN_FREQUENCY_UP_THRESHOLD (11) #define MAX_FREQUENCY_UP_THRESHOLD (100) -/* - * The polling frequency of this governor depends on the capability of - * the processor. Default polling frequency is 1000 times the transition - * latency of the processor. The governor will work on any processor with - * transition latency <= 10mS, using appropriate sampling - * rate. - * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) - * this governor will not work. - * All times here are in uS. - */ -#define MIN_SAMPLING_RATE_RATIO (2) - -static unsigned int min_sampling_rate; - -#define LATENCY_MULTIPLIER (1000) -#define MIN_LATENCY_MULTIPLIER (100) -#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) - -static void do_dbs_timer(struct work_struct *work); -static int cpufreq_governor_dbs(struct cpufreq_policy *policy, - unsigned int event); - -#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND -static -#endif -struct cpufreq_governor cpufreq_gov_ondemand = { - .name = "ondemand", - .governor = cpufreq_governor_dbs, - .max_transition_latency = TRANSITION_LATENCY_LIMIT, - .owner = THIS_MODULE, -}; - -/* Sampling types */ -enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; - -struct cpu_dbs_info_s { - cputime64_t prev_cpu_idle; - cputime64_t prev_cpu_iowait; - cputime64_t prev_cpu_wall; - cputime64_t prev_cpu_nice; - struct cpufreq_policy *cur_policy; - struct delayed_work work; - struct cpufreq_frequency_table *freq_table; - unsigned int freq_lo; - unsigned int freq_lo_jiffies; - unsigned int freq_hi_jiffies; - unsigned int rate_mult; - int cpu; - unsigned int sample_type:1; - /* - * percpu mutex that serializes governor limit change with - * do_dbs_timer invocation. We do not want do_dbs_timer to run - * when user is changing the governor or limits. - */ - struct mutex timer_mutex; -}; -static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info); - -static unsigned int dbs_enable; /* number of CPUs using this policy */ +static struct dbs_data od_dbs_data; +static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info); -/* - * dbs_mutex protects dbs_enable in governor start/stop. 
- */ -static DEFINE_MUTEX(dbs_mutex); - -static struct dbs_tuners { - unsigned int sampling_rate; - unsigned int up_threshold; - unsigned int down_differential; - unsigned int ignore_nice; - unsigned int sampling_down_factor; - unsigned int powersave_bias; - unsigned int io_is_busy; -} dbs_tuners_ins = { +static struct od_dbs_tuners od_tuners = { .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, @@ -119,14 +48,35 @@ static struct dbs_tuners { .powersave_bias = 0, }; -static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall) +static void ondemand_powersave_bias_init_cpu(int cpu) { - u64 iowait_time = get_cpu_iowait_time_us(cpu, wall); + struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); - if (iowait_time == -1ULL) - return 0; + dbs_info->freq_table = cpufreq_frequency_get_table(cpu); + dbs_info->freq_lo = 0; +} - return iowait_time; +/* + * Not all CPUs want IO time to be accounted as busy; this depends on how + * efficient idling at a higher frequency/voltage is. + * Pavel Machek says this is not so for various generations of AMD and old + * Intel systems. + * Mike Chan (androidlcom) calis this is also not true for ARM. + * Because of this, whitelist specific known (series) of CPUs by default, and + * leave all others up to the user. + */ +static int should_io_be_busy(void) +{ +#if defined(CONFIG_X86) + /* + * For Intel, Core 2 (model 15) andl later have an efficient idle. + */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && + boot_cpu_data.x86 == 6 && + boot_cpu_data.x86_model >= 15) + return 1; +#endif + return 0; } /* @@ -135,14 +85,13 @@ static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wal * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs. 
*/ static unsigned int powersave_bias_target(struct cpufreq_policy *policy, - unsigned int freq_next, - unsigned int relation) + unsigned int freq_next, unsigned int relation) { unsigned int freq_req, freq_reduc, freq_avg; unsigned int freq_hi, freq_lo; unsigned int index = 0; unsigned int jiffies_total, jiffies_hi, jiffies_lo; - struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, + struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu); if (!dbs_info->freq_table) { @@ -154,7 +103,7 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy, cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next, relation, &index); freq_req = dbs_info->freq_table[index].frequency; - freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000; + freq_reduc = freq_req * od_tuners.powersave_bias / 1000; freq_avg = freq_req - freq_reduc; /* Find freq bounds for freq_avg in freq_table */ @@ -173,7 +122,7 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy, dbs_info->freq_lo_jiffies = 0; return freq_lo; } - jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + jiffies_total = usecs_to_jiffies(od_tuners.sampling_rate); jiffies_hi = (freq_avg - freq_lo) * jiffies_total; jiffies_hi += ((freq_hi - freq_lo) / 2); jiffies_hi /= (freq_hi - freq_lo); @@ -184,13 +133,6 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy, return freq_hi; } -static void ondemand_powersave_bias_init_cpu(int cpu) -{ - struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); - dbs_info->freq_table = cpufreq_frequency_get_table(cpu); - dbs_info->freq_lo = 0; -} - static void ondemand_powersave_bias_init(void) { int i; @@ -199,53 +141,138 @@ static void ondemand_powersave_bias_init(void) } } -/************************** sysfs interface ************************/ +static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) +{ + if (od_tuners.powersave_bias) + freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H); + else if (p->cur == p->max) + return; -static ssize_t show_sampling_rate_min(struct kobject *kobj, - struct attribute *attr, char *buf) + __cpufreq_driver_target(p, freq, od_tuners.powersave_bias ? + CPUFREQ_RELATION_L : CPUFREQ_RELATION_H); +} + +/* + * Every sampling_rate, we check, if current idle time is less than 20% + * (default), then we try to increase frequency Every sampling_rate, we look for + * a the lowest frequency which can sustain the load while keeping idle time + * over 30%. If such a frequency exist, we try to decrease to this frequency. + * + * Any frequency increase takes it to the maximum frequency. 
Frequency reduction + * happens in minimum steps of 5% (default) of the current frequency. + */ +static void od_check_cpu(int cpu, unsigned int load_freq) { - return sprintf(buf, "%u\n", min_sampling_rate); + struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy; + + dbs_info->freq_lo = 0; + + /* Check for frequency increase */ + if (load_freq > od_tuners.up_threshold * policy->cur) { + /* If switching to max speed, apply sampling_down_factor */ + if (policy->cur < policy->max) + dbs_info->rate_mult = + od_tuners.sampling_down_factor; + dbs_freq_increase(policy, policy->max); + return; + } + + /* Check for frequency decrease */ + /* if we cannot reduce the frequency anymore, break out early */ + if (policy->cur == policy->min) + return; + + /* + * The optimal frequency is the lowest frequency that can support the + * current CPU usage without triggering the up policy. To be safe, we + * focus 10 points under the threshold. + */ + if (load_freq < (od_tuners.up_threshold - od_tuners.down_differential) * + policy->cur) { + unsigned int freq_next; + freq_next = load_freq / (od_tuners.up_threshold - + od_tuners.down_differential); + + /* No longer fully busy, reset rate_mult */ + dbs_info->rate_mult = 1; + + if (freq_next < policy->min) + freq_next = policy->min; + + if (!od_tuners.powersave_bias) { + __cpufreq_driver_target(policy, freq_next, + CPUFREQ_RELATION_L); + } else { + int freq = powersave_bias_target(policy, freq_next, + CPUFREQ_RELATION_L); + __cpufreq_driver_target(policy, freq, + CPUFREQ_RELATION_L); + } + } } -define_one_global_ro(sampling_rate_min); +static void od_dbs_timer(struct work_struct *work) +{ + struct od_cpu_dbs_info_s *dbs_info = + container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work); + unsigned int cpu = dbs_info->cdbs.cpu; + int delay, sample_type = dbs_info->sample_type; -/* cpufreq_ondemand Governor Tunables */ -#define show_one(file_name, object) \ -static ssize_t show_##file_name \ -(struct kobject *kobj, struct attribute *attr, char *buf) \ -{ \ - return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ + mutex_lock(&dbs_info->cdbs.timer_mutex); + + /* Common NORMAL_SAMPLE setup */ + dbs_info->sample_type = OD_NORMAL_SAMPLE; + if (sample_type == OD_SUB_SAMPLE) { + delay = dbs_info->freq_lo_jiffies; + __cpufreq_driver_target(dbs_info->cdbs.cur_policy, + dbs_info->freq_lo, CPUFREQ_RELATION_H); + } else { + dbs_check_cpu(&od_dbs_data, cpu); + if (dbs_info->freq_lo) { + /* Setup timer for SUB_SAMPLE */ + dbs_info->sample_type = OD_SUB_SAMPLE; + delay = dbs_info->freq_hi_jiffies; + } else { + delay = delay_for_sampling_rate(dbs_info->rate_mult); + } + } + + schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, delay); + mutex_unlock(&dbs_info->cdbs.timer_mutex); +} + +/************************** sysfs interface ************************/ + +static ssize_t show_sampling_rate_min(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", od_dbs_data.min_sampling_rate); } -show_one(sampling_rate, sampling_rate); -show_one(io_is_busy, io_is_busy); -show_one(up_threshold, up_threshold); -show_one(sampling_down_factor, sampling_down_factor); -show_one(ignore_nice_load, ignore_nice); -show_one(powersave_bias, powersave_bias); /** * update_sampling_rate - update sampling rate effective immediately if needed. * @new_rate: new sampling rate * * If new rate is smaller than the old, simply updating - * dbs_tuners_int.sampling_rate might not be appropriate.
For example, - * if the original sampling_rate was 1 second and the requested new sampling - * rate is 10 ms because the user needs immediate reaction from ondemand - * governor, but not sure if higher frequency will be required or not, - * then, the governor may change the sampling rate too late; up to 1 second - * later. Thus, if we are reducing the sampling rate, we need to make the - * new value effective immediately. + * dbs_tuners_int.sampling_rate might not be appropriate. For example, if the + * original sampling_rate was 1 second and the requested new sampling rate is 10 + * ms because the user needs immediate reaction from ondemand governor, but not + * sure if higher frequency will be required or not, then, the governor may + * change the sampling rate too late; up to 1 second later. Thus, if we are + * reducing the sampling rate, we need to make the new value effective + * immediately. */ static void update_sampling_rate(unsigned int new_rate) { int cpu; - dbs_tuners_ins.sampling_rate = new_rate - = max(new_rate, min_sampling_rate); + od_tuners.sampling_rate = new_rate = max(new_rate, + od_dbs_data.min_sampling_rate); for_each_online_cpu(cpu) { struct cpufreq_policy *policy; - struct cpu_dbs_info_s *dbs_info; + struct od_cpu_dbs_info_s *dbs_info; unsigned long next_sampling, appointed_at; policy = cpufreq_cpu_get(cpu); @@ -254,28 +281,28 @@ static void update_sampling_rate(unsigned int new_rate) dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu); cpufreq_cpu_put(policy); - mutex_lock(&dbs_info->timer_mutex); + mutex_lock(&dbs_info->cdbs.timer_mutex); - if (!delayed_work_pending(&dbs_info->work)) { - mutex_unlock(&dbs_info->timer_mutex); + if (!delayed_work_pending(&dbs_info->cdbs.work)) { + mutex_unlock(&dbs_info->cdbs.timer_mutex); continue; } - next_sampling = jiffies + usecs_to_jiffies(new_rate); - appointed_at = dbs_info->work.timer.expires; - + next_sampling = jiffies + usecs_to_jiffies(new_rate); + appointed_at = dbs_info->cdbs.work.timer.expires; if (time_before(next_sampling, appointed_at)) { - mutex_unlock(&dbs_info->timer_mutex); - cancel_delayed_work_sync(&dbs_info->work); - mutex_lock(&dbs_info->timer_mutex); + mutex_unlock(&dbs_info->cdbs.timer_mutex); + cancel_delayed_work_sync(&dbs_info->cdbs.work); + mutex_lock(&dbs_info->cdbs.timer_mutex); - schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, - usecs_to_jiffies(new_rate)); + schedule_delayed_work_on(dbs_info->cdbs.cpu, + &dbs_info->cdbs.work, + usecs_to_jiffies(new_rate)); } - mutex_unlock(&dbs_info->timer_mutex); + mutex_unlock(&dbs_info->cdbs.timer_mutex); } } @@ -300,7 +327,7 @@ static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, ret = sscanf(buf, "%u", &input); if (ret != 1) return -EINVAL; - dbs_tuners_ins.io_is_busy = !!input; + od_tuners.io_is_busy = !!input; return count; } @@ -315,7 +342,7 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, input < MIN_FREQUENCY_UP_THRESHOLD) { return -EINVAL; } - dbs_tuners_ins.up_threshold = input; + od_tuners.up_threshold = input; return count; } @@ -328,12 +355,12 @@ static ssize_t store_sampling_down_factor(struct kobject *a, if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) return -EINVAL; - dbs_tuners_ins.sampling_down_factor = input; + od_tuners.sampling_down_factor = input; /* Reset down sampling multiplier in case it was active */ for_each_online_cpu(j) { - struct cpu_dbs_info_s *dbs_info; - dbs_info = &per_cpu(od_cpu_dbs_info, j); + struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, + 
j); dbs_info->rate_mult = 1; } return count; @@ -354,19 +381,20 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, if (input > 1) input = 1; - if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ + if (input == od_tuners.ignore_nice) { /* nothing to do */ return count; } - dbs_tuners_ins.ignore_nice = input; + od_tuners.ignore_nice = input; /* we need to re-evaluate prev_cpu_idle */ for_each_online_cpu(j) { - struct cpu_dbs_info_s *dbs_info; + struct od_cpu_dbs_info_s *dbs_info; dbs_info = &per_cpu(od_cpu_dbs_info, j); - dbs_info->prev_cpu_idle = get_cpu_idle_time(j, - &dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) - dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j, + &dbs_info->cdbs.prev_cpu_wall); + if (od_tuners.ignore_nice) + dbs_info->cdbs.prev_cpu_nice = + kcpustat_cpu(j).cpustat[CPUTIME_NICE]; } return count; @@ -385,17 +413,25 @@ static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b, if (input > 1000) input = 1000; - dbs_tuners_ins.powersave_bias = input; + od_tuners.powersave_bias = input; ondemand_powersave_bias_init(); return count; } +show_one(od, sampling_rate, sampling_rate); +show_one(od, io_is_busy, io_is_busy); +show_one(od, up_threshold, up_threshold); +show_one(od, sampling_down_factor, sampling_down_factor); +show_one(od, ignore_nice_load, ignore_nice); +show_one(od, powersave_bias, powersave_bias); + define_one_global_rw(sampling_rate); define_one_global_rw(io_is_busy); define_one_global_rw(up_threshold); define_one_global_rw(sampling_down_factor); define_one_global_rw(ignore_nice_load); define_one_global_rw(powersave_bias); +define_one_global_ro(sampling_rate_min); static struct attribute *dbs_attributes[] = { &sampling_rate_min.attr, @@ -408,354 +444,71 @@ static struct attribute *dbs_attributes[] = { NULL }; -static struct attribute_group dbs_attr_group = { +static struct attribute_group od_attr_group = { .attrs = dbs_attributes, .name = "ondemand", }; /************************** sysfs end ************************/ -static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) -{ - if (dbs_tuners_ins.powersave_bias) - freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H); - else if (p->cur == p->max) - return; - - __cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ? - CPUFREQ_RELATION_L : CPUFREQ_RELATION_H); -} - -static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) -{ - unsigned int max_load_freq; - - struct cpufreq_policy *policy; - unsigned int j; - - this_dbs_info->freq_lo = 0; - policy = this_dbs_info->cur_policy; - - /* - * Every sampling_rate, we check, if current idle time is less - * than 20% (default), then we try to increase frequency - * Every sampling_rate, we look for a the lowest - * frequency which can sustain the load while keeping idle time over - * 30%. If such a frequency exist, we try to decrease to this frequency. - * - * Any frequency increase takes it to the maximum frequency. 
- * Frequency reduction happens at minimum steps of - * 5% (default) of current frequency - */ - - /* Get Absolute Load - in terms of freq */ - max_load_freq = 0; - - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; - cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; - unsigned int idle_time, wall_time, iowait_time; - unsigned int load, load_freq; - int freq_avg; - - j_dbs_info = &per_cpu(od_cpu_dbs_info, j); - - cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); - cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); - - wall_time = (unsigned int) - (cur_wall_time - j_dbs_info->prev_cpu_wall); - j_dbs_info->prev_cpu_wall = cur_wall_time; - - idle_time = (unsigned int) - (cur_idle_time - j_dbs_info->prev_cpu_idle); - j_dbs_info->prev_cpu_idle = cur_idle_time; - - iowait_time = (unsigned int) - (cur_iowait_time - j_dbs_info->prev_cpu_iowait); - j_dbs_info->prev_cpu_iowait = cur_iowait_time; - - if (dbs_tuners_ins.ignore_nice) { - u64 cur_nice; - unsigned long cur_nice_jiffies; - - cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - - j_dbs_info->prev_cpu_nice; - /* - * Assumption: nice time between sampling periods will - * be less than 2^32 jiffies for 32 bit sys - */ - cur_nice_jiffies = (unsigned long) - cputime64_to_jiffies64(cur_nice); - - j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - idle_time += jiffies_to_usecs(cur_nice_jiffies); - } - - /* - * For the purpose of ondemand, waiting for disk IO is an - * indication that you're performance critical, and not that - * the system is actually idle. So subtract the iowait time - * from the cpu idle time. - */ - - if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time) - idle_time -= iowait_time; +define_get_cpu_dbs_routines(od_cpu_dbs_info); - if (unlikely(!wall_time || wall_time < idle_time)) - continue; - - load = 100 * (wall_time - idle_time) / wall_time; - - freq_avg = __cpufreq_driver_getavg(policy, j); - if (freq_avg <= 0) - freq_avg = policy->cur; - - load_freq = load * freq_avg; - if (load_freq > max_load_freq) - max_load_freq = load_freq; - } - - /* Check for frequency increase */ - if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) { - /* If switching to max speed, apply sampling_down_factor */ - if (policy->cur < policy->max) - this_dbs_info->rate_mult = - dbs_tuners_ins.sampling_down_factor; - dbs_freq_increase(policy, policy->max); - return; - } - - /* Check for frequency decrease */ - /* if we cannot reduce the frequency anymore, break out early */ - if (policy->cur == policy->min) - return; - - /* - * The optimal frequency is the frequency that is the lowest that - * can support the current CPU usage without triggering the up - * policy. To be safe, we focus 10 points under the threshold. 
- */ - if (max_load_freq < - (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) * - policy->cur) { - unsigned int freq_next; - freq_next = max_load_freq / - (dbs_tuners_ins.up_threshold - - dbs_tuners_ins.down_differential); - - /* No longer fully busy, reset rate_mult */ - this_dbs_info->rate_mult = 1; - - if (freq_next < policy->min) - freq_next = policy->min; - - if (!dbs_tuners_ins.powersave_bias) { - __cpufreq_driver_target(policy, freq_next, - CPUFREQ_RELATION_L); - } else { - int freq = powersave_bias_target(policy, freq_next, - CPUFREQ_RELATION_L); - __cpufreq_driver_target(policy, freq, - CPUFREQ_RELATION_L); - } - } -} - -static void do_dbs_timer(struct work_struct *work) -{ - struct cpu_dbs_info_s *dbs_info = - container_of(work, struct cpu_dbs_info_s, work.work); - unsigned int cpu = dbs_info->cpu; - int sample_type = dbs_info->sample_type; - - int delay; - - mutex_lock(&dbs_info->timer_mutex); - - /* Common NORMAL_SAMPLE setup */ - dbs_info->sample_type = DBS_NORMAL_SAMPLE; - if (!dbs_tuners_ins.powersave_bias || - sample_type == DBS_NORMAL_SAMPLE) { - dbs_check_cpu(dbs_info); - if (dbs_info->freq_lo) { - /* Setup timer for SUB_SAMPLE */ - dbs_info->sample_type = DBS_SUB_SAMPLE; - delay = dbs_info->freq_hi_jiffies; - } else { - /* We want all CPUs to do sampling nearly on - * same jiffy - */ - delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate - * dbs_info->rate_mult); - - if (num_online_cpus() > 1) - delay -= jiffies % delay; - } - } else { - __cpufreq_driver_target(dbs_info->cur_policy, - dbs_info->freq_lo, CPUFREQ_RELATION_H); - delay = dbs_info->freq_lo_jiffies; - } - schedule_delayed_work_on(cpu, &dbs_info->work, delay); - mutex_unlock(&dbs_info->timer_mutex); -} - -static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) -{ - /* We want all CPUs to do sampling nearly on same jiffy */ - int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); - - if (num_online_cpus() > 1) - delay -= jiffies % delay; +static struct od_ops od_ops = { + .io_busy = should_io_be_busy, + .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu, + .powersave_bias_target = powersave_bias_target, + .freq_increase = dbs_freq_increase, +}; - dbs_info->sample_type = DBS_NORMAL_SAMPLE; - INIT_DEFERRABLE_WORK(&dbs_info->work, do_dbs_timer); - schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay); -} +static struct dbs_data od_dbs_data = { + .governor = GOV_ONDEMAND, + .attr_group = &od_attr_group, + .tuners = &od_tuners, + .get_cpu_cdbs = get_cpu_cdbs, + .get_cpu_dbs_info_s = get_cpu_dbs_info_s, + .gov_dbs_timer = od_dbs_timer, + .gov_check_cpu = od_check_cpu, + .gov_ops = &od_ops, +}; -static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) +static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event) { - cancel_delayed_work_sync(&dbs_info->work); + return cpufreq_governor_dbs(&od_dbs_data, policy, event); } -/* - * Not all CPUs want IO time to be accounted as busy; this dependson how - * efficient idling at a higher frequency/voltage is. - * Pavel Machek says this is not so for various generations of AMD and old - * Intel systems. - * Mike Chan (androidlcom) calis this is also not true for ARM. - * Because of this, whitelist specific known (series) of CPUs by default, and - * leave all others up to the user. - */ -static int should_io_be_busy(void) -{ -#if defined(CONFIG_X86) - /* - * For Intel, Core 2 (model 15) andl later have an efficient idle. 
- */ - if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && - boot_cpu_data.x86 == 6 && - boot_cpu_data.x86_model >= 15) - return 1; +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND +static #endif - return 0; -} - -static int cpufreq_governor_dbs(struct cpufreq_policy *policy, - unsigned int event) -{ - unsigned int cpu = policy->cpu; - struct cpu_dbs_info_s *this_dbs_info; - unsigned int j; - int rc; - - this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu); - - switch (event) { - case CPUFREQ_GOV_START: - if ((!cpu_online(cpu)) || (!policy->cur)) - return -EINVAL; - - mutex_lock(&dbs_mutex); - - dbs_enable++; - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; - j_dbs_info = &per_cpu(od_cpu_dbs_info, j); - j_dbs_info->cur_policy = policy; - - j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, - &j_dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) - j_dbs_info->prev_cpu_nice = - kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - } - this_dbs_info->cpu = cpu; - this_dbs_info->rate_mult = 1; - ondemand_powersave_bias_init_cpu(cpu); - /* - * Start the timerschedule work, when this governor - * is used for first time - */ - if (dbs_enable == 1) { - unsigned int latency; - - rc = sysfs_create_group(cpufreq_global_kobject, - &dbs_attr_group); - if (rc) { - mutex_unlock(&dbs_mutex); - return rc; - } - - /* policy latency is in nS. Convert it to uS first */ - latency = policy->cpuinfo.transition_latency / 1000; - if (latency == 0) - latency = 1; - /* Bring kernel and HW constraints together */ - min_sampling_rate = max(min_sampling_rate, - MIN_LATENCY_MULTIPLIER * latency); - dbs_tuners_ins.sampling_rate = - max(min_sampling_rate, - latency * LATENCY_MULTIPLIER); - dbs_tuners_ins.io_is_busy = should_io_be_busy(); - } - mutex_unlock(&dbs_mutex); - - mutex_init(&this_dbs_info->timer_mutex); - dbs_timer_init(this_dbs_info); - break; - - case CPUFREQ_GOV_STOP: - dbs_timer_exit(this_dbs_info); - - mutex_lock(&dbs_mutex); - mutex_destroy(&this_dbs_info->timer_mutex); - dbs_enable--; - mutex_unlock(&dbs_mutex); - if (!dbs_enable) - sysfs_remove_group(cpufreq_global_kobject, - &dbs_attr_group); - - break; - - case CPUFREQ_GOV_LIMITS: - mutex_lock(&this_dbs_info->timer_mutex); - if (policy->max < this_dbs_info->cur_policy->cur) - __cpufreq_driver_target(this_dbs_info->cur_policy, - policy->max, CPUFREQ_RELATION_H); - else if (policy->min > this_dbs_info->cur_policy->cur) - __cpufreq_driver_target(this_dbs_info->cur_policy, - policy->min, CPUFREQ_RELATION_L); - dbs_check_cpu(this_dbs_info); - mutex_unlock(&this_dbs_info->timer_mutex); - break; - } - return 0; -} +struct cpufreq_governor cpufreq_gov_ondemand = { + .name = "ondemand", + .governor = od_cpufreq_governor_dbs, + .max_transition_latency = TRANSITION_LATENCY_LIMIT, + .owner = THIS_MODULE, +}; static int __init cpufreq_gov_dbs_init(void) { u64 idle_time; int cpu = get_cpu(); + mutex_init(&od_dbs_data.mutex); idle_time = get_cpu_idle_time_us(cpu, NULL); put_cpu(); if (idle_time != -1ULL) { /* Idle micro accounting is supported. Use finer thresholds */ - dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; - dbs_tuners_ins.down_differential = - MICRO_FREQUENCY_DOWN_DIFFERENTIAL; + od_tuners.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; + od_tuners.down_differential = MICRO_FREQUENCY_DOWN_DIFFERENTIAL; /* * In nohz/micro accounting case we set the minimum frequency * not depending on HZ, but fixed (very low). The deferred * timer might skip some samples if idle/sleeping as needed. 
*/ - min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; + od_dbs_data.min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; } else { /* For correct statistics, we need 10 ticks for each measure */ - min_sampling_rate = - MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); + od_dbs_data.min_sampling_rate = MIN_SAMPLING_RATE_RATIO * + jiffies_to_usecs(10); } return cpufreq_register_governor(&cpufreq_gov_ondemand); @@ -766,7 +519,6 @@ static void __exit cpufreq_gov_dbs_exit(void) cpufreq_unregister_governor(&cpufreq_gov_ondemand); } - MODULE_AUTHOR("Venkatesh Pallipadi "); MODULE_AUTHOR("Alexey Starikovskiy "); MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for " diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index d03c21993052..a55b88eaf96a 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -407,10 +407,4 @@ void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, unsigned int cpu); void cpufreq_frequency_table_put_attr(unsigned int cpu); - -/********************************************************************* - * Governor Helpers * - *********************************************************************/ -cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall); - #endif /* _LINUX_CPUFREQ_H */ -- cgit v1.2.3 From 1e7586a18a2ab69a160837c0a4be31f7147cfb5e Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 26 Oct 2012 00:51:21 +0200 Subject: cpufreq: Fix sparse warnings by updating cputime64_t to u64 There were a few sparse warnings due to a mismatch of types in function arguments. Two types were used: u64 and cputime64_t. Both are actually u64, so use u64 only. Reported-by: Fengguang Wu Signed-off-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/cpufreq_governor.c | 4 ++-- drivers/cpufreq/cpufreq_governor.h | 11 +++++------ drivers/cpufreq/cpufreq_stats.c | 4 ++-- 3 files changed, 9 insertions(+), 10 deletions(-) diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 5ea2c829a796..be9d255e292f 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -50,7 +50,7 @@ static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) return jiffies_to_usecs(idle_time); } -cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) +u64 get_cpu_idle_time(unsigned int cpu, u64 *wall) { u64 idle_time = get_cpu_idle_time_us(cpu, NULL); @@ -83,7 +83,7 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu) /* Get Absolute Load (in terms of freq for ondemand gov) */ for_each_cpu(j, policy->cpus) { struct cpu_dbs_common_info *j_cdbs; - cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; + u64 cur_wall_time, cur_idle_time, cur_iowait_time; unsigned int idle_time, wall_time, iowait_time; unsigned int load; diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h index 34e14adfc3f9..f6616540c53d 100644 --- a/drivers/cpufreq/cpufreq_governor.h +++ b/drivers/cpufreq/cpufreq_governor.h @@ -17,7 +17,6 @@ #ifndef _CPUFREQ_GOVERNER_H #define _CPUFREQ_GOVERNER_H -#include #include #include #include @@ -72,9 +71,9 @@ static void *get_cpu_dbs_info_s(int cpu) \ /* Per cpu structures */ struct cpu_dbs_common_info { int cpu; - cputime64_t prev_cpu_idle; - cputime64_t prev_cpu_wall; - cputime64_t prev_cpu_nice; + u64 prev_cpu_idle; + u64 prev_cpu_wall; + u64 prev_cpu_nice; struct cpufreq_policy *cur_policy; struct delayed_work work; /* @@ -87,7 +86,7 @@ struct cpu_dbs_common_info { struct
od_cpu_dbs_info_s { struct cpu_dbs_common_info cdbs; - cputime64_t prev_cpu_iowait; + u64 prev_cpu_iowait; struct cpufreq_frequency_table *freq_table; unsigned int freq_lo; unsigned int freq_lo_jiffies; @@ -170,7 +169,7 @@ static inline int delay_for_sampling_rate(unsigned int sampling_rate) return delay; } -cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall); +u64 get_cpu_idle_time(unsigned int cpu, u64 *wall); void dbs_check_cpu(struct dbs_data *dbs_data, int cpu); int cpufreq_governor_dbs(struct dbs_data *dbs_data, struct cpufreq_policy *policy, unsigned int event); diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index 399831690fed..e40e50809644 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c @@ -37,7 +37,7 @@ struct cpufreq_stats { unsigned int max_state; unsigned int state_num; unsigned int last_index; - cputime64_t *time_in_state; + u64 *time_in_state; unsigned int *freq_table; #ifdef CONFIG_CPU_FREQ_STAT_DETAILS unsigned int *trans_table; @@ -223,7 +223,7 @@ static int cpufreq_stats_create_table(struct cpufreq_policy *policy, count++; } - alloc_size = count * sizeof(int) + count * sizeof(cputime64_t); + alloc_size = count * sizeof(int) + count * sizeof(u64); #ifdef CONFIG_CPU_FREQ_STAT_DETAILS alloc_size += count * count * sizeof(int); -- cgit v1.2.3 From da58445570326ac2a342770a9c9a2646276e1721 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 26 Oct 2012 00:51:32 +0200 Subject: cpufreq: Fix sparse warning by making local function static cpufreq_disabled() is a local function, so it should be marked static. Signed-off-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/cpufreq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index f552d5fe0f8f..261ef654a352 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -129,7 +129,7 @@ static int __init init_cpufreq_transition_notifier_list(void) pure_initcall(init_cpufreq_transition_notifier_list); static int off __read_mostly; -int cpufreq_disabled(void) +static int cpufreq_disabled(void) { return off; } -- cgit v1.2.3 From 5a1c022850ea5d64c2997bf9b89f5ae112d5ee4d Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Wed, 31 Oct 2012 01:28:15 +0100 Subject: cpufreq: Avoid calling cpufreq driver's target() routine if target_freq == policy->cur Avoid calling the cpufreq driver's target() routine if the new frequency is the same as the policy's current frequency. Signed-off-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/cpufreq.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 261ef654a352..28dc134cec36 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1476,6 +1476,10 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy, pr_debug("target for CPU %u: %u kHz, relation %u\n", policy->cpu, target_freq, relation); + + if (target_freq == policy->cur) + return 0; + if (cpu_online(policy->cpu) && cpufreq_driver->target) retval = cpufreq_driver->target(policy, target_freq, relation); -- cgit v1.2.3 From 7249924e537816368c4a35afd97ab311f75a6368 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Wed, 31 Oct 2012 01:28:21 +0100 Subject: cpufreq: Make sure target freq is within limits __cpufreq_driver_target() must not pass a target frequency beyond the limits of the current policy. Today, most cpufreq platform drivers do this check in their target routines.
Why not move it to __cpufreq_driver_target()? Signed-off-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/cpufreq.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 28dc134cec36..2f5ac2dcfda6 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1470,12 +1470,19 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy, unsigned int relation) { int retval = -EINVAL; + unsigned int old_target_freq = target_freq; if (cpufreq_disabled()) return -ENODEV; - pr_debug("target for CPU %u: %u kHz, relation %u\n", policy->cpu, - target_freq, relation); + /* Make sure that target_freq is within supported range */ + if (target_freq > policy->max) + target_freq = policy->max; + if (target_freq < policy->min) + target_freq = policy->min; + + pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n", + policy->cpu, target_freq, relation, old_target_freq); if (target_freq == policy->cur) return 0; -- cgit v1.2.3 From f55c9c26278b0fcfa3336eccbba3d8a782da8aed Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Wed, 31 Oct 2012 05:49:13 +0000 Subject: cpufreq: Remove unnecessary initialization of a local variable Remove an unnecessary initializer for the 'ret' variable in __cpufreq_set_policy(). [rjw: Modified the subject and changelog slightly.] Signed-off-by: Jingoo Han Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/cpufreq.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 2f5ac2dcfda6..1f93dbd72355 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -404,7 +404,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, static ssize_t store_##file_name \ (struct cpufreq_policy *policy, const char *buf, size_t count) \ { \ - unsigned int ret = -EINVAL; \ + unsigned int ret; \ struct cpufreq_policy new_policy; \ \ ret = cpufreq_get_policy(&new_policy, policy->cpu); \ @@ -459,7 +459,7 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf) static ssize_t store_scaling_governor(struct cpufreq_policy *policy, const char *buf, size_t count) { - unsigned int ret = -EINVAL; + unsigned int ret; char str_governor[16]; struct cpufreq_policy new_policy; -- cgit v1.2.3 From ce2650d40dff23f2c6f9718bb3ec63e12c5c7f27 Mon Sep 17 00:00:00 2001 From: Bill Pemberton Date: Wed, 21 Nov 2012 01:18:30 +0100 Subject: cpufreq: remove use of __devexit_p CONFIG_HOTPLUG is going away as an option so __devexit_p is no longer needed. Signed-off-by: Bill Pemberton Signed-off-by: Rafael J. 
Wysocki --- drivers/cpufreq/longhaul.c | 2 +- drivers/cpufreq/powernow-k8.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c index 53ddbc760af7..8d7ebb286e6f 100644 --- a/drivers/cpufreq/longhaul.c +++ b/drivers/cpufreq/longhaul.c @@ -946,7 +946,7 @@ static struct cpufreq_driver longhaul_driver = { .target = longhaul_target, .get = longhaul_get, .init = longhaul_cpu_init, - .exit = __devexit_p(longhaul_cpu_exit), + .exit = longhaul_cpu_exit, .name = "longhaul", .owner = THIS_MODULE, .attr = longhaul_attr, diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index e3ebb4fa2c3e..1fd0c8a48ed4 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c @@ -1242,7 +1242,7 @@ static struct cpufreq_driver cpufreq_amd64_driver = { .target = powernowk8_target, .bios_limit = acpi_processor_get_bios_limit, .init = powernowk8_cpu_init, - .exit = __devexit_p(powernowk8_cpu_exit), + .exit = powernowk8_cpu_exit, .get = powernowk8_get, .name = "powernow-k8", .owner = THIS_MODULE, -- cgit v1.2.3 From c5fa4ab5ab417d8d7bd658957ce7b7e6ef0cdaf3 Mon Sep 17 00:00:00 2001 From: Bill Pemberton Date: Wed, 21 Nov 2012 01:18:40 +0100 Subject: cpufreq: remove use of __devinit CONFIG_HOTPLUG is going away as an option so __devinit is no longer needed. Signed-off-by: Bill Pemberton Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/cpufreq-cpu0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c index e9158278c71d..52bf36d599f5 100644 --- a/drivers/cpufreq/cpufreq-cpu0.c +++ b/drivers/cpufreq/cpufreq-cpu0.c @@ -174,7 +174,7 @@ static struct cpufreq_driver cpu0_cpufreq_driver = { .attr = cpu0_cpufreq_attr, }; -static int __devinit cpu0_cpufreq_driver_init(void) +static int cpu0_cpufreq_driver_init(void) { struct device_node *np; int ret; -- cgit v1.2.3 From c0e61cb151f2ff8edd02af23b2bd49f625288124 Mon Sep 17 00:00:00 2001 From: Bill Pemberton Date: Wed, 21 Nov 2012 01:18:49 +0100 Subject: cpufreq: remove use of __devexit CONFIG_HOTPLUG is going away as an option so __devexit is no longer needed. Signed-off-by: Bill Pemberton Signed-off-by: Rafael J. 
Wysocki --- drivers/cpufreq/longhaul.c | 2 +- drivers/cpufreq/powernow-k8.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c index 8d7ebb286e6f..f1fa500ac105 100644 --- a/drivers/cpufreq/longhaul.c +++ b/drivers/cpufreq/longhaul.c @@ -930,7 +930,7 @@ static int __cpuinit longhaul_cpu_init(struct cpufreq_policy *policy) return 0; } -static int __devexit longhaul_cpu_exit(struct cpufreq_policy *policy) +static int longhaul_cpu_exit(struct cpufreq_policy *policy) { cpufreq_frequency_table_put_attr(policy->cpu); return 0; diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index 1fd0c8a48ed4..056faf6af1a9 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c @@ -1186,7 +1186,7 @@ err_out: return -ENODEV; } -static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol) +static int powernowk8_cpu_exit(struct cpufreq_policy *pol) { struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); -- cgit v1.2.3 From fd06a20852e145b2b96c7c6b655fcb9f23ac4e00 Mon Sep 17 00:00:00 2001 From: Tomasz Figa Date: Thu, 22 Nov 2012 00:09:27 +0100 Subject: cpufreq: exynos: Broadcast frequency change notifications for all cores On Exynos SoCs all cores share the same frequency setting, so changing the frequency of one core affects the rest of the cores. This patch modifies the exynos-cpufreq driver to inform the cpufreq core about this behavior and to broadcast frequency change notifications for all cores. Signed-off-by: Tomasz Figa Signed-off-by: Kyungmin Park Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/exynos-cpufreq.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c index af2d81e10f71..c0d54a8ba949 100644 --- a/drivers/cpufreq/exynos-cpufreq.c +++ b/drivers/cpufreq/exynos-cpufreq.c @@ -100,7 +100,8 @@ static int exynos_target(struct cpufreq_policy *policy, } arm_volt = volt_table[index]; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + for_each_cpu(freqs.cpu, policy->cpus) + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); /* When the new frequency is higher than current frequency */ if ((freqs.new > freqs.old) && !safe_arm_volt) { @@ -115,7 +116,8 @@ static int exynos_target(struct cpufreq_policy *policy, if (freqs.new != freqs.old) exynos_info->set_freq(old_index, index); - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + for_each_cpu(freqs.cpu, policy->cpus) + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); /* When the new frequency is lower than current frequency */ if ((freqs.new < freqs.old) || @@ -235,6 +237,7 @@ static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy) cpumask_copy(policy->related_cpus, cpu_possible_mask); cpumask_copy(policy->cpus, cpu_online_mask); } else { + policy->shared_type = CPUFREQ_SHARED_TYPE_ANY; cpumask_setall(policy->cpus); } -- cgit v1.2.3 From 5542721a337f3fcfa0585c915827dc391f36fd28 Mon Sep 17 00:00:00 2001 From: Tushar Behera Date: Thu, 22 Nov 2012 00:19:25 +0100 Subject: cpufreq: exynos: Use static for functions used in only this file Fixes the following sparse warnings. drivers/cpufreq/exynos-cpufreq.c:34:5: warning: symbol 'exynos_verify_speed' was not declared. Should it be static? drivers/cpufreq/exynos-cpufreq.c:40:14: warning: symbol 'exynos_getspeed' was not declared. Should it be static? Signed-off-by: Tushar Behera Signed-off-by: Rafael J.
Wysocki --- drivers/cpufreq/exynos-cpufreq.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c index c0d54a8ba949..7012ea8bf1e7 100644 --- a/drivers/cpufreq/exynos-cpufreq.c +++ b/drivers/cpufreq/exynos-cpufreq.c @@ -31,13 +31,13 @@ static unsigned int locking_frequency; static bool frequency_locked; static DEFINE_MUTEX(cpufreq_lock); -int exynos_verify_speed(struct cpufreq_policy *policy) +static int exynos_verify_speed(struct cpufreq_policy *policy) { return cpufreq_frequency_table_verify(policy, exynos_info->freq_table); } -unsigned int exynos_getspeed(unsigned int cpu) +static unsigned int exynos_getspeed(unsigned int cpu) { return clk_get_rate(exynos_info->cpu_clk) / 1000; } -- cgit v1.2.3 From d3c31a773fa33f78a29bb00ed0dcf8fa55dd4b3a Mon Sep 17 00:00:00 2001 From: Fabio Baltieri Date: Fri, 23 Nov 2012 20:48:08 +0100 Subject: cpufreq: ondemand: fix wrong delay sampling rate Restore the correct delay value for ondemand's od_dbs_timer, as it was changed erroneously in commit 83f0e55 (cpufreq: governors: remove redundant code). Signed-off-by: Fabio Baltieri Reviewed-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/cpufreq_ondemand.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index bdaab9206303..cca3e9fee9a7 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -234,7 +234,8 @@ static void od_dbs_timer(struct work_struct *work) dbs_info->sample_type = OD_SUB_SAMPLE; delay = dbs_info->freq_hi_jiffies; } else { - delay = delay_for_sampling_rate(dbs_info->rate_mult); + delay = delay_for_sampling_rate(od_tuners.sampling_rate + * dbs_info->rate_mult); } } -- cgit v1.2.3 From a0e5af3cb89b59aa6c62b1f97c8d553ff3fb51c1 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Sat, 24 Nov 2012 10:08:47 +0100 Subject: cpufreq: governors: Fix jiffies/cputime mixup (revisited) This change was made by commit 8636fd2 (cpufreq: fix jiffies/cputime mixup in conservative/ondemand governors) before, but it was then inadvertently reverted by commit 4471a34 (cpufreq: governors: remove redundant code). The changelog of commit 8636fd2 says: The function get_cpu_idle_time_jiffy in both the conservative and ondemand governors use jiffies_to_usecs to convert a cputime value to usecs which gives the wrong value on architectures where cputime and jiffies use different units. Only matters if NO_HZ is disabled, since otherwise get_cpu_idle_time_us should already return a valid value, and get_cpu_idle_time_jiffy isn't actually called. Since now we have only one common get_cpu_idle_time_jiffy() used by both governors in question, modify it along the lines of commit 8636fd2 to restore the correct behavior. Signed-off-by: Rafael J.
Wysocki Reviewed-by: Viresh Kumar --- drivers/cpufreq/cpufreq_governor.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index be9d255e292f..6c5f1d383cdc 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -45,9 +45,9 @@ static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) idle_time = cur_wall_time - busy_time; if (wall) - *wall = jiffies_to_usecs(cur_wall_time); + *wall = cputime_to_usecs(cur_wall_time); - return jiffies_to_usecs(idle_time); + return cputime_to_usecs(idle_time); } u64 get_cpu_idle_time(unsigned int cpu, u64 *wall) -- cgit v1.2.3 From 420993221175a45db5af012c53fd2fef4d9533dc Mon Sep 17 00:00:00 2001 From: Deepak Sikri Date: Tue, 27 Nov 2012 14:05:26 +0100 Subject: cpufreq: SPEAr: Add CPUFreq driver SPEAr is an ARM-based family of SoCs. This patch adds cpufreq driver support for SPEAr SoCs. It is supported via DT only, so the bindings are described in the binding document. Signed-off-by: Deepak Sikri Signed-off-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki --- .../devicetree/bindings/cpufreq/cpufreq-spear.txt | 42 +++ arch/arm/Kconfig | 1 + drivers/cpufreq/Kconfig.arm | 7 + drivers/cpufreq/Makefile | 1 + drivers/cpufreq/spear-cpufreq.c | 291 +++++++++++++++++++++ 5 files changed, 342 insertions(+) create mode 100644 Documentation/devicetree/bindings/cpufreq/cpufreq-spear.txt create mode 100644 drivers/cpufreq/spear-cpufreq.c diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-spear.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-spear.txt new file mode 100644 index 000000000000..f3d44984d91c --- /dev/null +++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-spear.txt @@ -0,0 +1,42 @@ +SPEAr cpufreq driver +------------------- + +SPEAr SoC cpufreq driver for CPU frequency scaling. +It supports both uniprocessor (UP) and symmetric multiprocessor (SMP) systems +which share a clock across all CPUs. + +Required properties: +- cpufreq_tbl: Table of frequencies the CPU can be transitioned to, in + increasing order. + +Optional properties: +- clock-latency: Specify the possible maximum transition latency for the clock, + in units of nanoseconds. + +Both required and optional properties listed above must be defined under node +/cpus/cpu@0. + +Examples: +-------- +cpus { + + <...> + + cpu@0 { + compatible = "arm,cortex-a9"; + reg = <0>; + + <...> + + cpufreq_tbl = < 166000 + 200000 + 250000 + 300000 + 400000 + 500000 + 600000 >; + }; + + <...> + +}; diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index ade7e924bef5..159e99737e31 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -904,6 +904,7 @@ config ARCH_NOMADIK config PLAT_SPEAR bool "ST SPEAr" + select ARCH_HAS_CPUFREQ select ARCH_REQUIRE_GPIOLIB select ARM_AMBA select CLKDEV_LOOKUP diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 5961e6415f08..a0b3661d90b0 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -76,3 +76,10 @@ config ARM_EXYNOS5250_CPUFREQ help This adds the CPUFreq driver for Samsung EXYNOS5250 SoC. + +config ARM_SPEAR_CPUFREQ + bool "SPEAr CPUFreq support" + depends on PLAT_SPEAR + default y + help + This adds the CPUFreq driver support for SPEAr SoCs.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 5b1413e47216..1f254ec087c1 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -50,6 +50,7 @@ obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o +obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o ################################################################################## # PowerPC platform drivers diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c new file mode 100644 index 000000000000..4575cfe41755 --- /dev/null +++ b/drivers/cpufreq/spear-cpufreq.c @@ -0,0 +1,291 @@ +/* + * drivers/cpufreq/spear-cpufreq.c + * + * CPU Frequency Scaling for SPEAr platform + * + * Copyright (C) 2012 ST Microelectronics + * Deepak Sikri + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +/* SPEAr CPUFreq driver data structure */ +static struct { + struct clk *clk; + unsigned int transition_latency; + struct cpufreq_frequency_table *freq_tbl; + u32 cnt; +} spear_cpufreq; + +int spear_cpufreq_verify(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, spear_cpufreq.freq_tbl); +} + +static unsigned int spear_cpufreq_get(unsigned int cpu) +{ + return clk_get_rate(spear_cpufreq.clk) / 1000; +} + +static struct clk *spear1340_cpu_get_possible_parent(unsigned long newfreq) +{ + struct clk *sys_pclk; + int pclk; + /* + * In SPEAr1340, cpu clk's parent sys clk can take input from + * following sources + */ + const char *sys_clk_src[] = { + "sys_syn_clk", + "pll1_clk", + "pll2_clk", + "pll3_clk", + }; + + /* + * As sys clk can have multiple source with their own range + * limitation so we choose possible sources accordingly + */ + if (newfreq <= 300000000) + pclk = 0; /* src is sys_syn_clk */ + else if (newfreq > 300000000 && newfreq <= 500000000) + pclk = 3; /* src is pll3_clk */ + else if (newfreq == 600000000) + pclk = 1; /* src is pll1_clk */ + else + return ERR_PTR(-EINVAL); + + /* Get parent to sys clock */ + sys_pclk = clk_get(NULL, sys_clk_src[pclk]); + if (IS_ERR(sys_pclk)) + pr_err("Failed to get %s clock\n", sys_clk_src[pclk]); + + return sys_pclk; +} + +/* + * In SPEAr1340, we cannot use newfreq directly because we need to actually + * access a source clock (clk) which might not be ancestor of cpu at present. + * Hence in SPEAr1340 we would operate on source clock directly before switching + * cpu clock to it. 
+ */ +static int spear1340_set_cpu_rate(struct clk *sys_pclk, unsigned long newfreq) +{ + struct clk *sys_clk; + int ret = 0; + + sys_clk = clk_get_parent(spear_cpufreq.clk); + if (IS_ERR(sys_clk)) { + pr_err("failed to get cpu's parent (sys) clock\n"); + return PTR_ERR(sys_clk); + } + + /* Set the rate of the source clock before changing the parent */ + ret = clk_set_rate(sys_pclk, newfreq); + if (ret) { + pr_err("Failed to set sys clk rate to %lu\n", newfreq); + return ret; + } + + ret = clk_set_parent(sys_clk, sys_pclk); + if (ret) { + pr_err("Failed to set sys clk parent\n"); + return ret; + } + + return 0; +} + +static int spear_cpufreq_target(struct cpufreq_policy *policy, + unsigned int target_freq, unsigned int relation) +{ + struct cpufreq_freqs freqs; + unsigned long newfreq; + struct clk *srcclk; + int index, ret, mult = 1; + + if (cpufreq_frequency_table_target(policy, spear_cpufreq.freq_tbl, + target_freq, relation, &index)) + return -EINVAL; + + freqs.cpu = policy->cpu; + freqs.old = spear_cpufreq_get(0); + + newfreq = spear_cpufreq.freq_tbl[index].frequency * 1000; + if (of_machine_is_compatible("st,spear1340")) { + /* + * SPEAr1340 is special in the sense that due to the possibility + * of multiple clock sources for cpu clk's parent we can have + * different clock source for different frequency of cpu clk. + * Hence we need to choose one from amongst these possible clock + * sources. + */ + srcclk = spear1340_cpu_get_possible_parent(newfreq); + if (IS_ERR(srcclk)) { + pr_err("Failed to get src clk\n"); + return PTR_ERR(srcclk); + } + + /* SPEAr1340: src clk is always 2 * intended cpu clk */ + mult = 2; + } else { + /* + * src clock to be altered is ancestor of cpu clock. Hence we + * can directly work on cpu clk + */ + srcclk = spear_cpufreq.clk; + } + + newfreq = clk_round_rate(srcclk, newfreq * mult); + if (newfreq < 0) { + pr_err("clk_round_rate failed for cpu src clock\n"); + return newfreq; + } + + freqs.new = newfreq / 1000; + freqs.new /= mult; + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + + if (mult == 2) + ret = spear1340_set_cpu_rate(srcclk, newfreq); + else + ret = clk_set_rate(spear_cpufreq.clk, newfreq); + + /* Get current rate after clk_set_rate, in case of failure */ + if (ret) { + pr_err("CPU Freq: cpu clk_set_rate failed: %d\n", ret); + freqs.new = clk_get_rate(spear_cpufreq.clk) / 1000; + } + + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + return ret; +} + +static int spear_cpufreq_init(struct cpufreq_policy *policy) +{ + int ret; + + ret = cpufreq_frequency_table_cpuinfo(policy, spear_cpufreq.freq_tbl); + if (ret) { + pr_err("cpufreq_frequency_table_cpuinfo() failed"); + return ret; + } + + cpufreq_frequency_table_get_attr(spear_cpufreq.freq_tbl, policy->cpu); + policy->cpuinfo.transition_latency = spear_cpufreq.transition_latency; + policy->cur = spear_cpufreq_get(0); + + cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu)); + cpumask_copy(policy->related_cpus, policy->cpus); + + return 0; +} + +static int spear_cpufreq_exit(struct cpufreq_policy *policy) +{ + cpufreq_frequency_table_put_attr(policy->cpu); + return 0; +} + +static struct freq_attr *spear_cpufreq_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, +}; + +static struct cpufreq_driver spear_cpufreq_driver = { + .name = "cpufreq-spear", + .flags = CPUFREQ_STICKY, + .verify = spear_cpufreq_verify, + .target = spear_cpufreq_target, + .get = spear_cpufreq_get, + .init = spear_cpufreq_init, + .exit = spear_cpufreq_exit, + .attr = spear_cpufreq_attr, 
+}; + +static int spear_cpufreq_driver_init(void) +{ + struct device_node *np; + const struct property *prop; + struct cpufreq_frequency_table *freq_tbl; + const __be32 *val; + int cnt, i, ret; + + np = of_find_node_by_path("/cpus/cpu@0"); + if (!np) { + pr_err("No cpu node found"); + return -ENODEV; + } + + if (of_property_read_u32(np, "clock-latency", + &spear_cpufreq.transition_latency)) + spear_cpufreq.transition_latency = CPUFREQ_ETERNAL; + + prop = of_find_property(np, "cpufreq_tbl", NULL); + if (!prop || !prop->value) { + pr_err("Invalid cpufreq_tbl"); + ret = -ENODEV; + goto out_put_node; + } + + cnt = prop->length / sizeof(u32); + val = prop->value; + + freq_tbl = kmalloc(sizeof(*freq_tbl) * (cnt + 1), GFP_KERNEL); + if (!freq_tbl) { + ret = -ENOMEM; + goto out_put_node; + } + + for (i = 0; i < cnt; i++) { + freq_tbl[i].index = i; + freq_tbl[i].frequency = be32_to_cpup(val++); + } + + freq_tbl[i].index = i; + freq_tbl[i].frequency = CPUFREQ_TABLE_END; + + spear_cpufreq.freq_tbl = freq_tbl; + + of_node_put(np); + + spear_cpufreq.clk = clk_get(NULL, "cpu_clk"); + if (IS_ERR(spear_cpufreq.clk)) { + pr_err("Unable to get CPU clock\n"); + ret = PTR_ERR(spear_cpufreq.clk); + goto out_put_mem; + } + + ret = cpufreq_register_driver(&spear_cpufreq_driver); + if (!ret) + return 0; + + pr_err("failed register driver: %d\n", ret); + clk_put(spear_cpufreq.clk); + +out_put_mem: + kfree(freq_tbl); + return ret; + +out_put_node: + of_node_put(np); + return ret; +} +late_initcall(spear_cpufreq_driver_init); + +MODULE_AUTHOR("Deepak Sikri "); +MODULE_DESCRIPTION("SPEAr CPUFreq driver"); +MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 3e33ee9e0804748f1b7a01af9ba0e8e1e10f772f Mon Sep 17 00:00:00 2001 From: Fabio Baltieri Date: Mon, 26 Nov 2012 18:10:12 +0000 Subject: cpufreq: ondemand: update sampling rate only on right CPUs Fix cpufreq_gov_ondemand to skip CPUs where another governor is used. The bug presents itself as a NULL pointer access on the mutex_lock() call, and can be reproduced on an SMP machine by setting the default governor to anything other than ondemand, setting a single CPU's governor to ondemand, then changing the sample rate by writing on: > /sys/devices/system/cpu/cpufreq/ondemand/sampling_rate Backtrace: Nov 26 17:36:54 balto kernel: [ 839.585241] BUG: unable to handle kernel NULL pointer dereference at (null) Nov 26 17:36:54 balto kernel: [ 839.585311] IP: [] __mutex_lock_slowpath+0xb2/0x170 [snip] Nov 26 17:36:54 balto kernel: [ 839.587005] Call Trace: Nov 26 17:36:54 balto kernel: [ 839.587030] [] mutex_lock+0x22/0x40 Nov 26 17:36:54 balto kernel: [ 839.587067] [] store_sampling_rate+0xbf/0x150 Nov 26 17:36:54 balto kernel: [ 839.587110] [] ? __do_page_fault+0x1cc/0x4c0 Nov 26 17:36:54 balto kernel: [ 839.587153] [] kobj_attr_store+0xf/0x20 Nov 26 17:36:54 balto kernel: [ 839.587192] [] sysfs_write_file+0xcd/0x140 Nov 26 17:36:54 balto kernel: [ 839.587234] [] vfs_write+0xac/0x180 Nov 26 17:36:54 balto kernel: [ 839.587271] [] sys_write+0x52/0xa0 Nov 26 17:36:54 balto kernel: [ 839.587306] [] ? do_page_fault+0xe/0x10 Nov 26 17:36:54 balto kernel: [ 839.587345] [] system_call_fastpath+0x16/0x1b Signed-off-by: Fabio Baltieri Signed-off-by: Rafael J.
Wysocki --- drivers/cpufreq/cpufreq_ondemand.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index cca3e9fee9a7..7731f7c7e79a 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -40,6 +40,10 @@ static struct dbs_data od_dbs_data; static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info); +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND +static struct cpufreq_governor cpufreq_gov_ondemand; +#endif + static struct od_dbs_tuners od_tuners = { .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, @@ -279,6 +283,10 @@ static void update_sampling_rate(unsigned int new_rate) policy = cpufreq_cpu_get(cpu); if (!policy) continue; + if (policy->governor != &cpufreq_gov_ondemand) { + cpufreq_cpu_put(policy); + continue; + } dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu); cpufreq_cpu_put(policy); -- cgit v1.2.3
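To illustrate the guard pattern introduced by the last patch above, here is a minimal userspace sketch. It is not kernel code and all names are hypothetical stand-ins: the per-CPU ondemand state (dbs_info with its timer_mutex) only exists for CPUs actually governed by ondemand, so a global tunable update must skip CPUs owned by another governor before touching that state.

#include <stdio.h>
#include <stdbool.h>

enum governor { GOV_ONDEMAND, GOV_PERFORMANCE };

/* Models the per-CPU dbs_info that only becomes valid once the
 * ondemand governor has been started on that CPU. */
struct cpu_state {
	enum governor gov;
	bool dbs_info_valid;
};

static void update_sampling_rate(struct cpu_state *cpus, int ncpus,
				 unsigned int new_rate)
{
	int i;

	for (i = 0; i < ncpus; i++) {
		/* The fix: skip CPUs governed by something else, whose
		 * state (e.g. timer_mutex) was never initialized. */
		if (cpus[i].gov != GOV_ONDEMAND)
			continue;

		if (cpus[i].dbs_info_valid)
			printf("cpu%d: sampling rate set to %u us\n",
			       i, new_rate);
	}
}

int main(void)
{
	struct cpu_state cpus[] = {
		{ GOV_ONDEMAND,    true  },
		{ GOV_PERFORMANCE, false }, /* touching this would oops */
	};

	update_sampling_rate(cpus, 2, 10000);
	return 0;
}

Without the governor check, the loop would take the second CPU's uninitialized lock, which is the NULL pointer dereference shown in the backtrace above.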