
diff -uNr Vanilla/drivers/cpufreq/cpufreq_interactivex.c New/drivers/cpufreq/cpufreq_interactivex.c
--- Vanilla/drivers/cpufreq/cpufreq_interactivex.c  1969-12-31 18:00:00.000000000 -0600
+++ New/drivers/cpufreq/cpufreq_interactivex.c  2012-06-30 19:56:47.302115034 -0500
@@ -0,0 +1,404 @@
+/*
+ * drivers/cpufreq/cpufreq_interactivex.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Author: Mike Chan (mike@android.com) - modified for suspend/wake by imoseyon
+ *
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/cpufreq.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/earlysuspend.h>
+
+#include <asm/cputime.h>
+
+static void (*pm_idle_old)(void);
+static atomic_t active_count = ATOMIC_INIT(0);
+
+static DEFINE_PER_CPU(struct timer_list, cpu_timer);
+
+static DEFINE_PER_CPU(u64, time_in_idle);
+static DEFINE_PER_CPU(u64, idle_exit_time);
+
+static struct cpufreq_policy *policy;
+static unsigned int target_freq;
+
+/* Workqueues handle frequency scaling */
+static struct workqueue_struct *up_wq;
+static struct workqueue_struct *down_wq;
+static struct work_struct freq_scale_work;
+
+static u64 freq_change_time;
+static u64 freq_change_time_in_idle;
+
+static cpumask_t work_cpumask;
+
+static unsigned int suspended = 0;
+static unsigned int enabled = 0;

+
+/*
+ * The minimum amount of time to spend at a frequency before we can ramp down,
+ * default is 50ms.
+ */
+#define DEFAULT_MIN_SAMPLE_TIME 50000
+static unsigned long min_sample_time;
+
+#define FREQ_THRESHOLD 1024000
+static unsigned int freq_threshld;
+
+#define RESUME_SPEED 1024000
+static unsigned int resum_speed;
+
+static int cpufreq_governor_interactivex(struct cpufreq_policy *policy,
+        unsigned int event);
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVEX
+static
+#endif
+struct cpufreq_governor cpufreq_gov_interactivex = {
+    .name = "interactiveX",
+    .governor = cpufreq_governor_interactivex,
+#if defined(CONFIG_ARCH_MSM_SCORPION)
+    .max_transition_latency = 8000000,
+#else
+    .max_transition_latency = 10000000,
+#endif
+    .owner = THIS_MODULE,
+};
+
+static void cpufreq_interactivex_timer(unsigned long data)
+{
+    u64 delta_idle;
+    u64 update_time;
+    u64 *cpu_time_in_idle;
+    u64 *cpu_idle_exit_time;
+    struct timer_list *t;
+
+    u64 now_idle = get_cpu_idle_time_us(data, &update_time);
+
+    cpu_time_in_idle = &per_cpu(time_in_idle, data);
+    cpu_idle_exit_time = &per_cpu(idle_exit_time, data);
+
+    if (update_time == *cpu_idle_exit_time)
+        return;
+
+    delta_idle = cputime64_sub(now_idle, *cpu_time_in_idle);
+
+    /* Scale up if there were no idle cycles since coming out of idle */
+    if (delta_idle == 0) {
+        if (policy->cur == policy->max)
+            return;
+
+        if (nr_running() < 1)
+            return;
+
+        target_freq = policy->max;

+
+        cpumask_set_cpu(data, &work_cpumask);
+        queue_work(up_wq, &freq_scale_work);
+        return;
+    }
+
+    /*
+     * There is a window where if the cpu utilization can go from low to high
+     * between the timer expiring, delta_idle will be > 0 and the cpu will
+     * be 100% busy, preventing idle from running, and this timer from
+     * firing. So setup another timer to fire to check cpu utilization.
+     * Do not setup the timer if there is no scheduled work.
+     */
+    t = &per_cpu(cpu_timer, data);
+    if (!timer_pending(t) && nr_running() > 0) {
+        *cpu_time_in_idle = get_cpu_idle_time_us(
+                data, cpu_idle_exit_time);
+        mod_timer(t, jiffies + 2);
+    }
+
+    if (policy->cur == policy->min)
+        return;
+
+    /*
+     * Do not scale down unless we have been at this frequency for the
+     * minimum sample time.
+     */
+    if (cputime64_sub(update_time, freq_change_time) < min_sample_time)
+        return;
+
+    target_freq = policy->min;
+    cpumask_set_cpu(data, &work_cpumask);
+    queue_work(down_wq, &freq_scale_work);
+}
+
+static void cpufreq_idle(void)
+{
+    struct timer_list *t;
+    u64 *cpu_time_in_idle;
+    u64 *cpu_idle_exit_time;
+
+    pm_idle_old();
+
+    if (!cpumask_test_cpu(smp_processor_id(), policy->cpus))
+        return;
+
+    /* Timer to fire in 1-2 ticks, jiffie aligned. */
+    t = &per_cpu(cpu_timer, smp_processor_id());
+    cpu_idle_exit_time = &per_cpu(idle_exit_time, smp_processor_id());
+    cpu_time_in_idle = &per_cpu(time_in_idle, smp_processor_id());
+
+    if (timer_pending(t) == 0) {
+        *cpu_time_in_idle = get_cpu_idle_time_us(
+                smp_processor_id(), cpu_idle_exit_time);
+        mod_timer(t, jiffies + 2);
+    }
+}
+
+/*
+ * Choose the cpu frequency based off the load. For now choose the minimum

+ * frequency that will satisfy the load, which is not always the lowest power.
+ */
+static unsigned int cpufreq_interactivex_calc_freq(unsigned int cpu)
+{
+    unsigned int delta_time;
+    unsigned int idle_time;
+    unsigned int cpu_load;
+    unsigned int newfreq;
+    u64 current_wall_time;
+    u64 current_idle_time;
+
+    current_idle_time = get_cpu_idle_time_us(cpu, &current_wall_time);
+
+    idle_time = (unsigned int) current_idle_time - freq_change_time_in_idle;
+    delta_time = (unsigned int) current_wall_time - freq_change_time;
+
+    cpu_load = 100 * (delta_time - idle_time) / delta_time;
+
+    if (cpu_load > 98)
+        newfreq = policy->max;
+    else
+        newfreq = policy->cur * cpu_load / 100;
+
+    return newfreq;
+}
+
+/* We use the same work function to scale up and down */
+static void cpufreq_interactivex_freq_change_time_work(struct work_struct *work)
+{
+    unsigned int cpu;
+    unsigned int newtarget;
+    cpumask_t tmp_mask = work_cpumask;
+
+    newtarget = freq_threshld;
+
+    for_each_cpu(cpu, &tmp_mask) {
+        if (!suspended) {
+            if (target_freq == policy->max) {
+                if (nr_running() == 1) {
+                    cpumask_clear_cpu(cpu, &work_cpumask);
+                    return;
+                }
+//              __cpufreq_driver_target(policy, target_freq, CPUFREQ_RELATION_H);
+                __cpufreq_driver_target(policy, newtarget, CPUFREQ_RELATION_H);
+            } else {
+                target_freq = cpufreq_interactivex_calc_freq(cpu);
+                __cpufreq_driver_target(policy, target_freq,
+                        CPUFREQ_RELATION_L);
+            }
+        }
+        freq_change_time_in_idle = get_cpu_idle_time_us(cpu, &freq_change_time);
+        cpumask_clear_cpu(cpu, &work_cpumask);
+    }
+}
+
+static ssize_t show_min_sample_time(struct kobject *kobj,

+        struct attribute *attr, char *buf)
+{
+    return sprintf(buf, "%lu\n", min_sample_time);
+}
+
+static ssize_t store_min_sample_time(struct kobject *kobj,
+        struct attribute *attr, const char *buf, size_t count)
+{
+    int ret = strict_strtoul(buf, 0, &min_sample_time);
+
+    /* A sysfs store must return bytes consumed, not 0, on success. */
+    return ret ? ret : count;
+}
+
+static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
+        show_min_sample_time, store_min_sample_time);
+
+static struct attribute *interactivex_attributes[] = {
+    &min_sample_time_attr.attr,
+    NULL,
+};
+
+static struct attribute_group interactivex_attr_group = {
+    .attrs = interactivex_attributes,
+    .name = "interactiveX",
+};
+
+static void interactivex_suspend(int suspend)
+{
+    unsigned int max_speed;
+
+    max_speed = resum_speed;
+
+    if (!enabled)
+        return;
+    if (!suspend) { // resume at max speed:
+        suspended = 0;
+        __cpufreq_driver_target(policy, max_speed, CPUFREQ_RELATION_L);
+        pr_info("[imoseyon] interactiveX awake at %d\n", policy->cur);
+    } else {
+        suspended = 1;
+        __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
+        pr_info("[imoseyon] interactiveX suspended at %d\n", policy->cur);
+    }
+}
+
+static void interactivex_early_suspend(struct early_suspend *handler)
+{
+    interactivex_suspend(1);
+}
+
+static void interactivex_late_resume(struct early_suspend *handler)
+{
+    interactivex_suspend(0);
+}
+
+static struct early_suspend interactivex_power_suspend = {
+    .suspend = interactivex_early_suspend,
+    .resume = interactivex_late_resume,
+    .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1,
+};
+
+static int cpufreq_governor_interactivex(struct cpufreq_policy *new_policy,
+        unsigned int event)

+{
+    int rc;
+    unsigned int min_freq = ~0;
+    unsigned int max_freq = 0;
+    unsigned int i;
+    struct cpufreq_frequency_table *freq_table;
+
+    switch (event) {
+    case CPUFREQ_GOV_START:
+        if (!cpu_online(new_policy->cpu))
+            return -EINVAL;
+
+        /*
+         * Do not register the idle hook and create sysfs
+         * entries if we have already done so.
+         */
+        if (atomic_inc_return(&active_count) > 1)
+            return 0;
+
+        rc = sysfs_create_group(cpufreq_global_kobject,
+                &interactivex_attr_group);
+        if (rc)
+            return rc;
+
+        pm_idle_old = pm_idle;
+        pm_idle = cpufreq_idle;
+        policy = new_policy;
+        enabled = 1;
+        register_early_suspend(&interactivex_power_suspend);
+        pr_info("[imoseyon] interactiveX active\n");
+        freq_table = cpufreq_frequency_get_table(new_policy->cpu);
+        for (i = 0; (freq_table[i].frequency != CPUFREQ_TABLE_END); i++) {
+            unsigned int freq = freq_table[i].frequency;
+            if (freq == CPUFREQ_ENTRY_INVALID)
+                continue;
+            if (freq < min_freq)
+                min_freq = freq;
+            if (freq > max_freq)
+                max_freq = freq;
+        }
+        /*
+         * Resume at the midrange of the available CPU frequencies if
+         * enough frequency bins are available, else at max.
+         */
+        resum_speed = freq_table[(i - 1) / 2].frequency > min_freq ?
+                freq_table[(i - 1) / 2].frequency : max_freq;
+        freq_threshld = max_freq;
+        break;
+
+    case CPUFREQ_GOV_STOP:
+        if (atomic_dec_return(&active_count) > 1)
+            return 0;
+
+        sysfs_remove_group(cpufreq_global_kobject,
+                &interactivex_attr_group);
+
+        pm_idle = pm_idle_old;
+        del_timer(&per_cpu(cpu_timer, new_policy->cpu));
+        enabled = 0;
+        unregister_early_suspend(&interactivex_power_suspend);
+        pr_info("[imoseyon] interactiveX inactive\n");

+        break;
+
+    case CPUFREQ_GOV_LIMITS:
+        if (new_policy->max < new_policy->cur)
+            __cpufreq_driver_target(new_policy,
+                    new_policy->max, CPUFREQ_RELATION_H);
+        else if (new_policy->min > new_policy->cur)
+            __cpufreq_driver_target(new_policy,
+                    new_policy->min, CPUFREQ_RELATION_L);
+        break;
+    }
+    return 0;
+}
+
+static int __init cpufreq_interactivex_init(void)
+{
+    unsigned int i;
+    struct timer_list *t;
+
+    min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
+    resum_speed = RESUME_SPEED;
+    freq_threshld = FREQ_THRESHOLD;
+
+    /* Initialize per-cpu timers */
+    for_each_possible_cpu(i) {
+        t = &per_cpu(cpu_timer, i);
+        init_timer_deferrable(t);
+        t->function = cpufreq_interactivex_timer;
+        t->data = i;
+    }
+
+    /* Scale up is high priority */
+    up_wq = create_workqueue("kinteractive_up");
+    down_wq = create_workqueue("kinteractive_down");
+
+    INIT_WORK(&freq_scale_work, cpufreq_interactivex_freq_change_time_work);
+
+    pr_info("[imoseyon] interactiveX enter\n");
+    return cpufreq_register_governor(&cpufreq_gov_interactivex);
+}
+
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVEX
+fs_initcall(cpufreq_interactivex_init);
+#else
+module_init(cpufreq_interactivex_init);
+#endif
+
+static void __exit cpufreq_interactivex_exit(void)
+{
+    pr_info("[imoseyon] interactiveX exit\n");
+    cpufreq_unregister_governor(&cpufreq_gov_interactivex);
+    destroy_workqueue(up_wq);
+    destroy_workqueue(down_wq);
+}
+
+module_exit(cpufreq_interactivex_exit);
+
+MODULE_AUTHOR("Mike Chan <mike@android.com>");
+MODULE_DESCRIPTION("'cpufreq_interactiveX' - A cpufreq governor for "
+    "latency sensitive workloads");
+MODULE_LICENSE("GPL");
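The governor above exposes one tunable, min_sample_time, through the global cpufreq sysfs tree; the directory name "interactiveX" comes from the attribute group registered in the patch. As a rough illustration, a userspace helper along these lines could select the governor and raise the tunable. The paths assume the standard cpufreq sysfs layout, and the write_sysfs() helper is hypothetical, not part of the patch:

/*
 * Hypothetical userspace sketch: switch cpu0 to interactiveX and set
 * min_sample_time to 60 ms. Paths follow the standard cpufreq sysfs layout.
 */
#include <stdio.h>

static int write_sysfs(const char *path, const char *val)
{
    FILE *f = fopen(path, "w");

    if (!f) {
        perror(path);
        return -1;
    }
    fputs(val, f);
    return fclose(f);
}

int main(void)
{
    write_sysfs("/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor",
                "interactiveX");
    /* min_sample_time is in microseconds: 60000 us = 60 ms */
    write_sysfs("/sys/devices/system/cpu/cpufreq/interactiveX/min_sample_time",
                "60000");
    return 0;
}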

diff -uNr Vanilla/drivers/cpufreq/cpufreq_lionheart.c New/drivers/cpufreq/cpufreq_lionheart.c
--- Vanilla/drivers/cpufreq/cpufreq_lionheart.c  1969-12-31 18:00:00.000000000 -0600
+++ New/drivers/cpufreq/cpufreq_lionheart.c  2012-06-30 19:56:47.302115034 -0500
@@ -0,0 +1,543 @@
+/*
+ * drivers/cpufreq/cpufreq_lionheart.c
+ *
+ * Patched & tweaked: knzo
+ *
+ * Based on the Conservative governor by:
+ *
+ *   Copyright (C) 2001 Russell King
+ *   (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
+ *            Jun Nakajima <jun.nakajima@intel.com>
+ *   (C) 2009 Alexander Clouter <alex@digriz.org.uk>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/cpu.h>
+#include <linux/jiffies.h>
+#include <linux/kernel_stat.h>
+#include <linux/mutex.h>
+#include <linux/hrtimer.h>
+#include <linux/tick.h>
+#include <linux/ktime.h>
+#include <linux/sched.h>
+
+#define DEF_FREQUENCY_UP_THRESHOLD    (70)
+#define DEF_FREQUENCY_DOWN_THRESHOLD  (30)
+#define MIN_SAMPLING_RATE_RATIO       (2)
+
+static unsigned int min_sampling_rate;
+
+#define LATENCY_MULTIPLIER            (1000)
+#define MIN_LATENCY_MULTIPLIER        (100)
+#define DEF_SAMPLING_DOWN_FACTOR      (1)
+#define MAX_SAMPLING_DOWN_FACTOR      (10)
+#define TRANSITION_LATENCY_LIMIT      (10 * 1000 * 1000)
+
+static void do_dbs_timer(struct work_struct *work);
+
+struct cpu_dbs_info_s {
+    cputime64_t prev_cpu_idle;
+    cputime64_t prev_cpu_wall;

+    cputime64_t prev_cpu_nice;
+    struct cpufreq_policy *cur_policy;
+    struct delayed_work work;
+    unsigned int down_skip;
+    unsigned int requested_freq;
+    int cpu;
+    unsigned int enable:1;
+
+    struct mutex timer_mutex;
+};
+static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info);
+
+static unsigned int dbs_enable;
+
+static DEFINE_MUTEX(dbs_mutex);
+
+static struct dbs_tuners {
+    unsigned int sampling_rate;
+    unsigned int sampling_down_factor;
+    unsigned int up_threshold;
+    unsigned int down_threshold;
+    unsigned int ignore_nice;
+    unsigned int freq_step;
+} dbs_tuners_ins = {
+    .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
+    .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
+    .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
+    .ignore_nice = 0,
+    .freq_step = 5,
+};
+
+static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
+        cputime64_t *wall)
+{
+    cputime64_t idle_time;
+    cputime64_t cur_wall_time;
+    cputime64_t busy_time;
+
+    cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+    busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
+            kstat_cpu(cpu).cpustat.system);
+
+    busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
+    busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
+    busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
+    busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
+
+    idle_time = cputime64_sub(cur_wall_time, busy_time);
+    if (wall)
+        *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
+
+    return (cputime64_t)jiffies_to_usecs(idle_time);
+}
+
+static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
+{
+    u64 idle_time = get_cpu_idle_time_us(cpu, wall);
+
+    if (idle_time == -1ULL)

+        return get_cpu_idle_time_jiffy(cpu, wall);
+
+    return idle_time;
+}
+
+static int
+dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
+        void *data)
+{
+    struct cpufreq_freqs *freq = data;
+    struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info,
+            freq->cpu);
+
+    struct cpufreq_policy *policy;
+
+    if (!this_dbs_info->enable)
+        return 0;
+
+    policy = this_dbs_info->cur_policy;
+
+    if (this_dbs_info->requested_freq > policy->max
+            || this_dbs_info->requested_freq < policy->min)
+        this_dbs_info->requested_freq = freq->new;
+
+    return 0;
+}
+
+static struct notifier_block dbs_cpufreq_notifier_block = {
+    .notifier_call = dbs_cpufreq_notifier
+};
+
+static ssize_t show_sampling_rate_min(struct kobject *kobj,
+        struct attribute *attr, char *buf)
+{
+    return sprintf(buf, "%u\n", min_sampling_rate);
+}
+
+define_one_global_ro(sampling_rate_min);
+
+#define show_one(file_name, object)                                   \
+static ssize_t show_##file_name                                       \
+(struct kobject *kobj, struct attribute *attr, char *buf)             \
+{                                                                     \
+    return sprintf(buf, "%u\n", dbs_tuners_ins.object);               \
+}
+
+show_one(sampling_rate, sampling_rate);
+show_one(sampling_down_factor, sampling_down_factor);
+show_one(up_threshold, up_threshold);
+show_one(down_threshold, down_threshold);
+show_one(ignore_nice_load, ignore_nice);
+show_one(freq_step, freq_step);
+
+static ssize_t store_sampling_down_factor(struct kobject *a,
+        struct attribute *b,
+        const char *buf, size_t count)
+{
+    unsigned int input;
+    int ret;

+
+    ret = sscanf(buf, "%u", &input);
+    if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
+        return -EINVAL;
+
+    dbs_tuners_ins.sampling_down_factor = input;
+    return count;
+}
+
+static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
+        const char *buf, size_t count)
+{
+    unsigned int input;
+    int ret;
+
+    ret = sscanf(buf, "%u", &input);
+    if (ret != 1)
+        return -EINVAL;
+
+    dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
+    return count;
+}
+
+static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
+        const char *buf, size_t count)
+{
+    unsigned int input;
+    int ret;
+
+    ret = sscanf(buf, "%u", &input);
+    if (ret != 1 || input > 100 ||
+            input <= dbs_tuners_ins.down_threshold)
+        return -EINVAL;
+
+    dbs_tuners_ins.up_threshold = input;
+    return count;
+}
+
+static ssize_t store_down_threshold(struct kobject *a, struct attribute *b,
+        const char *buf, size_t count)
+{
+    unsigned int input;
+    int ret;
+
+    ret = sscanf(buf, "%u", &input);
+    if (ret != 1 || input < 11 || input > 100 ||
+            input >= dbs_tuners_ins.up_threshold)
+        return -EINVAL;
+
+    dbs_tuners_ins.down_threshold = input;
+    return count;
+}
+
+static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
+        const char *buf, size_t count)
+{
+    unsigned int input;
+    int ret;
+    unsigned int j;

+
+    ret = sscanf(buf, "%u", &input);
+    if (ret != 1)
+        return -EINVAL;
+
+    if (input > 1)
+        input = 1;
+
+    if (input == dbs_tuners_ins.ignore_nice)
+        return count;
+
+    dbs_tuners_ins.ignore_nice = input;
+
+    for_each_online_cpu(j) {
+        struct cpu_dbs_info_s *dbs_info;
+        dbs_info = &per_cpu(cs_cpu_dbs_info, j);
+        dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
+                &dbs_info->prev_cpu_wall);
+        if (dbs_tuners_ins.ignore_nice)
+            dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+    }
+    return count;
+}
+
+static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
+        const char *buf, size_t count)
+{
+    unsigned int input;
+    int ret;
+
+    ret = sscanf(buf, "%u", &input);
+    if (ret != 1)
+        return -EINVAL;
+
+    if (input > 100)
+        input = 100;
+
+    dbs_tuners_ins.freq_step = input;
+    return count;
+}
+
+define_one_global_rw(sampling_rate);
+define_one_global_rw(sampling_down_factor);
+define_one_global_rw(up_threshold);
+define_one_global_rw(down_threshold);
+define_one_global_rw(ignore_nice_load);
+define_one_global_rw(freq_step);
+
+static struct attribute *dbs_attributes[] = {
+    &sampling_rate_min.attr,
+    &sampling_rate.attr,
+    &sampling_down_factor.attr,
+    &up_threshold.attr,
+    &down_threshold.attr,
+    &ignore_nice_load.attr,
+    &freq_step.attr,
+    NULL
+};
+
+static struct attribute_group dbs_attr_group = {

+    .attrs = dbs_attributes,
+    .name = "Lionheart",
+};
+
+static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
+{
+    unsigned int load = 0;
+    unsigned int max_load = 0;
+    unsigned int freq_target;
+
+    struct cpufreq_policy *policy;
+    unsigned int j;
+
+    policy = this_dbs_info->cur_policy;
+
+    for_each_cpu(j, policy->cpus) {
+        struct cpu_dbs_info_s *j_dbs_info;
+        cputime64_t cur_wall_time, cur_idle_time;
+        unsigned int idle_time, wall_time;
+
+        j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
+
+        cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
+
+        wall_time = (unsigned int) cputime64_sub(cur_wall_time,
+                j_dbs_info->prev_cpu_wall);
+        j_dbs_info->prev_cpu_wall = cur_wall_time;
+
+        idle_time = (unsigned int) cputime64_sub(cur_idle_time,
+                j_dbs_info->prev_cpu_idle);
+        j_dbs_info->prev_cpu_idle = cur_idle_time;
+
+        if (dbs_tuners_ins.ignore_nice) {
+            cputime64_t cur_nice;
+            unsigned long cur_nice_jiffies;
+
+            cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
+                    j_dbs_info->prev_cpu_nice);
+            cur_nice_jiffies = (unsigned long)
+                    cputime64_to_jiffies64(cur_nice);
+
+            j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+            idle_time += jiffies_to_usecs(cur_nice_jiffies);
+        }
+
+        if (unlikely(!wall_time || wall_time < idle_time))
+            continue;
+
+        load = 100 * (wall_time - idle_time) / wall_time;
+        if (load > max_load)
+            max_load = load;
+    }
+
+    if (dbs_tuners_ins.freq_step == 0)
+        return;
+
+    if (max_load > dbs_tuners_ins.up_threshold) {
+        this_dbs_info->down_skip = 0;

+        if (this_dbs_info->requested_freq == policy->max)
+            return;
+
+        freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;
+
+        if (unlikely(freq_target == 0))
+            freq_target = 5;
+
+        this_dbs_info->requested_freq += freq_target;
+        if (this_dbs_info->requested_freq > policy->max)
+            this_dbs_info->requested_freq = policy->max;
+
+        __cpufreq_driver_target(policy, this_dbs_info->requested_freq,
+                CPUFREQ_RELATION_H);
+        return;
+    }
+
+    if (max_load < (dbs_tuners_ins.down_threshold - 10)) {
+        freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;
+
+        this_dbs_info->requested_freq -= freq_target;
+        if (this_dbs_info->requested_freq < policy->min)
+            this_dbs_info->requested_freq = policy->min;
+
+        if (policy->cur == policy->min)
+            return;
+
+        __cpufreq_driver_target(policy, this_dbs_info->requested_freq,
+                CPUFREQ_RELATION_H);
+        return;
+    }
+}
+
+static void do_dbs_timer(struct work_struct *work)
+{
+    struct cpu_dbs_info_s *dbs_info =
+            container_of(work, struct cpu_dbs_info_s, work.work);
+    unsigned int cpu = dbs_info->cpu;
+
+    int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+//  delay -= jiffies % delay;
+
+    mutex_lock(&dbs_info->timer_mutex);
+
+    dbs_check_cpu(dbs_info);
+
+    schedule_delayed_work_on(cpu, &dbs_info->work, delay);
+    mutex_unlock(&dbs_info->timer_mutex);
+}
+
+static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
+{
+    int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+//  delay -= jiffies % delay;
+
+    dbs_info->enable = 1;
+    INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
+    schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);

+}
+
+static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
+{
+    dbs_info->enable = 0;
+    cancel_delayed_work_sync(&dbs_info->work);
+}
+
+static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
+        unsigned int event)
+{
+    unsigned int cpu = policy->cpu;
+    struct cpu_dbs_info_s *this_dbs_info;
+    unsigned int j;
+    int rc;
+
+    this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
+
+    switch (event) {
+    case CPUFREQ_GOV_START:
+        if ((!cpu_online(cpu)) || (!policy->cur))
+            return -EINVAL;
+
+        mutex_lock(&dbs_mutex);
+
+        for_each_cpu(j, policy->cpus) {
+            struct cpu_dbs_info_s *j_dbs_info;
+            j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
+            j_dbs_info->cur_policy = policy;
+
+            j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
+                    &j_dbs_info->prev_cpu_wall);
+            if (dbs_tuners_ins.ignore_nice) {
+                j_dbs_info->prev_cpu_nice =
+                        kstat_cpu(j).cpustat.nice;
+            }
+        }
+        this_dbs_info->down_skip = 0;
+        this_dbs_info->requested_freq = policy->cur;
+
+        mutex_init(&this_dbs_info->timer_mutex);
+        dbs_enable++;
+
+        if (dbs_enable == 1) {
+            unsigned int latency;
+            latency = policy->cpuinfo.transition_latency / 1000;
+            if (latency == 0)
+                latency = 1;
+
+            rc = sysfs_create_group(cpufreq_global_kobject,
+                    &dbs_attr_group);
+            if (rc) {
+                mutex_unlock(&dbs_mutex);
+                return rc;
+            }
+
+            min_sampling_rate = 10000;
+            dbs_tuners_ins.sampling_rate = 10000;

+            cpufreq_register_notifier(
+                    &dbs_cpufreq_notifier_block,
+                    CPUFREQ_TRANSITION_NOTIFIER);
+        }
+        mutex_unlock(&dbs_mutex);
+
+        dbs_timer_init(this_dbs_info);
+
+        break;
+
+    case CPUFREQ_GOV_STOP:
+        dbs_timer_exit(this_dbs_info);
+
+        mutex_lock(&dbs_mutex);
+        dbs_enable--;
+        mutex_destroy(&this_dbs_info->timer_mutex);
+
+        if (dbs_enable == 0)
+            cpufreq_unregister_notifier(
+                    &dbs_cpufreq_notifier_block,
+                    CPUFREQ_TRANSITION_NOTIFIER);
+
+        mutex_unlock(&dbs_mutex);
+        if (!dbs_enable)
+            sysfs_remove_group(cpufreq_global_kobject,
+                    &dbs_attr_group);
+
+        break;
+
+    case CPUFREQ_GOV_LIMITS:
+        mutex_lock(&this_dbs_info->timer_mutex);
+        if (policy->max < this_dbs_info->cur_policy->cur)
+            __cpufreq_driver_target(
+                    this_dbs_info->cur_policy,
+                    policy->max, CPUFREQ_RELATION_H);
+        else if (policy->min > this_dbs_info->cur_policy->cur)
+            __cpufreq_driver_target(
+                    this_dbs_info->cur_policy,
+                    policy->min, CPUFREQ_RELATION_L);
+        mutex_unlock(&this_dbs_info->timer_mutex);
+
+        break;
+    }
+    return 0;
+}
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_LIONHEART
+static
+#endif
+struct cpufreq_governor cpufreq_gov_lionheart = {
+    .name = "Lionheart",
+    .governor = cpufreq_governor_dbs,
+    .max_transition_latency = TRANSITION_LATENCY_LIMIT,
+    .owner = THIS_MODULE,
+};
+
+static int __init cpufreq_gov_dbs_init(void)
+{
+    return cpufreq_register_governor(&cpufreq_gov_lionheart);
+}

+
+static void __exit cpufreq_gov_dbs_exit(void)
+{
+    cpufreq_unregister_governor(&cpufreq_gov_lionheart);
+}
+
+MODULE_AUTHOR("knzo");
+MODULE_DESCRIPTION("'cpufreq_lionheart' - A brave and agile conservative-based governor.");
+MODULE_LICENSE("GPL");
+
+fs_initcall(cpufreq_gov_dbs_init);
+module_exit(cpufreq_gov_dbs_exit);
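Lionheart's scaling decision in dbs_check_cpu() above is a fixed-step rule: requested_freq moves by freq_step percent of policy->max, upward when load exceeds up_threshold and downward when load falls more than 10 points below down_threshold. A minimal standalone model of that arithmetic, with invented frequencies and load samples, behaves like this:

/*
 * Standalone model of Lionheart's step rule; the frequencies and load
 * samples are illustrative only, not from any real device.
 */
#include <stdio.h>

int main(void)
{
    unsigned int policy_max = 1512000, policy_min = 192000;  /* kHz */
    unsigned int up_threshold = 70, down_threshold = 30, freq_step = 5;
    unsigned int requested = 768000;
    unsigned int loads[] = { 85, 90, 40, 15, 10 };           /* percent */
    unsigned int step = freq_step * policy_max / 100;

    for (int i = 0; i < 5; i++) {
        unsigned int load = loads[i];

        if (load > up_threshold) {
            requested += step;
            if (requested > policy_max)
                requested = policy_max;
        } else if (load + 10 < down_threshold) {
            requested -= step;
            /* also guards against unsigned wrap-around */
            if (requested < policy_min || requested > policy_max)
                requested = policy_min;
        }
        printf("load %3u%% -> requested %u kHz\n", load, requested);
    }
    return 0;
}

The fixed step is what makes this family "conservative": frequency ramps gradually in both directions instead of jumping straight to policy->max the way the ondemand-style governors in this patch do.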

diff -uNr Vanilla/drivers/cpufreq/cpufreq_ondemandx.c New/drivers/cpufreq/cpufreq_ondemandx.c
--- Vanilla/drivers/cpufreq/cpufreq_ondemandx.c  1969-12-31 18:00:00.000000000 -0600
+++ New/drivers/cpufreq/cpufreq_ondemandx.c  2012-06-30 20:32:38.418143218 -0500
@@ -0,0 +1,862 @@
+/*
+ * drivers/cpufreq/cpufreq_ondemandx.c
+ *
+ * Copyright (C) 2001 Russell King
+ *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
+ *                    Jun Nakajima <jun.nakajima@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/cpu.h>
+#include <linux/jiffies.h>
+#include <linux/kernel_stat.h>
+#include <linux/mutex.h>
+#include <linux/hrtimer.h>
+#include <linux/tick.h>
+#include <linux/ktime.h>
+#include <linux/sched.h>
+#include <linux/earlysuspend.h>
+
+/*
+ * dbs is used in this file as a shortform for demand-based switching.
+ * It helps to keep variable names smaller, simpler.
+ */
+
+#define DEF_FREQUENCY_DOWN_DIFFERENTIAL   (15)
+#define DEF_FREQUENCY_UP_THRESHOLD        (85)
+#define DEF_SAMPLING_DOWN_FACTOR          (50)
+#define MAX_SAMPLING_DOWN_FACTOR          (100000)
+#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3)
+#define MICRO_FREQUENCY_UP_THRESHOLD      (95)
+#define MICRO_FREQUENCY_MIN_SAMPLE_RATE   (10000)
+#define MIN_FREQUENCY_UP_THRESHOLD        (11)
+#define MAX_FREQUENCY_UP_THRESHOLD        (100)
+#define DEF_SUSPEND_FREQ                  (384000)
+
+/*
+ * The polling frequency of this governor depends on the capability of
+ * the processor. Default polling frequency is 1000 times the transition
+ * latency of the processor. The governor will work on any processor with
+ * transition latency <= 10mS, using appropriate sampling rate.
+ * For CPUs with transition latency > 10mS (mostly drivers with
+ * CPUFREQ_ETERNAL) this governor will not work.
+ * All times here are in uS.
+ */
+#define MIN_SAMPLING_RATE_RATIO           (2)
+
+static unsigned int min_sampling_rate;
+
+#define LATENCY_MULTIPLIER                (1000)
+#define MIN_LATENCY_MULTIPLIER            (100)
+#define TRANSITION_LATENCY_LIMIT          (10 * 1000 * 1000)
+
+static void do_dbs_timer(struct work_struct *work);
+static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
+        unsigned int event);
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMANDX
+static
+#endif
+struct cpufreq_governor cpufreq_gov_ondemandx = {
+    .name = "ondemandx",
+    .governor = cpufreq_governor_dbs,
+    .max_transition_latency = TRANSITION_LATENCY_LIMIT,
+    .owner = THIS_MODULE,
+};
+
+/* Sampling types */
+enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
+
+struct cpu_dbs_info_s {
+    cputime64_t prev_cpu_idle;
+    cputime64_t prev_cpu_iowait;
+    cputime64_t prev_cpu_wall;
+    cputime64_t prev_cpu_nice;
+    struct cpufreq_policy *cur_policy;
+    struct delayed_work work;
+    struct cpufreq_frequency_table *freq_table;
+    unsigned int freq_lo;
+    unsigned int freq_lo_jiffies;
+    unsigned int freq_hi_jiffies;
+    unsigned int rate_mult;
+    int cpu;
+    unsigned int sample_type:1;
+    /*
+     * percpu mutex that serializes governor limit change with
+     * do_dbs_timer invocation. We do not want do_dbs_timer to run
+     * when user is changing the governor or limits.
+     */
+    struct mutex timer_mutex;
+};
+static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);

+
+/*
+ * dbs_mutex protects dbs_enable in governor start/stop.
+ */
+static DEFINE_MUTEX(dbs_mutex);
+
+static struct dbs_tuners {
+    unsigned int sampling_rate;
+    unsigned int up_threshold;
+    unsigned int down_differential;
+    unsigned int ignore_nice;
+    unsigned int sampling_down_factor;
+    unsigned int powersave_bias;
+    unsigned int io_is_busy;
+    unsigned int suspend_freq;
+} dbs_tuners_ins = {
+    .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
+    .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
+    .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
+    .ignore_nice = 0,
+    .powersave_bias = 50,
+    .suspend_freq = DEF_SUSPEND_FREQ,
+};
+
+static unsigned int dbs_enable = 0;    /* number of CPUs using this policy */
+
+// used for imoseyon's mods
+static unsigned int suspended = 0;
+
+static void ondemandx_suspend(int suspend)
+{
+    struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
+            smp_processor_id());
+
+    if (dbs_enable == 0)
+        return;
+    if (!suspend) { // resume at max speed:
+        suspended = 0;
+        __cpufreq_driver_target(dbs_info->cur_policy,
+                dbs_info->cur_policy->max, CPUFREQ_RELATION_L);
+        pr_info("[imoseyon] ondemandx awake at %d\n",
+                dbs_info->cur_policy->cur);
+    } else {
+        suspended = 1;
+        // let's give it a little breathing room
+        __cpufreq_driver_target(dbs_info->cur_policy,
+                dbs_tuners_ins.suspend_freq, CPUFREQ_RELATION_H);
+        pr_info("[imoseyon] ondemandx suspended at %d\n",
+                dbs_info->cur_policy->cur);
+    }
+}
+
+static void ondemandx_early_suspend(struct early_suspend *handler)
+{
+    ondemandx_suspend(1);
+}
+
+static void ondemandx_late_resume(struct early_suspend *handler)
+{
+    ondemandx_suspend(0);
+}
+
+static struct early_suspend ondemandx_power_suspend = {

+    .suspend = ondemandx_early_suspend,
+    .resume = ondemandx_late_resume,
+    .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1,
+};
+
+static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
+        cputime64_t *wall)
+{
+    cputime64_t idle_time;
+    cputime64_t cur_wall_time;
+    cputime64_t busy_time;
+
+    cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+    busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
+            kstat_cpu(cpu).cpustat.system);
+
+    busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
+    busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
+    busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
+    busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
+
+    idle_time = cputime64_sub(cur_wall_time, busy_time);
+    if (wall)
+        *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
+
+    return (cputime64_t)jiffies_to_usecs(idle_time);
+}
+
+static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
+{
+    u64 idle_time = get_cpu_idle_time_us(cpu, wall);
+
+    if (idle_time == -1ULL)
+        return get_cpu_idle_time_jiffy(cpu, wall);
+
+    return idle_time;
+}
+
+static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall)
+{
+    u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);
+
+    if (iowait_time == -1ULL)
+        return 0;
+
+    return iowait_time;
+}
+
+/*
+ * Find right freq to be set now with powersave_bias on.
+ * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
+ * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
+ */
+static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
+        unsigned int freq_next,
+        unsigned int relation)
+{
+    unsigned int freq_req, freq_reduc, freq_avg;

+    unsigned int freq_hi, freq_lo;
+    unsigned int index = 0;
+    unsigned int jiffies_total, jiffies_hi, jiffies_lo;
+    struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
+            policy->cpu);
+
+    if (!dbs_info->freq_table) {
+        dbs_info->freq_lo = 0;
+        dbs_info->freq_lo_jiffies = 0;
+        return freq_next;
+    }
+
+    cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
+            relation, &index);
+    freq_req = dbs_info->freq_table[index].frequency;
+    freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
+    freq_avg = freq_req - freq_reduc;
+
+    /* Find freq bounds for freq_avg in freq_table */
+    index = 0;
+    cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
+            CPUFREQ_RELATION_H, &index);
+    freq_lo = dbs_info->freq_table[index].frequency;
+    index = 0;
+    cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
+            CPUFREQ_RELATION_L, &index);
+    freq_hi = dbs_info->freq_table[index].frequency;
+
+    /* Find out how long we have to be in hi and lo freqs */
+    if (freq_hi == freq_lo) {
+        dbs_info->freq_lo = 0;
+        dbs_info->freq_lo_jiffies = 0;
+        return freq_lo;
+    }
+    jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+    jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
+    jiffies_hi += ((freq_hi - freq_lo) / 2);
+    jiffies_hi /= (freq_hi - freq_lo);
+    jiffies_lo = jiffies_total - jiffies_hi;
+    dbs_info->freq_lo = freq_lo;
+    dbs_info->freq_lo_jiffies = jiffies_lo;
+    dbs_info->freq_hi_jiffies = jiffies_hi;
+    return freq_hi;
+}
+
+static void ondemandx_powersave_bias_init_cpu(int cpu)
+{
+    struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
+
+    dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
+    dbs_info->freq_lo = 0;
+}
+
+static void ondemandx_powersave_bias_init(void)
+{
+    int i;
+
+    for_each_online_cpu(i) {
+        ondemandx_powersave_bias_init_cpu(i);
+    }
+}

+
+/************************** sysfs interface ************************/
+
+static ssize_t show_sampling_rate_min(struct kobject *kobj,
+        struct attribute *attr, char *buf)
+{
+    return sprintf(buf, "%u\n", min_sampling_rate);
+}
+
+define_one_global_ro(sampling_rate_min);
+
+/* cpufreq_ondemandx Governor Tunables */
+#define show_one(file_name, object)                                   \
+static ssize_t show_##file_name                                       \
+(struct kobject *kobj, struct attribute *attr, char *buf)             \
+{                                                                     \
+    return sprintf(buf, "%u\n", dbs_tuners_ins.object);               \
+}
+show_one(sampling_rate, sampling_rate);
+show_one(io_is_busy, io_is_busy);
+show_one(up_threshold, up_threshold);
+show_one(down_differential, down_differential);
+show_one(sampling_down_factor, sampling_down_factor);
+show_one(ignore_nice_load, ignore_nice);
+show_one(powersave_bias, powersave_bias);
+show_one(suspend_freq, suspend_freq);
+
+static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
+        const char *buf, size_t count)
+{
+    unsigned int input;
+    int ret;
+
+    ret = sscanf(buf, "%u", &input);
+    if (ret != 1)
+        return -EINVAL;
+    dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
+    return count;
+}
+
+static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
+        const char *buf, size_t count)
+{
+    unsigned int input;
+    int ret;
+
+    ret = sscanf(buf, "%u", &input);
+    if (ret != 1)
+        return -EINVAL;
+    dbs_tuners_ins.io_is_busy = !!input;
+    return count;
+}
+
+static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
+        const char *buf, size_t count)
+{
+    unsigned int input;
+    int ret;
+
+    ret = sscanf(buf, "%u", &input);
+    if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||

+            input < MIN_FREQUENCY_UP_THRESHOLD) {
+        return -EINVAL;
+    }
+    dbs_tuners_ins.up_threshold = input;
+    return count;
+}
+
+static ssize_t store_sampling_down_factor(struct kobject *a,
+        struct attribute *b, const char *buf, size_t count)
+{
+    unsigned int input, j;
+    int ret;
+
+    ret = sscanf(buf, "%u", &input);
+    if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
+        return -EINVAL;
+    dbs_tuners_ins.sampling_down_factor = input;
+
+    /* Reset down sampling multiplier in case it was active */
+    for_each_online_cpu(j) {
+        struct cpu_dbs_info_s *dbs_info;
+        dbs_info = &per_cpu(od_cpu_dbs_info, j);
+        dbs_info->rate_mult = 1;
+    }
+    return count;
+}
+
+static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
+        const char *buf, size_t count)
+{
+    unsigned int input;
+    int ret;
+
+    unsigned int j;
+
+    ret = sscanf(buf, "%u", &input);
+    if (ret != 1)
+        return -EINVAL;
+
+    if (input > 1)
+        input = 1;
+
+    if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
+        return count;
+    }
+    dbs_tuners_ins.ignore_nice = input;
+
+    /* we need to re-evaluate prev_cpu_idle */
+    for_each_online_cpu(j) {
+        struct cpu_dbs_info_s *dbs_info;
+        dbs_info = &per_cpu(od_cpu_dbs_info, j);
+        dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
+                &dbs_info->prev_cpu_wall);
+        if (dbs_tuners_ins.ignore_nice)
+            dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+    }
+    return count;
+}

+
+static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
+        const char *buf, size_t count)
+{
+    unsigned int input;
+    int ret;
+
+    ret = sscanf(buf, "%u", &input);
+    if (ret != 1)
+        return -EINVAL;
+
+    if (input > 1000)
+        input = 1000;
+
+    dbs_tuners_ins.powersave_bias = input;
+    ondemandx_powersave_bias_init();
+    return count;
+}
+
+static ssize_t store_down_differential(struct kobject *a, struct attribute *b,
+        const char *buf, size_t count)
+{
+    unsigned int input;
+    int ret;
+
+    ret = sscanf(buf, "%u", &input);
+    if (ret != 1)
+        return -EINVAL;
+
+    if (input > 30)
+        input = 30;
+
+    mutex_lock(&dbs_mutex);
+    dbs_tuners_ins.down_differential = input;
+    mutex_unlock(&dbs_mutex);
+
+    return count;
+}
+
+static ssize_t store_suspend_freq(struct kobject *a, struct attribute *b,
+        const char *buf, size_t count)
+{
+    unsigned int input;
+    int ret;
+
+    ret = sscanf(buf, "%u", &input);
+    if (ret != 1)
+        return -EINVAL;
+
+    if (input > 2016000)
+        input = 2016000;
+
+    if (input < 122000)
+        input = 122000;
+
+    mutex_lock(&dbs_mutex);

+    dbs_tuners_ins.suspend_freq = input;
+    mutex_unlock(&dbs_mutex);
+
+    return count;
+}
+
+define_one_global_rw(sampling_rate);
+define_one_global_rw(io_is_busy);
+define_one_global_rw(up_threshold);
+define_one_global_rw(down_differential);
+define_one_global_rw(sampling_down_factor);
+define_one_global_rw(ignore_nice_load);
+define_one_global_rw(powersave_bias);
+define_one_global_rw(suspend_freq);
+
+static struct attribute *dbs_attributes[] = {
+    &sampling_rate_min.attr,
+    &sampling_rate.attr,
+    &up_threshold.attr,
+    &down_differential.attr,
+    &sampling_down_factor.attr,
+    &ignore_nice_load.attr,
+    &powersave_bias.attr,
+    &io_is_busy.attr,
+    &suspend_freq.attr,
+    NULL
+};
+
+static struct attribute_group dbs_attr_group = {
+    .attrs = dbs_attributes,
+    .name = "ondemandx",
+};
+
+/************************** sysfs end ************************/
+
+static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
+{
+    if (dbs_tuners_ins.powersave_bias)
+        freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
+    else if (p->cur == p->max)
+        return;
+
+    if (suspended && freq > dbs_tuners_ins.suspend_freq) {
+        freq = dbs_tuners_ins.suspend_freq;
+        __cpufreq_driver_target(p, freq, CPUFREQ_RELATION_H);
+    } else
+        __cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ?
+                CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
+}
+
+static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
+{
+    unsigned int max_load_freq;
+
+    struct cpufreq_policy *policy;
+    unsigned int j;
+
+    this_dbs_info->freq_lo = 0;
+    policy = this_dbs_info->cur_policy;
+
+    /*

+     * Every sampling_rate, we check, if current idle time is less
+     * than 20% (default), then we try to increase frequency.
+     * Every sampling_rate, we look for the lowest
+     * frequency which can sustain the load while keeping idle time over
+     * 30%. If such a frequency exists, we try to decrease to this frequency.
+     *
+     * Any frequency increase takes it to the maximum frequency.
+     * Frequency reduction happens at minimum steps of
+     * 5% (default) of current frequency
+     */
+
+    /* Get Absolute Load - in terms of freq */
+    max_load_freq = 0;
+
+    for_each_cpu(j, policy->cpus) {
+        struct cpu_dbs_info_s *j_dbs_info;
+        cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
+        unsigned int idle_time, wall_time, iowait_time;
+        unsigned int load, load_freq;
+        int freq_avg;
+
+        j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
+
+        cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
+        cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);
+
+        wall_time = (unsigned int) cputime64_sub(cur_wall_time,
+                j_dbs_info->prev_cpu_wall);
+        j_dbs_info->prev_cpu_wall = cur_wall_time;
+
+        idle_time = (unsigned int) cputime64_sub(cur_idle_time,
+                j_dbs_info->prev_cpu_idle);
+        j_dbs_info->prev_cpu_idle = cur_idle_time;
+
+        iowait_time = (unsigned int) cputime64_sub(cur_iowait_time,
+                j_dbs_info->prev_cpu_iowait);
+        j_dbs_info->prev_cpu_iowait = cur_iowait_time;
+
+        if (dbs_tuners_ins.ignore_nice) {
+            cputime64_t cur_nice;
+            unsigned long cur_nice_jiffies;
+
+            cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
+                    j_dbs_info->prev_cpu_nice);
+            /*
+             * Assumption: nice time between sampling periods will
+             * be less than 2^32 jiffies for 32 bit sys
+             */
+            cur_nice_jiffies = (unsigned long)
+                    cputime64_to_jiffies64(cur_nice);
+
+            j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+            idle_time += jiffies_to_usecs(cur_nice_jiffies);
+        }
+
+        /*
+         * For the purpose of ondemandx, waiting for disk IO is an
+         * indication that you're performance critical, and not that
+         * the system is actually idle. So subtract the iowait time
+         * from the cpu idle time.

+         */
+
+        if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time)
+            idle_time -= iowait_time;
+
+        if (unlikely(!wall_time || wall_time < idle_time))
+            continue;
+
+        load = 100 * (wall_time - idle_time) / wall_time;
+
+        freq_avg = __cpufreq_driver_getavg(policy, j);
+        if (freq_avg <= 0)
+            freq_avg = policy->cur;
+
+        load_freq = load * freq_avg;
+        if (load_freq > max_load_freq)
+            max_load_freq = load_freq;
+    }
+
+    /* Check for frequency increase */
+    if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
+        /* If switching to max speed, apply sampling_down_factor */
+        if (policy->cur < policy->max)
+            this_dbs_info->rate_mult =
+                    dbs_tuners_ins.sampling_down_factor;
+        dbs_freq_increase(policy, policy->max);
+        return;
+    }
+
+    /* Check for frequency decrease */
+    /* if we cannot reduce the frequency anymore, break out early */
+    if (policy->cur == policy->min)
+        return;
+
+    /*
+     * The optimal frequency is the frequency that is the lowest that
+     * can support the current CPU usage without triggering the up
+     * policy. To be safe, we focus 10 points under the threshold.
+     */
+    if (max_load_freq <
+            (dbs_tuners_ins.up_threshold -
+             dbs_tuners_ins.down_differential) * policy->cur) {
+        unsigned int freq_next;
+        freq_next = max_load_freq /
+                (dbs_tuners_ins.up_threshold -
+                 dbs_tuners_ins.down_differential);
+
+        /* No longer fully busy, reset rate_mult */
+        this_dbs_info->rate_mult = 1;
+
+        if (freq_next < policy->min)
+            freq_next = policy->min;
+
+        if (!dbs_tuners_ins.powersave_bias) {
+            __cpufreq_driver_target(policy, freq_next,
+                    CPUFREQ_RELATION_L);
+        } else {
+            int freq = powersave_bias_target(policy, freq_next,
+                    CPUFREQ_RELATION_L);
+            __cpufreq_driver_target(policy, freq,

+                    CPUFREQ_RELATION_L);
+        }
+    }
+}
+
+static void do_dbs_timer(struct work_struct *work)
+{
+    struct cpu_dbs_info_s *dbs_info =
+            container_of(work, struct cpu_dbs_info_s, work.work);
+    unsigned int cpu = dbs_info->cpu;
+    int sample_type = dbs_info->sample_type;
+
+    int delay;
+
+    mutex_lock(&dbs_info->timer_mutex);
+
+    /* Common NORMAL_SAMPLE setup */
+    dbs_info->sample_type = DBS_NORMAL_SAMPLE;
+    if (!dbs_tuners_ins.powersave_bias ||
+            sample_type == DBS_NORMAL_SAMPLE) {
+        dbs_check_cpu(dbs_info);
+        if (dbs_info->freq_lo) {
+            /* Setup timer for SUB_SAMPLE */
+            dbs_info->sample_type = DBS_SUB_SAMPLE;
+            delay = dbs_info->freq_hi_jiffies;
+        } else {
+            /*
+             * We want all CPUs to do sampling nearly on
+             * same jiffy
+             */
+            delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
+                    * dbs_info->rate_mult);
+
+            if (num_online_cpus() > 1)
+                delay -= jiffies % delay;
+        }
+    } else {
+        if (!suspended)
+            __cpufreq_driver_target(dbs_info->cur_policy,
+                    dbs_info->freq_lo, CPUFREQ_RELATION_H);
+        delay = dbs_info->freq_lo_jiffies;
+    }
+    schedule_delayed_work_on(cpu, &dbs_info->work, delay);
+    mutex_unlock(&dbs_info->timer_mutex);
+}
+
+static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
+{
+    /* We want all CPUs to do sampling nearly on same jiffy */
+    int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+
+    if (num_online_cpus() > 1)
+        delay -= jiffies % delay;
+
+    dbs_info->sample_type = DBS_NORMAL_SAMPLE;
+    INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
+    schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
+}
+
+static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
+{

+    cancel_delayed_work_sync(&dbs_info->work);
+}
+
+/*
+ * Not all CPUs want IO time to be accounted as busy; this depends on how
+ * efficient idling at a higher frequency/voltage is.
+ * Pavel Machek says this is not so for various generations of AMD and old
+ * Intel systems.
+ * Mike Chan (android.com) claims this is also not true for ARM.
+ * Because of this, whitelist specific known (series) of CPUs by default, and
+ * leave all others up to the user.
+ */
+static int should_io_be_busy(void)
+{
+#if defined(CONFIG_X86)
+    /*
+     * For Intel, Core 2 (model 15) and later have an efficient idle.
+     */
+    if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+            boot_cpu_data.x86 == 6 &&
+            boot_cpu_data.x86_model >= 15)
+        return 1;
+#endif
+    return 0;
+}
+
+static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
+        unsigned int event)
+{
+    unsigned int cpu = policy->cpu;
+    struct cpu_dbs_info_s *this_dbs_info;
+    unsigned int j;
+    int rc;
+
+    this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
+
+    switch (event) {
+    case CPUFREQ_GOV_START:
+        if ((!cpu_online(cpu)) || (!policy->cur))
+            return -EINVAL;
+
+        mutex_lock(&dbs_mutex);
+
+        dbs_enable++;
+        for_each_cpu(j, policy->cpus) {
+            struct cpu_dbs_info_s *j_dbs_info;
+            j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
+            j_dbs_info->cur_policy = policy;
+
+            j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
+                    &j_dbs_info->prev_cpu_wall);
+            if (dbs_tuners_ins.ignore_nice) {
+                j_dbs_info->prev_cpu_nice =
+                        kstat_cpu(j).cpustat.nice;
+            }
+        }
+        this_dbs_info->cpu = cpu;
+        this_dbs_info->rate_mult = 1;
+        ondemandx_powersave_bias_init_cpu(cpu);
+        /*

+         * Start the timer/schedule work when this governor
+         * is used for the first time.
+         */
+        if (dbs_enable == 1) {
+            unsigned int latency;
+
+            rc = sysfs_create_group(cpufreq_global_kobject,
+                    &dbs_attr_group);
+            if (rc) {
+                mutex_unlock(&dbs_mutex);
+                return rc;
+            }
+
+            /* policy latency is in nS. Convert it to uS first */
+            latency = policy->cpuinfo.transition_latency / 1000;
+            if (latency == 0)
+                latency = 1;
+            /* Bring kernel and HW constraints together */
+            min_sampling_rate = max(min_sampling_rate,
+                    MIN_LATENCY_MULTIPLIER * latency);
+            dbs_tuners_ins.sampling_rate =
+                    max(min_sampling_rate,
+                        latency * LATENCY_MULTIPLIER);
+            dbs_tuners_ins.io_is_busy = should_io_be_busy();
+        }
+        mutex_unlock(&dbs_mutex);
+
+        mutex_init(&this_dbs_info->timer_mutex);
+        dbs_timer_init(this_dbs_info);
+        register_early_suspend(&ondemandx_power_suspend);
+        pr_info("[imoseyon] ondemandx active\n");
+        break;
+
+    case CPUFREQ_GOV_STOP:
+        dbs_timer_exit(this_dbs_info);
+
+        mutex_lock(&dbs_mutex);
+        mutex_destroy(&this_dbs_info->timer_mutex);
+        dbs_enable--;
+        mutex_unlock(&dbs_mutex);
+        if (!dbs_enable)
+            sysfs_remove_group(cpufreq_global_kobject,
+                    &dbs_attr_group);
+        unregister_early_suspend(&ondemandx_power_suspend);
+        pr_info("[imoseyon] ondemandx inactive\n");
+        break;
+
+    case CPUFREQ_GOV_LIMITS:
+        mutex_lock(&this_dbs_info->timer_mutex);
+        if (policy->max < this_dbs_info->cur_policy->cur)
+            __cpufreq_driver_target(this_dbs_info->cur_policy,
+                    policy->max, CPUFREQ_RELATION_H);
+        else if (policy->min > this_dbs_info->cur_policy->cur)
+            __cpufreq_driver_target(this_dbs_info->cur_policy,
+                    policy->min, CPUFREQ_RELATION_L);
+        mutex_unlock(&this_dbs_info->timer_mutex);
+        break;
+    }
+    return 0;
+}

+
+static int __init cpufreq_gov_dbs_init(void)
+{
+    cputime64_t wall;
+    u64 idle_time;
+    int cpu = get_cpu();
+
+    idle_time = get_cpu_idle_time_us(cpu, &wall);
+    put_cpu();
+    if (idle_time != -1ULL) {
+        /* Idle micro accounting is supported. Use finer thresholds */
+        dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
+        dbs_tuners_ins.down_differential =
+                MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
+        /*
+         * In no_hz/micro accounting case we set the minimum frequency
+         * not depending on HZ, but fixed (very low). The deferred
+         * timer might skip some samples if idle/sleeping as needed.
+         */
+        min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
+    } else {
+        /* For correct statistics, we need 10 ticks for each measure */
+        min_sampling_rate =
+                MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
+    }
+
+    pr_info("[imoseyon] ondemandx enter\n");
+    return cpufreq_register_governor(&cpufreq_gov_ondemandx);
+}
+
+static void __exit cpufreq_gov_dbs_exit(void)
+{
+    pr_info("[imoseyon] ondemandx exit\n");
+    cpufreq_unregister_governor(&cpufreq_gov_ondemandx);
+}
+
+MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
+MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
+MODULE_DESCRIPTION("'cpufreq_ondemandx' - A dynamic cpufreq governor for "
+    "Low Latency Frequency Transition capable processors");
+MODULE_LICENSE("GPL");
+
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMANDX
+fs_initcall(cpufreq_gov_dbs_init);
+#else
+module_init(cpufreq_gov_dbs_init);
+#endif
+module_exit(cpufreq_gov_dbs_exit);
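The least obvious arithmetic in this governor is in powersave_bias_target() above: the requested frequency is shaved by powersave_bias/1000, then the sampling window is split between the two table frequencies bracketing the reduced average so that the time-weighted mean lands near that average. A self-contained sketch of that split, using invented frequency bins rather than a real frequency table, looks like this:

/*
 * Standalone model of the powersave_bias hi/lo split. The frequencies
 * and window length are illustrative assumptions, not real table values.
 */
#include <stdio.h>

int main(void)
{
    unsigned int freq_req = 1024000;          /* kHz, requested target */
    unsigned int powersave_bias = 50;         /* patch default: 5.0% */
    unsigned int freq_lo = 912000;            /* bin below the average */
    unsigned int freq_hi = 1024000;           /* bin above the average */
    unsigned int jiffies_total = 10;          /* sampling window */

    unsigned int freq_avg = freq_req - freq_req * powersave_bias / 1000;
    unsigned int jiffies_hi = (freq_avg - freq_lo) * jiffies_total;

    jiffies_hi += (freq_hi - freq_lo) / 2;    /* round to nearest */
    jiffies_hi /= (freq_hi - freq_lo);

    printf("avg %u kHz -> %u jiffies hi, %u jiffies lo\n",
           freq_avg, jiffies_hi, jiffies_total - jiffies_hi);
    return 0;
}

With these assumed numbers the 10-jiffy window splits 5/5, so the CPU alternates between the two bins and averages close to the biased 972800 kHz target.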

diff -uNr Vanilla/drivers/cpufreq/Kconfig New/drivers/cpufreq/Kconfig
--- Vanilla/drivers/cpufreq/Kconfig  2011-11-30 01:52:01.000000000 -0600
+++ New/drivers/cpufreq/Kconfig  2012-07-06 17:45:58.000000000 -0500
@@ -99,6 +99,19 @@
 	  governor. If unsure have a look at the help section of the
 	  driver. Fallback governor will be the performance governor.
 
+config CPU_FREQ_DEFAULT_GOV_ONDEMANDX
+	bool "ondemandx"
+	select CPU_FREQ_GOV_ONDEMANDX
+	select CPU_FREQ_GOV_PERFORMANCE
+	help
+	  Use the CPUFreq governor 'ondemand' as default. This allows
+	  you to get a full dynamic frequency capable system by simply
+	  loading your cpufreq low-level hardware driver. Be aware that
+	  not all cpufreq drivers support the ondemand governor. If
+	  unsure have a look at the help section of the driver.
+	  Fallback governor will be the performance governor.
+	  OndemandX has a built-in sleep profile, but its sysfs
+	  interface is not working.
+
 config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
 	bool "conservative"
 	select CPU_FREQ_GOV_CONSERVATIVE
@@ -119,6 +132,20 @@
 	  you to get a full dynamic cpu frequency capable system by simply
 	  loading your cpufreq low-level hardware driver, using the
 	  'interactive' governor for latency-sensitive workloads.
 
+config CPU_FREQ_DEFAULT_GOV_INTERACTIVEX
+	bool "interactiveX"
+	select CPU_FREQ_GOV_INTERACTIVEX
+	help
+	  Use the CPUFreq governor 'interactiveX' as default. This allows
+	  you to get a full dynamic cpu frequency capable system by simply
+	  loading your cpufreq low-level hardware driver, using the
+	  'interactiveX' governor for latency-sensitive workloads.
+
+config CPU_FREQ_DEFAULT_GOV_LIONHEART
+	bool "lionheart"
+	select CPU_FREQ_GOV_LIONHEART
+	help
+	  Use the CPUFreq governor 'Lionheart' as default.
+
 endchoice
@@ -159,6 +186,31 @@
 
 	  If in doubt, say Y.
 
+config CPU_FREQ_GOV_ONDEMANDX
+	tristate "'ondemandx' cpufreq policy governor"
+	select CPU_FREQ_TABLE
+	help
+	  'ondemand' - This driver adds a dynamic cpufreq policy governor.
+	  The governor does a periodic polling and
+	  changes frequency based on the CPU utilization.
+	  The support for this governor depends on CPU capability to
+	  do fast frequency switching (i.e, very low latency frequency
+	  transitions).
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called cpufreq_ondemand.
+
+	  For details, take a look at linux/Documentation/cpu-freq.
+
+	  If in doubt, say N.
+
+config CPU_FREQ_GOV_ONDEMANDX_INPUT
+	bool "Ramp up CPU frequency on input events"
+	default y
+	depends on CPU_FREQ_GOV_ONDEMANDX
+	help
+	  Enable installation of an input event handler which will ramp up the
+	  CPU to max frequency when an input event is received.
+
 config CPU_FREQ_GOV_ONDEMAND
 	tristate "'ondemand' cpufreq policy governor"
 	select CPU_FREQ_TABLE
@@ -206,4 +258,14 @@
 
 	  If in doubt, say N.
 
+config CPU_FREQ_GOV_LIONHEART
+	tristate "'Lionheart' cpufreq governor"
+	depends on CPU_FREQ
+	help
+	  'Lionheart' - A brave and agile conservative-based governor.
+
+config CPU_FREQ_GOV_INTERACTIVEX
+	tristate "'interactiveX' cpufreq policy governor"
+	help
+	  'interactiveX' - Modified version of interactive with sleep+wake code.
+
 endif	# CPU_FREQ

diff -uNr Vanilla/drivers/cpufreq/Makefile New/drivers/cpufreq/Makefile
--- Vanilla/drivers/cpufreq/Makefile  2011-11-30 01:52:01.000000000 -0600
+++ New/drivers/cpufreq/Makefile  2012-07-06 17:46:32.000000000 -0500
@@ -7,9 +7,12 @@
 obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE)   += cpufreq_performance.o
 obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE)     += cpufreq_powersave.o
 obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE)     += cpufreq_userspace.o
+obj-$(CONFIG_CPU_FREQ_GOV_ONDEMANDX)     += cpufreq_ondemandx.o
 obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND)      += cpufreq_ondemand.o
 obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE)  += cpufreq_conservative.o
 obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE)   += cpufreq_interactive.o
+obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVEX)  += cpufreq_interactivex.o
+obj-$(CONFIG_CPU_FREQ_GOV_LIONHEART)     += cpufreq_lionheart.o
 
 # CPUfreq cross-arch helpers
 obj-$(CONFIG_CPU_FREQ_TABLE)             += freq_table.o

diff -uNr Vanilla/include/linux/cpufreq.h New/include/linux/cpufreq.h
--- Vanilla/include/linux/cpufreq.h  2011-11-30 01:51:52.000000000 -0600
+++ New/include/linux/cpufreq.h  2012-07-06 17:49:32.474953576 -0500
@@ -363,6 +363,9 @@
 #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE)
 extern struct cpufreq_governor cpufreq_gov_userspace;
 #define CPUFREQ_DEFAULT_GOVERNOR	(&cpufreq_gov_userspace)
+#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMANDX)
+extern struct cpufreq_governor cpufreq_gov_ondemandx;
+#define CPUFREQ_DEFAULT_GOVERNOR	(&cpufreq_gov_ondemandx)
 #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND)
 extern struct cpufreq_governor cpufreq_gov_ondemand;
 #define CPUFREQ_DEFAULT_GOVERNOR	(&cpufreq_gov_ondemand)
@@ -372,9 +375,14 @@
 #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE)
 extern struct cpufreq_governor cpufreq_gov_interactive;
 #define CPUFREQ_DEFAULT_GOVERNOR	(&cpufreq_gov_interactive)
+#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVEX)
+extern struct cpufreq_governor cpufreq_gov_interactivex;
+#define CPUFREQ_DEFAULT_GOVERNOR	(&cpufreq_gov_interactivex)
+#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_LIONHEART)
+extern struct cpufreq_governor cpufreq_gov_lionheart;
+#define CPUFREQ_DEFAULT_GOVERNOR	(&cpufreq_gov_lionheart)
 #endif

 
 /*********************************************************************
  *                     FREQUENCY TABLE HELPERS                       *
  *********************************************************************/
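After applying the patch and enabling the new Kconfig options, the governors the running kernel actually registered can be verified from userspace through the standard scaling_available_governors file; the list should include ondemandx, interactiveX, and Lionheart. A minimal check might read:

/*
 * Hypothetical post-build check: print the governors registered on cpu0.
 * Uses only the standard cpufreq sysfs layout.
 */
#include <stdio.h>

int main(void)
{
    char line[256];
    FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/"
                    "scaling_available_governors", "r");

    if (!f) {
        perror("scaling_available_governors");
        return 1;
    }
    if (fgets(line, sizeof(line), f))
        printf("available governors: %s", line);
    fclose(f);
    return 0;
}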
