/*
 * drivers/cpufreq/cpufreq_ondemand.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                    Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/ktime.h>
#include <linux/sched.h>
/*
 * dbs is used in this file as a short form for demand-based switching.
 * It helps to keep variable names smaller and simpler.
 */
#define DEF_FREQUENCY_DOWN_DIFFERENTIAL		(10)
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(100000)
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(3)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)
#define MAX_DEFAULT_SAMPLING_RATE		(300 * 1000U)
/*
 * The polling frequency of this governor depends on the capability of
 * the processor. The default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10 ms, using an appropriate sampling rate.
 * For CPUs with transition latency > 10 ms (mostly drivers with
 * CPUFREQ_ETERNAL) this governor will not work.
 * All times here are in us (microseconds).
 */
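/*
 * Illustrative numbers (not from this file): a CPU with a 10 us transition
 * latency gets a default sampling rate of 10 us * LATENCY_MULTIPLIER =
 * 10,000 us, i.e. the load is re-evaluated roughly every 10 ms and the
 * switching overhead stays around 0.1% of each sampling period.
 */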
#define MIN_SAMPLING_RATE_RATIO			(2)

static unsigned int min_sampling_rate;

#define LATENCY_MULTIPLIER			(1000)
#define MIN_LATENCY_MULTIPLIER			(100)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)
static void do_dbs_timer(struct work_struct *work);
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static
#endif
struct cpufreq_governor cpufreq_gov_ondemand = {
	.name			= "ondemand",
	.governor		= cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};
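/*
 * Sampling types: with powersave_bias enabled, each sampling period is split
 * in two. A NORMAL sample evaluates the load and picks the freq_hi/freq_lo
 * pair; the following SUB sample only drops to freq_lo for the remainder of
 * the period, so the average frequency lands between two table entries.
 */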
enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
struct cpu_dbs_info_s {
	cputime64_t prev_cpu_idle;
	cputime64_t prev_cpu_iowait;
	cputime64_t prev_cpu_wall;
	cputime64_t prev_cpu_nice;
	struct cpufreq_policy *cur_policy;
	struct delayed_work work;
	struct cpufreq_frequency_table *freq_table;
	unsigned int freq_lo;
	unsigned int freq_lo_jiffies;
	unsigned int freq_hi_jiffies;
	unsigned int rate_mult;
	int cpu;
	unsigned int sample_type:1;
	/*
	 * percpu mutex that serializes governor limit change with
	 * do_dbs_timer invocation. We do not want do_dbs_timer to run
	 * when user is changing the governor or limits.
	 */
	struct mutex timer_mutex;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);
static unsigned int dbs_enable;	/* number of CPUs using this policy */

/*
 * dbs_mutex protects dbs_enable in governor start/stop.
 */
static DEFINE_MUTEX(dbs_mutex);
static struct dbs_tuners {
	unsigned int sampling_rate;
	unsigned int up_threshold;
	unsigned int down_differential;
	unsigned int ignore_nice;
	unsigned int sampling_down_factor;
	unsigned int powersave_bias;
	unsigned int io_is_busy;
} dbs_tuners_ins = {
	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
	.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
	.ignore_nice = 0,
	.powersave_bias = 0,
};
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time  = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = jiffies_to_usecs(cur_wall_time);

	return jiffies_to_usecs(idle_time);
}
static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall)
{
	u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);

	if (iowait_time == -1ULL)
		return 0;

	return iowait_time;
}
/*
 * Find the right freq to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
 */
static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
					  unsigned int freq_next,
					  unsigned int relation)
{
	unsigned int freq_req, freq_reduc, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index = 0;
	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
						   policy->cpu);

	if (!dbs_info->freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_next;
	}

	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
			relation, &index);
	freq_req = dbs_info->freq_table[index].frequency;
	freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;
	/* Find freq bounds for freq_avg in freq_table */
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_H, &index);
	freq_lo = dbs_info->freq_table[index].frequency;
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_L, &index);
	freq_hi = dbs_info->freq_table[index].frequency;
	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_lo;
	}
	jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
	jiffies_hi += ((freq_hi - freq_lo) / 2);
	jiffies_hi /= (freq_hi - freq_lo);
	jiffies_lo = jiffies_total - jiffies_hi;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_jiffies = jiffies_lo;
	dbs_info->freq_hi_jiffies = jiffies_hi;
	return freq_hi;
}
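/*
 * Worked example (hypothetical numbers): with powersave_bias = 100 (10%),
 * if freq_next resolves to freq_req = 2000 MHz, then freq_avg = 1800 MHz.
 * With neighboring table entries freq_lo = 1600 MHz and freq_hi = 2000 MHz
 * and a 10-jiffy sampling period:
 * jiffies_hi = (200 * 10 + 200) / 400 = 5, jiffies_lo = 5, so alternating
 * 5 jiffies at each frequency averages out to the requested 1800 MHz.
 */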
static void ondemand_powersave_bias_init_cpu(int cpu)
{
	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

	dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
	dbs_info->freq_lo = 0;
}
static void ondemand_powersave_bias_init(void)
{
	int i;

	for_each_online_cpu(i) {
		ondemand_powersave_bias_init_cpu(i);
	}
}
/************************** sysfs interface ************************/

static ssize_t show_sampling_rate_min(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", min_sampling_rate);
}

define_one_global_ro(sampling_rate_min);
/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object)					\
static ssize_t show_##file_name						\
(struct kobject *kobj, struct attribute *attr, char *buf)		\
{									\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
}
show_one(sampling_rate, sampling_rate);
show_one(io_is_busy, io_is_busy);
show_one(up_threshold, up_threshold);
show_one(sampling_down_factor, sampling_down_factor);
show_one(ignore_nice_load, ignore_nice);
show_one(powersave_bias, powersave_bias);
/**
 * update_sampling_rate - update sampling rate effective immediately if needed.
 * @new_rate: new sampling rate
 *
 * If the new rate is smaller than the old, simply updating
 * dbs_tuners_ins.sampling_rate might not be appropriate. For example, if the
 * original sampling_rate was 1 second and the requested new sampling rate is
 * 10 ms because the user needs an immediate reaction from the ondemand
 * governor, but is not sure whether a higher frequency will be required,
 * then the governor may change the sampling rate too late, up to 1 second
 * later. Thus, if we are reducing the sampling rate, we need to make the
 * new value effective immediately.
 */
static void update_sampling_rate(unsigned int new_rate)
{
	int cpu;

	dbs_tuners_ins.sampling_rate = new_rate
				     = max(new_rate, min_sampling_rate);

	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy;
		struct cpu_dbs_info_s *dbs_info;
		unsigned long next_sampling, appointed_at;

		policy = cpufreq_cpu_get(cpu);
		if (!policy)
			continue;
		dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu);
		cpufreq_cpu_put(policy);

		mutex_lock(&dbs_info->timer_mutex);

		if (!delayed_work_pending(&dbs_info->work)) {
			mutex_unlock(&dbs_info->timer_mutex);
			continue;
		}

		next_sampling = jiffies + usecs_to_jiffies(new_rate);
		appointed_at = dbs_info->work.timer.expires;

		if (time_before(next_sampling, appointed_at)) {

			mutex_unlock(&dbs_info->timer_mutex);
			cancel_delayed_work_sync(&dbs_info->work);
			mutex_lock(&dbs_info->timer_mutex);

			schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work,
						 usecs_to_jiffies(new_rate));

		}
		mutex_unlock(&dbs_info->timer_mutex);
	}
}
static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
				   const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	update_sampling_rate(input);
	return count;
}
static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
				const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	dbs_tuners_ins.io_is_busy = !!input;
	return count;
}
static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
	    input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}
	dbs_tuners_ins.up_threshold = input;
	return count;
}
static ssize_t store_sampling_down_factor(struct kobject *a,
			struct attribute *b, const char *buf, size_t count)
{
	unsigned int input, j;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;
	dbs_tuners_ins.sampling_down_factor = input;

	/* Reset down sampling multiplier in case it was active */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(od_cpu_dbs_info, j);
		dbs_info->rate_mult = 1;
	}
	return count;
}
static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
				      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
		return count;
	}
	dbs_tuners_ins.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(od_cpu_dbs_info, j);
		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&dbs_info->prev_cpu_wall);
		if (dbs_tuners_ins.ignore_nice)
			dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

	}
	return count;
}
static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
				    const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1000)
		input = 1000;

	dbs_tuners_ins.powersave_bias = input;
	ondemand_powersave_bias_init();
	return count;
}
define_one_global_rw(sampling_rate);
define_one_global_rw(io_is_busy);
define_one_global_rw(up_threshold);
define_one_global_rw(sampling_down_factor);
define_one_global_rw(ignore_nice_load);
define_one_global_rw(powersave_bias);
static struct attribute *dbs_attributes[] = {
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&up_threshold.attr,
	&sampling_down_factor.attr,
	&ignore_nice_load.attr,
	&powersave_bias.attr,
	&io_is_busy.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "ondemand",
};
/************************** sysfs end ************************/
static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
{
	if (dbs_tuners_ins.powersave_bias)
		freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
	else if (p->cur == p->max)
		return;

	__cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ?
			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
	unsigned int max_load_freq;

	struct cpufreq_policy *policy;
	unsigned int j;

	this_dbs_info->freq_lo = 0;
	policy = this_dbs_info->cur_policy;
	/*
	 * Every sampling_rate, we check whether the current idle time is less
	 * than 20% (default); if it is, we try to increase the frequency.
	 * Every sampling_rate, we also look for the lowest frequency which
	 * can sustain the load while keeping idle time over 30%. If such a
	 * frequency exists, we try to decrease to this frequency.
	 *
	 * Any frequency increase takes it to the maximum frequency.
	 * Frequency reduction happens at minimum steps of
	 * 5% (default) of the current frequency.
	 */
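	/*
	 * Illustrative: with up_threshold = 80 and down_differential = 10,
	 * a CPU busier than 80% over the last sample jumps straight to
	 * policy->max, while one whose load-weighted frequency falls below
	 * 70% of the current frequency is scaled down (see below).
	 */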
	/* Get Absolute Load - in terms of freq */
	max_load_freq = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info_s *j_dbs_info;
		cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
		unsigned int idle_time, wall_time, iowait_time;
		unsigned int load, load_freq;
		int freq_avg;

		j_dbs_info = &per_cpu(od_cpu_dbs_info, j);

		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
		cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);

		wall_time = (unsigned int)
			(cur_wall_time - j_dbs_info->prev_cpu_wall);
		j_dbs_info->prev_cpu_wall = cur_wall_time;

		idle_time = (unsigned int)
			(cur_idle_time - j_dbs_info->prev_cpu_idle);
		j_dbs_info->prev_cpu_idle = cur_idle_time;

		iowait_time = (unsigned int)
			(cur_iowait_time - j_dbs_info->prev_cpu_iowait);
		j_dbs_info->prev_cpu_iowait = cur_iowait_time;
		if (dbs_tuners_ins.ignore_nice) {
			u64 cur_nice;
			unsigned long cur_nice_jiffies;

			cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
					 j_dbs_info->prev_cpu_nice;
			/*
			 * Assumption: nice time between sampling periods will
			 * be less than 2^32 jiffies on 32-bit systems
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}
		/*
		 * For the purpose of ondemand, waiting for disk IO is an
		 * indication that you're performance critical, and not that
		 * the system is actually idle. So subtract the iowait time
		 * from the cpu idle time.
		 */

		if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time)
			idle_time -= iowait_time;

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		load = 100 * (wall_time - idle_time) / wall_time;
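		/*
		 * e.g. (hypothetical) wall_time = 10000 us and
		 * idle_time = 3000 us give load = 70.
		 */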
		freq_avg = __cpufreq_driver_getavg(policy, j);
		if (freq_avg <= 0)
			freq_avg = policy->cur;

		load_freq = load * freq_avg;
		if (load_freq > max_load_freq)
			max_load_freq = load_freq;
	}
	/* Check for frequency increase */
	if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			this_dbs_info->rate_mult =
				dbs_tuners_ins.sampling_down_factor;
		dbs_freq_increase(policy, policy->max);
		return;
	}
	/* Check for frequency decrease */
	/* if we cannot reduce the frequency anymore, break out early */
	if (policy->cur == policy->min)
		return;
	/*
	 * The optimal frequency is the lowest frequency that can support
	 * the current CPU usage without triggering the up policy. To be
	 * safe, we focus 10 points below the threshold.
	 */
	if (max_load_freq <
	    (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) *
	     policy->cur) {
		unsigned int freq_next;
		freq_next = max_load_freq /
				(dbs_tuners_ins.up_threshold -
				 dbs_tuners_ins.down_differential);
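		/*
		 * Illustrative: at policy->cur = 2000 MHz with a measured
		 * load of 35%, max_load_freq = 35 * 2000 = 70000, below
		 * (80 - 10) * 2000 = 140000, so we scale down to
		 * freq_next = 70000 / (80 - 10) = 1000 MHz, where the same
		 * work would load the CPU to about 70%.
		 */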
		/* No longer fully busy, reset rate_mult */
		this_dbs_info->rate_mult = 1;

		if (freq_next < policy->min)
			freq_next = policy->min;

		if (!dbs_tuners_ins.powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_L);
		} else {
			int freq = powersave_bias_target(policy, freq_next,
					CPUFREQ_RELATION_L);
			__cpufreq_driver_target(policy, freq,
					CPUFREQ_RELATION_L);
		}
	}
}
static void do_dbs_timer(struct work_struct *work)
{
	struct cpu_dbs_info_s *dbs_info =
		container_of(work, struct cpu_dbs_info_s, work.work);
	unsigned int cpu = dbs_info->cpu;
	int sample_type = dbs_info->sample_type;

	int delay;

	mutex_lock(&dbs_info->timer_mutex);

	/* Common NORMAL_SAMPLE setup */
	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	if (!dbs_tuners_ins.powersave_bias ||
	    sample_type == DBS_NORMAL_SAMPLE) {
		dbs_check_cpu(dbs_info);
		if (dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			dbs_info->sample_type = DBS_SUB_SAMPLE;
			delay = dbs_info->freq_hi_jiffies;
		} else {
			/*
			 * We want all CPUs to do sampling nearly on
			 * the same jiffy
			 */
			delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
				* dbs_info->rate_mult);

			if (num_online_cpus() > 1)
				delay -= jiffies % delay;
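			/*
			 * Illustrative: with delay = 10 jiffies and
			 * jiffies % 10 == 7, this timer fires in 3 jiffies,
			 * i.e. at the next multiple of 10, so all CPUs
			 * sample on (nearly) the same jiffy.
			 */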
		}
	} else {
		__cpufreq_driver_target(dbs_info->cur_policy,
			dbs_info->freq_lo, CPUFREQ_RELATION_H);
		delay = dbs_info->freq_lo_jiffies;
	}
	schedule_delayed_work_on(cpu, &dbs_info->work, delay);
	mutex_unlock(&dbs_info->timer_mutex);
}
static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
	/* We want all CPUs to do sampling nearly on the same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	if (num_online_cpus() > 1)
		delay -= jiffies % delay;

	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
	schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
}
static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
	cancel_delayed_work_sync(&dbs_info->work);
}
/*
 * Not all CPUs want IO time to be accounted as busy; this depends on how
 * efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and old
 * Intel systems.
 * Mike Chan (android.com) claims this is also not true for ARM.
 * Because of this, whitelist specific known (series) of CPUs by default, and
 * leave all others up to the user.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
	/*
	 * For Intel, Core 2 (model 15) and later have an efficient idle.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model >= 15)
		return 1;
#endif
	return 0;
}
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int j;
	int rc;

	this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) || (!policy->cur))
			return -EINVAL;

		mutex_lock(&dbs_mutex);

		dbs_enable++;
		for_each_cpu(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&j_dbs_info->prev_cpu_wall);
			if (dbs_tuners_ins.ignore_nice)
				j_dbs_info->prev_cpu_nice =
						kcpustat_cpu(j).cpustat[CPUTIME_NICE];
		}
		this_dbs_info->cpu = cpu;
		this_dbs_info->rate_mult = 1;
		ondemand_powersave_bias_init_cpu(cpu);
		/*
		 * Start the timer/schedule work when this governor
		 * is used for the first time.
		 */
		if (dbs_enable == 1) {
			unsigned int latency;

			rc = sysfs_create_group(cpufreq_global_kobject,
						&dbs_attr_group);
			if (rc) {
				mutex_unlock(&dbs_mutex);
				return rc;
			}
			/* policy latency is in ns. Convert it to us first */
			latency = policy->cpuinfo.transition_latency / 1000;
			if (latency == 0)
				latency = 1;
			/* Bring kernel and HW constraints together */
			min_sampling_rate = max(min_sampling_rate,
					MIN_LATENCY_MULTIPLIER * latency);
			dbs_tuners_ins.sampling_rate =
				max(min_sampling_rate,
				    latency * LATENCY_MULTIPLIER);
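			/*
			 * Illustrative: transition_latency = 100,000 ns gives
			 * latency = 100 us, so sampling_rate becomes at least
			 * 100 * 1000 = 100,000 us (100 ms), and
			 * min_sampling_rate at least 100 * 100 = 10,000 us.
			 */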
			/*
			 * Cut the default sampling rate to 300 ms if it was
			 * above, taking care not to set it below what the
			 * transition latency requires.
			 */
			if (dbs_tuners_ins.sampling_rate > MAX_DEFAULT_SAMPLING_RATE) {
				dbs_tuners_ins.sampling_rate =
					max(min_sampling_rate, MAX_DEFAULT_SAMPLING_RATE);
				printk(KERN_INFO "CPUFREQ: ondemand sampling "
				       "rate set to %d ms\n",
				       dbs_tuners_ins.sampling_rate / 1000);
			}
			/*
			 * Be conservative with respect to performance.
			 * If an application calculates using two threads
			 * depending on each other, they will be run on several
			 * CPU cores, resulting in 50% load on both.
			 * SLED might still want to prefer 80% up_threshold
			 * by default, but we cannot differentiate that here.
			 */
			if (num_online_cpus() > 1)
				dbs_tuners_ins.up_threshold =
					DEF_FREQUENCY_UP_THRESHOLD / 2;
			dbs_tuners_ins.io_is_busy = should_io_be_busy();
		}
		mutex_unlock(&dbs_mutex);

		mutex_init(&this_dbs_info->timer_mutex);
		dbs_timer_init(this_dbs_info);
		break;
	case CPUFREQ_GOV_STOP:
		dbs_timer_exit(this_dbs_info);

		mutex_lock(&dbs_mutex);
		mutex_destroy(&this_dbs_info->timer_mutex);
		dbs_enable--;
		mutex_unlock(&dbs_mutex);
		if (!dbs_enable)
			sysfs_remove_group(cpufreq_global_kobject,
					   &dbs_attr_group);

		break;
	case CPUFREQ_GOV_LIMITS:
		mutex_lock(&this_dbs_info->timer_mutex);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
				policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
				policy->min, CPUFREQ_RELATION_L);
		mutex_unlock(&this_dbs_info->timer_mutex);
		break;
	}
	return 0;
}
static int __init cpufreq_gov_dbs_init(void)
{
	u64 idle_time;
	int cpu = get_cpu();

	idle_time = get_cpu_idle_time_us(cpu, NULL);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		dbs_tuners_ins.down_differential =
					MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
		/*
		 * In the nohz/micro accounting case we set the minimum
		 * frequency not depending on HZ, but fixed (very low). The
		 * deferred timer might skip some samples if idle/sleeping,
		 * as needed.
		 */
		min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
	} else {
		/* For correct statistics, we need 10 ticks for each measure */
		min_sampling_rate =
			MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
	}
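	/*
	 * Illustrative: at HZ = 250 a tick is 4 ms, so 10 ticks = 40,000 us
	 * and min_sampling_rate = 2 * 40,000 = 80,000 us (80 ms); at
	 * HZ = 1000 it would be 20,000 us (20 ms).
	 */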
	return cpufreq_register_governor(&cpufreq_gov_ondemand);
}
static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_ondemand);
}
827 MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
828 MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
829 MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
830 "Low Latency Frequency Transition capable processors");
831 MODULE_LICENSE("GPL");
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);