2 * linux/drivers/cpufreq/cpufreq.c
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
7 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
8 * Added handling for CPU hotplug
9 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
10 * Fix handling for CPU hotplug -- affected CPUs
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
18 #include <linux/kernel.h>
19 #include <linux/module.h>
20 #include <linux/init.h>
21 #include <linux/notifier.h>
22 #include <linux/cpufreq.h>
23 #include <linux/delay.h>
24 #include <linux/interrupt.h>
25 #include <linux/spinlock.h>
26 #include <linux/device.h>
27 #include <linux/slab.h>
28 #include <linux/cpu.h>
29 #include <linux/completion.h>
30 #include <linux/mutex.h>
32 #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
36 * The "cpufreq driver" - the arch- or hardware-dependent low
37 * level driver of CPUFreq support, and its spinlock. This lock
38 * also protects the cpufreq_cpu_data array.
40 static struct cpufreq_driver *cpufreq_driver;
41 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
42 #ifdef CONFIG_HOTPLUG_CPU
43 /* This one keeps track of the previously set governor of a removed CPU */
44 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
46 static DEFINE_SPINLOCK(cpufreq_driver_lock);
49 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
50 * all cpufreq/hotplug/workqueue/etc related lock issues.
52 * The rules for this semaphore:
53 * - Any routine that wants to read from the policy structure will
54 * do a down_read on this semaphore.
55 * - Any routine that will write to the policy structure and/or may take away
56 * the policy altogether (eg. CPU hotplug), will hold this lock in write
57 * mode before doing so.
60 * - All holders of the lock should check to make sure that the CPU they
61 * are concerned with is online after they get the lock.
62 * - Governor routines that can be called in the cpufreq hotplug path should
63 * not take this sem, as the top-level hotplug notifier handler takes it.
64 * - Lock should not be held across
65 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
67 static DEFINE_PER_CPU(int, policy_cpu);
68 static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
70 #define lock_policy_rwsem(mode, cpu) \
71 int lock_policy_rwsem_##mode \
74 int policy_cpu = per_cpu(policy_cpu, cpu); \
75 BUG_ON(policy_cpu == -1); \
76 down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
77 if (unlikely(!cpu_online(cpu))) { \
78 up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
85 lock_policy_rwsem(read, cpu);
86 EXPORT_SYMBOL_GPL(lock_policy_rwsem_read);
88 lock_policy_rwsem(write, cpu);
89 EXPORT_SYMBOL_GPL(lock_policy_rwsem_write);
91 void unlock_policy_rwsem_read(int cpu)
93 int policy_cpu = per_cpu(policy_cpu, cpu);
94 BUG_ON(policy_cpu == -1);
95 up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
97 EXPORT_SYMBOL_GPL(unlock_policy_rwsem_read);
99 void unlock_policy_rwsem_write(int cpu)
101 int policy_cpu = per_cpu(policy_cpu, cpu);
102 BUG_ON(policy_cpu == -1);
103 up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
105 EXPORT_SYMBOL_GPL(unlock_policy_rwsem_write);
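/*
 * Illustrative sketch (not part of the original file): the typical read-side
 * pattern for the per-CPU policy rwsem helpers above, following the rules
 * documented at the top of this section.  "example_read_policy_cur" is a
 * hypothetical caller.
 */
#if 0
static unsigned int example_read_policy_cur(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned int cur = 0;

	if (lock_policy_rwsem_read(cpu) < 0)
		return 0;		/* CPU went away before we got the lock */

	/* the CPU is guaranteed online while the semaphore is held */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy)
		cur = policy->cur;

	unlock_policy_rwsem_read(cpu);
	return cur;
}
#endif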
108 /* internal prototypes */
109 static int __cpufreq_governor(struct cpufreq_policy *policy,
111 static unsigned int __cpufreq_get(unsigned int cpu);
112 static void handle_update(struct work_struct *work);
115 * Two notifier lists: the "policy" list is involved in the
116 * validation process for a new CPU frequency policy; the
117 * "transition" list for kernel code that needs to handle
118 * changes to devices when the CPU clock speed changes.
119 * The mutex locks both lists.
121 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
122 static struct srcu_notifier_head cpufreq_transition_notifier_list;
124 static bool init_cpufreq_transition_notifier_list_called;
125 static int __init init_cpufreq_transition_notifier_list(void)
127 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
128 init_cpufreq_transition_notifier_list_called = true;
131 pure_initcall(init_cpufreq_transition_notifier_list);
133 static LIST_HEAD(cpufreq_governor_list);
134 static DEFINE_MUTEX(cpufreq_governor_mutex);
136 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
138 struct cpufreq_policy *data;
141 if (cpu >= nr_cpu_ids)
144 /* get the cpufreq driver */
145 spin_lock_irqsave(&cpufreq_driver_lock, flags);
150 if (!try_module_get(cpufreq_driver->owner))
155 data = per_cpu(cpufreq_cpu_data, cpu);
158 goto err_out_put_module;
160 if (!kobject_get(&data->kobj))
161 goto err_out_put_module;
163 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
167 module_put(cpufreq_driver->owner);
169 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
173 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
176 void cpufreq_cpu_put(struct cpufreq_policy *data)
178 kobject_put(&data->kobj);
179 module_put(cpufreq_driver->owner);
181 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
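/*
 * Illustrative sketch (not part of the original file): callers that need a
 * policy pointer pair cpufreq_cpu_get() with cpufreq_cpu_put(); the get takes
 * both a kobject reference on the policy and a module reference on the
 * driver, and the put releases them.  "example_show_min" is a hypothetical
 * caller.
 */
#if 0
static unsigned int example_show_min(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int min = 0;

	if (!policy)
		return 0;	/* no driver loaded or no policy for this CPU */

	min = policy->min;

	cpufreq_cpu_put(policy);	/* drop kobject + module references */
	return min;
}
#endif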
184 /*********************************************************************
185 * UNIFIED DEBUG HELPERS *
186 *********************************************************************/
187 #ifdef CONFIG_CPU_FREQ_DEBUG
189 /* what part(s) of the CPUfreq subsystem are debugged? */
190 static unsigned int debug;
192 /* is the debug output ratelimit'ed using printk_ratelimit? User can
193 * set or modify this value.
195 static unsigned int debug_ratelimit = 1;
197 /* is the printk_ratelimit'ing enabled? It's enabled after a successful
198 * loading of a cpufreq driver, temporarily disabled when a new policy
199 * is set, and disabled upon cpufreq driver removal
201 static unsigned int disable_ratelimit = 1;
202 static DEFINE_SPINLOCK(disable_ratelimit_lock);
204 static void cpufreq_debug_enable_ratelimit(void)
208 spin_lock_irqsave(&disable_ratelimit_lock, flags);
209 if (disable_ratelimit)
211 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
214 static void cpufreq_debug_disable_ratelimit(void)
218 spin_lock_irqsave(&disable_ratelimit_lock, flags);
220 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
223 void cpufreq_debug_printk(unsigned int type, const char *prefix,
224 const char *fmt, ...)
233 spin_lock_irqsave(&disable_ratelimit_lock, flags);
234 if (!disable_ratelimit && debug_ratelimit
235 && !printk_ratelimit()) {
236 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
239 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
241 len = snprintf(s, 256, KERN_DEBUG "%s: ", prefix);
244 len += vsnprintf(&s[len], (256 - len), fmt, args);
252 EXPORT_SYMBOL(cpufreq_debug_printk);
255 module_param(debug, uint, 0644);
256 MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core,"
257 " 2 to debug drivers, and 4 to debug governors.");
259 module_param(debug_ratelimit, uint, 0644);
260 MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging:"
261 " set to 0 to disable ratelimiting.");
263 #else /* !CONFIG_CPU_FREQ_DEBUG */
265 static inline void cpufreq_debug_enable_ratelimit(void) { return; }
266 static inline void cpufreq_debug_disable_ratelimit(void) { return; }
268 #endif /* CONFIG_CPU_FREQ_DEBUG */
271 /*********************************************************************
272 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
273 *********************************************************************/
276 * adjust_jiffies - adjust the system "loops_per_jiffy"
278 * This function alters the system "loops_per_jiffy" for the clock
279 * speed change. Note that loops_per_jiffy cannot be updated on SMP
280 * systems as each CPU might be scaled differently. So, use the arch
281 * per-CPU loops_per_jiffy value wherever possible.
284 static unsigned long l_p_j_ref;
285 static unsigned int l_p_j_ref_freq;
287 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
289 if (ci->flags & CPUFREQ_CONST_LOOPS)
292 if (!l_p_j_ref_freq) {
293 l_p_j_ref = loops_per_jiffy;
294 l_p_j_ref_freq = ci->old;
295 dprintk("saving %lu as reference value for loops_per_jiffy; "
296 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
298 if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) ||
299 (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
300 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
301 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
303 dprintk("scaling loops_per_jiffy to %lu "
304 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
308 static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
316 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
317 * on frequency transition.
319 * This function calls the transition notifiers and the "adjust_jiffies"
320 * function. It is called twice on all CPU frequency changes that have
323 void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
325 struct cpufreq_policy *policy;
327 BUG_ON(irqs_disabled());
329 freqs->flags = cpufreq_driver->flags;
330 dprintk("notification %u of frequency transition to %u kHz\n",
333 policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
336 case CPUFREQ_PRECHANGE:
337 /* detect if the driver reported a value as "old frequency"
338 * which is not equal to what the cpufreq core thinks is
341 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
342 if ((policy) && (policy->cpu == freqs->cpu) &&
343 (policy->cur) && (policy->cur != freqs->old)) {
344 dprintk("Warning: CPU frequency is"
345 " %u kHz, cpufreq assumed %u kHz.\n",
346 freqs->old, policy->cur);
347 freqs->old = policy->cur;
350 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
351 CPUFREQ_PRECHANGE, freqs);
352 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
355 case CPUFREQ_POSTCHANGE:
356 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
357 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
358 CPUFREQ_POSTCHANGE, freqs);
359 if (likely(policy) && likely(policy->cpu == freqs->cpu))
360 policy->cur = freqs->new;
364 EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
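/*
 * Illustrative sketch (not part of the original file): how a scaling driver
 * is expected to bracket an actual hardware frequency change with the two
 * notifications handled above.  "example_driver_set_freq" and
 * "example_hw_write_freq" are hypothetical.
 */
#if 0
static int example_driver_set_freq(struct cpufreq_policy *policy,
				   unsigned int new_khz)
{
	struct cpufreq_freqs freqs;

	freqs.cpu = policy->cpu;
	freqs.old = policy->cur;
	freqs.new = new_khz;

	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	example_hw_write_freq(new_khz);		/* program the hardware */
	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

	return 0;
}
#endif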
368 /*********************************************************************
370 *********************************************************************/
372 static struct cpufreq_governor *__find_governor(const char *str_governor)
374 struct cpufreq_governor *t;
376 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
377 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
384 * cpufreq_parse_governor - parse a governor string
386 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
387 struct cpufreq_governor **governor)
394 if (cpufreq_driver->setpolicy) {
395 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
396 *policy = CPUFREQ_POLICY_PERFORMANCE;
398 } else if (!strnicmp(str_governor, "powersave",
400 *policy = CPUFREQ_POLICY_POWERSAVE;
403 } else if (cpufreq_driver->target) {
404 struct cpufreq_governor *t;
406 mutex_lock(&cpufreq_governor_mutex);
408 t = __find_governor(str_governor);
411 char *name = kasprintf(GFP_KERNEL, "cpufreq_%s",
417 mutex_unlock(&cpufreq_governor_mutex);
418 ret = request_module("%s", name);
419 mutex_lock(&cpufreq_governor_mutex);
422 t = __find_governor(str_governor);
433 mutex_unlock(&cpufreq_governor_mutex);
441 * cpufreq_per_cpu_attr_read() / show_##file_name() -
442 * print out cpufreq information
444 * Write out information from cpufreq_driver->policy[cpu]; object must be
448 #define show_one(file_name, object) \
449 static ssize_t show_##file_name \
450 (struct cpufreq_policy *policy, char *buf) \
452 return sprintf(buf, "%u\n", policy->object); \
455 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
456 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
457 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
458 show_one(scaling_min_freq, min);
459 show_one(scaling_max_freq, max);
460 show_one(scaling_cur_freq, cur);
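/*
 * For reference (illustrative, not part of the original file):
 * show_one(scaling_min_freq, min) above expands to approximately
 *
 *	static ssize_t show_scaling_min_freq(struct cpufreq_policy *policy,
 *					     char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->min);
 *	}
 */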
462 static int __cpufreq_set_policy(struct cpufreq_policy *data,
463 struct cpufreq_policy *policy);
466 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
468 #define store_one(file_name, object) \
469 static ssize_t store_##file_name \
470 (struct cpufreq_policy *policy, const char *buf, size_t count) \
472 unsigned int ret = -EINVAL; \
473 struct cpufreq_policy new_policy; \
475 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
479 ret = sscanf(buf, "%u", &new_policy.object); \
483 ret = __cpufreq_set_policy(policy, &new_policy); \
484 policy->user_policy.object = policy->object; \
486 return ret ? ret : count; \
489 store_one(scaling_min_freq, min);
490 store_one(scaling_max_freq, max);
493 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
495 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
498 unsigned int cur_freq = __cpufreq_get(policy->cpu);
500 return sprintf(buf, "<unknown>");
501 return sprintf(buf, "%u\n", cur_freq);
506 * show_scaling_governor - show the current policy for the specified CPU
508 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
510 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
511 return sprintf(buf, "powersave\n");
512 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
513 return sprintf(buf, "performance\n");
514 else if (policy->governor)
515 return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n",
516 policy->governor->name);
522 * store_scaling_governor - store policy for the specified CPU
524 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
525 const char *buf, size_t count)
527 unsigned int ret = -EINVAL;
528 char str_governor[16];
529 struct cpufreq_policy new_policy;
531 ret = cpufreq_get_policy(&new_policy, policy->cpu);
535 ret = sscanf(buf, "%15s", str_governor);
539 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
540 &new_policy.governor))
543 /* Do not use cpufreq_set_policy here or the user_policy.max
544 will be wrongly overridden */
545 ret = __cpufreq_set_policy(policy, &new_policy);
547 policy->user_policy.policy = policy->policy;
548 policy->user_policy.governor = policy->governor;
557 * show_scaling_driver - show the cpufreq driver currently loaded
559 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
561 return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
565 * show_scaling_available_governors - show the available CPUfreq governors
567 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
571 struct cpufreq_governor *t;
573 if (!cpufreq_driver->target) {
574 i += sprintf(buf, "performance powersave");
578 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
579 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
580 - (CPUFREQ_NAME_LEN + 2)))
582 i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
585 i += sprintf(&buf[i], "\n");
589 static ssize_t show_cpus(const struct cpumask *mask, char *buf)
594 for_each_cpu(cpu, mask) {
596 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
597 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
598 if (i >= (PAGE_SIZE - 5))
601 i += sprintf(&buf[i], "\n");
606 * show_related_cpus - show the CPUs affected by each transition even if
607 * hw coordination is in use
609 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
611 if (cpumask_empty(policy->related_cpus))
612 return show_cpus(policy->cpus, buf);
613 return show_cpus(policy->related_cpus, buf);
617 * show_affected_cpus - show the CPUs affected by each transition
619 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
621 return show_cpus(policy->cpus, buf);
624 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
625 const char *buf, size_t count)
627 unsigned int freq = 0;
630 if (!policy->governor || !policy->governor->store_setspeed)
633 ret = sscanf(buf, "%u", &freq);
637 policy->governor->store_setspeed(policy, freq);
642 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
644 if (!policy->governor || !policy->governor->show_setspeed)
645 return sprintf(buf, "<unsupported>\n");
647 return policy->governor->show_setspeed(policy, buf);
650 #define define_one_ro(_name) \
651 static struct freq_attr _name = \
652 __ATTR(_name, 0444, show_##_name, NULL)
654 #define define_one_ro0400(_name) \
655 static struct freq_attr _name = \
656 __ATTR(_name, 0400, show_##_name, NULL)
658 #define define_one_rw(_name) \
659 static struct freq_attr _name = \
660 __ATTR(_name, 0644, show_##_name, store_##_name)
662 define_one_ro0400(cpuinfo_cur_freq);
663 define_one_ro(cpuinfo_min_freq);
664 define_one_ro(cpuinfo_max_freq);
665 define_one_ro(cpuinfo_transition_latency);
666 define_one_ro(scaling_available_governors);
667 define_one_ro(scaling_driver);
668 define_one_ro(scaling_cur_freq);
669 define_one_ro(related_cpus);
670 define_one_ro(affected_cpus);
671 define_one_rw(scaling_min_freq);
672 define_one_rw(scaling_max_freq);
673 define_one_rw(scaling_governor);
674 define_one_rw(scaling_setspeed);
676 static struct attribute *default_attrs[] = {
677 &cpuinfo_min_freq.attr,
678 &cpuinfo_max_freq.attr,
679 &cpuinfo_transition_latency.attr,
680 &scaling_min_freq.attr,
681 &scaling_max_freq.attr,
684 &scaling_governor.attr,
685 &scaling_driver.attr,
686 &scaling_available_governors.attr,
687 &scaling_setspeed.attr,
691 struct kobject *cpufreq_global_kobject;
692 EXPORT_SYMBOL(cpufreq_global_kobject);
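/*
 * Illustrative sketch (not part of the original file): a scaling driver can
 * expose extra per-policy attributes by building freq_attr objects like the
 * ones above and listing them, NULL-terminated, in cpufreq_driver->attr;
 * they are created by cpufreq_add_dev_interface() below.  All names are
 * hypothetical.
 */
#if 0
static ssize_t show_example_boost(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", 0);		/* hypothetical value */
}

static struct freq_attr example_boost =
	__ATTR(example_boost, 0444, show_example_boost, NULL);

static struct freq_attr *example_driver_attrs[] = {
	&example_boost,
	NULL,				/* the attribute walk stops at NULL */
};
#endif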
694 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
695 #define to_attr(a) container_of(a, struct freq_attr, attr)
697 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
699 struct cpufreq_policy *policy = to_policy(kobj);
700 struct freq_attr *fattr = to_attr(attr);
701 ssize_t ret = -EINVAL;
702 policy = cpufreq_cpu_get(policy->cpu);
706 if (lock_policy_rwsem_read(policy->cpu) < 0)
710 ret = fattr->show(policy, buf);
714 unlock_policy_rwsem_read(policy->cpu);
716 cpufreq_cpu_put(policy);
721 static ssize_t store(struct kobject *kobj, struct attribute *attr,
722 const char *buf, size_t count)
724 struct cpufreq_policy *policy = to_policy(kobj);
725 struct freq_attr *fattr = to_attr(attr);
726 ssize_t ret = -EINVAL;
727 policy = cpufreq_cpu_get(policy->cpu);
731 if (lock_policy_rwsem_write(policy->cpu) < 0)
735 ret = fattr->store(policy, buf, count);
739 unlock_policy_rwsem_write(policy->cpu);
741 cpufreq_cpu_put(policy);
746 static void cpufreq_sysfs_release(struct kobject *kobj)
748 struct cpufreq_policy *policy = to_policy(kobj);
749 dprintk("last reference is dropped\n");
750 complete(&policy->kobj_unregister);
753 static struct sysfs_ops sysfs_ops = {
758 static struct kobj_type ktype_cpufreq = {
759 .sysfs_ops = &sysfs_ops,
760 .default_attrs = default_attrs,
761 .release = cpufreq_sysfs_release,
768 * Positive: When we have a managed CPU and the sysfs got symlinked
770 static int cpufreq_add_dev_policy(unsigned int cpu,
771 struct cpufreq_policy *policy,
772 struct sys_device *sys_dev)
778 #ifdef CONFIG_HOTPLUG_CPU
779 struct cpufreq_governor *gov;
781 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
783 policy->governor = gov;
784 dprintk("Restoring governor %s for cpu %d\n",
785 policy->governor->name, cpu);
789 for_each_cpu(j, policy->cpus) {
790 struct cpufreq_policy *managed_policy;
795 /* Check for existing affected CPUs.
796 * They may not be aware of it due to CPU Hotplug.
797 * cpufreq_cpu_put is called when the device is removed
798 * in __cpufreq_remove_dev()
800 managed_policy = cpufreq_cpu_get(j);
801 if (unlikely(managed_policy)) {
803 /* Set proper policy_cpu */
804 unlock_policy_rwsem_write(cpu);
805 per_cpu(policy_cpu, cpu) = managed_policy->cpu;
807 if (lock_policy_rwsem_write(cpu) < 0) {
808 /* Should not go through policy unlock path */
809 if (cpufreq_driver->exit)
810 cpufreq_driver->exit(policy);
811 cpufreq_cpu_put(managed_policy);
815 spin_lock_irqsave(&cpufreq_driver_lock, flags);
816 cpumask_copy(managed_policy->cpus, policy->cpus);
817 per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
818 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
820 dprintk("CPU already managed, adding link\n");
821 ret = sysfs_create_link(&sys_dev->kobj,
822 &managed_policy->kobj,
825 cpufreq_cpu_put(managed_policy);
827 * Success. We only needed to be added to the mask.
828 * Call driver->exit() because only the cpu parent of
829 * the kobj needed to call init().
831 if (cpufreq_driver->exit)
832 cpufreq_driver->exit(policy);
845 /* symlink affected CPUs */
846 static int cpufreq_add_dev_symlink(unsigned int cpu,
847 struct cpufreq_policy *policy)
852 for_each_cpu(j, policy->cpus) {
853 struct cpufreq_policy *managed_policy;
854 struct sys_device *cpu_sys_dev;
861 dprintk("CPU %u already managed, adding link\n", j);
862 managed_policy = cpufreq_cpu_get(cpu);
863 cpu_sys_dev = get_cpu_sysdev(j);
864 ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
867 cpufreq_cpu_put(managed_policy);
874 static int cpufreq_add_dev_interface(unsigned int cpu,
875 struct cpufreq_policy *policy,
876 struct sys_device *sys_dev)
878 struct cpufreq_policy new_policy;
879 struct freq_attr **drv_attr;
884 /* prepare interface data */
885 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
886 &sys_dev->kobj, "cpufreq");
890 /* set up files for this cpu device */
891 drv_attr = cpufreq_driver->attr;
892 while ((drv_attr) && (*drv_attr)) {
893 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
895 goto err_out_kobj_put;
898 if (cpufreq_driver->get) {
899 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
901 goto err_out_kobj_put;
903 if (cpufreq_driver->target) {
904 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
906 goto err_out_kobj_put;
909 spin_lock_irqsave(&cpufreq_driver_lock, flags);
910 for_each_cpu(j, policy->cpus) {
913 per_cpu(cpufreq_cpu_data, j) = policy;
914 per_cpu(policy_cpu, j) = policy->cpu;
916 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
918 ret = cpufreq_add_dev_symlink(cpu, policy);
920 goto err_out_kobj_put;
922 memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
923 /* assure that the starting sequence is run in __cpufreq_set_policy */
924 policy->governor = NULL;
926 /* set default policy */
927 ret = __cpufreq_set_policy(policy, &new_policy);
928 policy->user_policy.policy = policy->policy;
929 policy->user_policy.governor = policy->governor;
932 dprintk("setting policy failed\n");
933 if (cpufreq_driver->exit)
934 cpufreq_driver->exit(policy);
939 kobject_put(&policy->kobj);
940 wait_for_completion(&policy->kobj_unregister);
946 * cpufreq_add_dev - add a CPU device
948 * Adds the cpufreq interface for a CPU device.
950 * The Oracle says: try running cpufreq registration/unregistration concurrently
951 * with cpu hotplugging and all hell will break loose. Tried to clean this
952 * mess up, but more thorough testing is needed. - Mathieu
954 static int cpufreq_add_dev(struct sys_device *sys_dev)
956 unsigned int cpu = sys_dev->id;
957 int ret = 0, found = 0;
958 struct cpufreq_policy *policy;
961 #ifdef CONFIG_HOTPLUG_CPU
965 if (cpu_is_offline(cpu))
968 cpufreq_debug_disable_ratelimit();
969 dprintk("adding CPU %u\n", cpu);
972 /* check whether a different CPU already registered this
973 * CPU because it is in the same boat. */
974 policy = cpufreq_cpu_get(cpu);
975 if (unlikely(policy)) {
976 cpufreq_cpu_put(policy);
977 cpufreq_debug_enable_ratelimit();
982 if (!try_module_get(cpufreq_driver->owner)) {
988 policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
992 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
993 goto err_free_policy;
995 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
996 goto err_free_cpumask;
999 cpumask_copy(policy->cpus, cpumask_of(cpu));
1001 /* Initially set CPU itself as the policy_cpu */
1002 per_cpu(policy_cpu, cpu) = cpu;
1003 ret = (lock_policy_rwsem_write(cpu) < 0);
1006 init_completion(&policy->kobj_unregister);
1007 INIT_WORK(&policy->update, handle_update);
1009 /* Set governor before ->init, so that driver could check it */
1010 #ifdef CONFIG_HOTPLUG_CPU
1011 for_each_online_cpu(sibling) {
1012 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
1013 if (cp && cp->governor &&
1014 (cpumask_test_cpu(cpu, cp->related_cpus))) {
1015 policy->governor = cp->governor;
1022 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
1023 /* call driver. From then on the cpufreq must be able
1024 * to accept all calls to ->verify and ->setpolicy for this CPU
1026 ret = cpufreq_driver->init(policy);
1028 dprintk("initialization failed\n");
1029 goto err_unlock_policy;
1031 policy->user_policy.min = policy->min;
1032 policy->user_policy.max = policy->max;
1034 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1035 CPUFREQ_START, policy);
1037 ret = cpufreq_add_dev_policy(cpu, policy, sys_dev);
1040 /* This is a managed cpu, symlink created,
1043 goto err_unlock_policy;
1046 ret = cpufreq_add_dev_interface(cpu, policy, sys_dev);
1048 goto err_out_unregister;
1050 unlock_policy_rwsem_write(cpu);
1052 kobject_uevent(&policy->kobj, KOBJ_ADD);
1053 module_put(cpufreq_driver->owner);
1054 dprintk("initialization complete\n");
1055 cpufreq_debug_enable_ratelimit();
1061 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1062 for_each_cpu(j, policy->cpus)
1063 per_cpu(cpufreq_cpu_data, j) = NULL;
1064 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1066 kobject_put(&policy->kobj);
1067 wait_for_completion(&policy->kobj_unregister);
1070 unlock_policy_rwsem_write(cpu);
1072 free_cpumask_var(policy->cpus);
1076 module_put(cpufreq_driver->owner);
1078 cpufreq_debug_enable_ratelimit();
1084 * __cpufreq_remove_dev - remove a CPU device
1086 * Removes the cpufreq interface for a CPU device.
1087 * Caller should already have policy_rwsem in write mode for this CPU.
1088 * This routine frees the rwsem before returning.
1090 static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1092 unsigned int cpu = sys_dev->id;
1093 unsigned long flags;
1094 struct cpufreq_policy *data;
1096 struct sys_device *cpu_sys_dev;
1100 cpufreq_debug_disable_ratelimit();
1101 dprintk("unregistering CPU %u\n", cpu);
1103 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1104 data = per_cpu(cpufreq_cpu_data, cpu);
1107 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1108 cpufreq_debug_enable_ratelimit();
1109 unlock_policy_rwsem_write(cpu);
1112 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1116 /* if this isn't the CPU which is the parent of the kobj, we
1117 * only need to unlink, put and exit
1119 if (unlikely(cpu != data->cpu)) {
1120 dprintk("removing link\n");
1121 cpumask_clear_cpu(cpu, data->cpus);
1122 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1123 sysfs_remove_link(&sys_dev->kobj, "cpufreq");
1124 cpufreq_cpu_put(data);
1125 cpufreq_debug_enable_ratelimit();
1126 unlock_policy_rwsem_write(cpu);
1133 #ifdef CONFIG_HOTPLUG_CPU
1134 strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
1138 /* if we have other CPUs still registered, we need to unlink them,
1139 * or else wait_for_completion below will lock up. Clean the
1140 * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
1141 * the sysfs links afterwards.
1143 if (unlikely(cpumask_weight(data->cpus) > 1)) {
1144 for_each_cpu(j, data->cpus) {
1147 per_cpu(cpufreq_cpu_data, j) = NULL;
1151 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1153 if (unlikely(cpumask_weight(data->cpus) > 1)) {
1154 for_each_cpu(j, data->cpus) {
1157 dprintk("removing link for cpu %u\n", j);
1158 #ifdef CONFIG_HOTPLUG_CPU
1159 strncpy(per_cpu(cpufreq_cpu_governor, j),
1160 data->governor->name, CPUFREQ_NAME_LEN);
1162 cpu_sys_dev = get_cpu_sysdev(j);
1163 sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
1164 cpufreq_cpu_put(data);
1168 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1171 if (cpufreq_driver->target)
1172 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1174 kobject_put(&data->kobj);
1176 /* we need to make sure that the underlying kobj is actually
1177 * not referenced anymore by anybody before we proceed with
1180 dprintk("waiting for dropping of refcount\n");
1181 wait_for_completion(&data->kobj_unregister);
1182 dprintk("wait complete\n");
1184 if (cpufreq_driver->exit)
1185 cpufreq_driver->exit(data);
1187 unlock_policy_rwsem_write(cpu);
1189 free_cpumask_var(data->related_cpus);
1190 free_cpumask_var(data->cpus);
1192 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1194 cpufreq_debug_enable_ratelimit();
1199 static int cpufreq_remove_dev(struct sys_device *sys_dev)
1201 unsigned int cpu = sys_dev->id;
1204 if (cpu_is_offline(cpu))
1207 if (unlikely(lock_policy_rwsem_write(cpu)))
1210 retval = __cpufreq_remove_dev(sys_dev);
1215 static void handle_update(struct work_struct *work)
1217 struct cpufreq_policy *policy =
1218 container_of(work, struct cpufreq_policy, update);
1219 unsigned int cpu = policy->cpu;
1220 dprintk("handle_update for cpu %u called\n", cpu);
1221 cpufreq_update_policy(cpu);
1225 * cpufreq_out_of_sync - If the actual and saved CPU frequencies differ, we're in deep trouble.
1227 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1228 * @new_freq: CPU frequency the CPU actually runs at
1230 * We adjust to the current frequency first, and clean up later by either
1231 * calling cpufreq_update_policy() or scheduling handle_update().
1233 static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1234 unsigned int new_freq)
1236 struct cpufreq_freqs freqs;
1238 dprintk("Warning: CPU frequency out of sync: cpufreq and timing "
1239 "core think it is %u kHz, but it is %u kHz.\n", old_freq, new_freq);
1242 freqs.old = old_freq;
1243 freqs.new = new_freq;
1244 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
1245 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
1250 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1253 * This is the last known freq, without actually getting it from the driver.
1254 * The return value will be the same as what is shown in scaling_cur_freq in sysfs.
1256 unsigned int cpufreq_quick_get(unsigned int cpu)
1258 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1259 unsigned int ret_freq = 0;
1262 ret_freq = policy->cur;
1263 cpufreq_cpu_put(policy);
1268 EXPORT_SYMBOL(cpufreq_quick_get);
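/*
 * Illustrative sketch (not part of the original file): cpufreq_quick_get()
 * only reports the cached policy->cur and never calls into the driver or the
 * policy rwsem, so it suits contexts where cpufreq_get() below (which may
 * sleep on the rwsem) does not.  "example_report_freq" is hypothetical.
 */
#if 0
static void example_report_freq(unsigned int cpu)
{
	unsigned int khz = cpufreq_quick_get(cpu);

	if (khz)
		printk(KERN_INFO "cpu%u last known frequency: %u kHz\n",
		       cpu, khz);
}
#endif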
1271 static unsigned int __cpufreq_get(unsigned int cpu)
1273 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1274 unsigned int ret_freq = 0;
1276 if (!cpufreq_driver->get)
1279 ret_freq = cpufreq_driver->get(cpu);
1281 if (ret_freq && policy->cur &&
1282 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1283 /* verify no discrepancy between actual and
1284 saved value exists */
1285 if (unlikely(ret_freq != policy->cur)) {
1286 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1287 schedule_work(&policy->update);
1295 * cpufreq_get - get the current CPU frequency (in kHz)
1298 * Get the current CPU frequency
1300 unsigned int cpufreq_get(unsigned int cpu)
1302 unsigned int ret_freq = 0;
1303 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1308 if (unlikely(lock_policy_rwsem_read(cpu)))
1311 ret_freq = __cpufreq_get(cpu);
1313 unlock_policy_rwsem_read(cpu);
1316 cpufreq_cpu_put(policy);
1320 EXPORT_SYMBOL(cpufreq_get);
1324 * cpufreq_suspend - let the low level driver prepare for suspend
1327 static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
1331 int cpu = sysdev->id;
1332 struct cpufreq_policy *cpu_policy;
1334 dprintk("suspending cpu %u\n", cpu);
1336 if (!cpu_online(cpu))
1339 /* we may be lax here as interrupts are off. Nonetheless
1340 * we need to grab the correct cpu policy, so as to check
1341 * whether we really run on this CPU.
1344 cpu_policy = cpufreq_cpu_get(cpu);
1348 /* only handle each CPU group once */
1349 if (unlikely(cpu_policy->cpu != cpu))
1352 if (cpufreq_driver->suspend) {
1353 ret = cpufreq_driver->suspend(cpu_policy, pmsg);
1355 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1356 "step on CPU %u\n", cpu_policy->cpu);
1360 cpufreq_cpu_put(cpu_policy);
1365 * cpufreq_resume - restore proper CPU frequency handling after resume
1367 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
1368 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1369 * restored. It will verify that the current freq is in sync with
1370 * what we believe it to be. This is a bit later than when it
1371 * should be, but nonetheless it's better than calling
1372 * cpufreq_driver->get() here which might re-enable interrupts...
1374 static int cpufreq_resume(struct sys_device *sysdev)
1378 int cpu = sysdev->id;
1379 struct cpufreq_policy *cpu_policy;
1381 dprintk("resuming cpu %u\n", cpu);
1383 if (!cpu_online(cpu))
1386 /* we may be lax here as interrupts are off. Nonetheless
1387 * we need to grab the correct cpu policy, so as to check
1388 * whether we really run on this CPU.
1391 cpu_policy = cpufreq_cpu_get(cpu);
1395 /* only handle each CPU group once */
1396 if (unlikely(cpu_policy->cpu != cpu))
1399 if (cpufreq_driver->resume) {
1400 ret = cpufreq_driver->resume(cpu_policy);
1402 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1403 "step on CPU %u\n", cpu_policy->cpu);
1408 schedule_work(&cpu_policy->update);
1411 cpufreq_cpu_put(cpu_policy);
1415 static struct sysdev_driver cpufreq_sysdev_driver = {
1416 .add = cpufreq_add_dev,
1417 .remove = cpufreq_remove_dev,
1418 .suspend = cpufreq_suspend,
1419 .resume = cpufreq_resume,
1423 /*********************************************************************
1424 * NOTIFIER LISTS INTERFACE *
1425 *********************************************************************/
1428 * cpufreq_register_notifier - register a driver with cpufreq
1429 * @nb: notifier function to register
1430 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1432 * Add a driver to one of two lists: either a list of drivers that
1433 * are notified about clock rate changes (once before and once after
1434 * the transition), or a list of drivers that are notified about
1435 * changes in cpufreq policy.
1437 * This function may sleep, and has the same return conditions as
1438 * blocking_notifier_chain_register.
1440 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1444 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1447 case CPUFREQ_TRANSITION_NOTIFIER:
1448 ret = srcu_notifier_chain_register(
1449 &cpufreq_transition_notifier_list, nb);
1451 case CPUFREQ_POLICY_NOTIFIER:
1452 ret = blocking_notifier_chain_register(
1453 &cpufreq_policy_notifier_list, nb);
1461 EXPORT_SYMBOL(cpufreq_register_notifier);
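/*
 * Illustrative sketch (not part of the original file): registering a
 * transition notifier.  The callback receives a struct cpufreq_freqs in the
 * data argument, once with CPUFREQ_PRECHANGE and once with
 * CPUFREQ_POSTCHANGE per transition.  All names are hypothetical.
 */
#if 0
static int example_transition_cb(struct notifier_block *nb,
				 unsigned long val, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (val == CPUFREQ_POSTCHANGE)
		printk(KERN_DEBUG "cpu%u: %u kHz -> %u kHz\n",
		       freqs->cpu, freqs->old, freqs->new);
	return NOTIFY_OK;
}

static struct notifier_block example_transition_nb = {
	.notifier_call = example_transition_cb,
};

static int __init example_notifier_init(void)
{
	return cpufreq_register_notifier(&example_transition_nb,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
#endif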
1465 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1466 * @nb: notifier block to be unregistered
1467 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1469 * Remove a driver from the CPU frequency notifier list.
1471 * This function may sleep, and has the same return conditions as
1472 * blocking_notifier_chain_unregister.
1474 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1479 case CPUFREQ_TRANSITION_NOTIFIER:
1480 ret = srcu_notifier_chain_unregister(
1481 &cpufreq_transition_notifier_list, nb);
1483 case CPUFREQ_POLICY_NOTIFIER:
1484 ret = blocking_notifier_chain_unregister(
1485 &cpufreq_policy_notifier_list, nb);
1493 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1496 /*********************************************************************
1498 *********************************************************************/
1501 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1502 unsigned int target_freq,
1503 unsigned int relation)
1505 int retval = -EINVAL;
1507 dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
1508 target_freq, relation);
1509 if (cpu_online(policy->cpu) && cpufreq_driver->target)
1510 retval = cpufreq_driver->target(policy, target_freq, relation);
1514 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1516 int cpufreq_driver_target(struct cpufreq_policy *policy,
1517 unsigned int target_freq,
1518 unsigned int relation)
1522 policy = cpufreq_cpu_get(policy->cpu);
1526 if (unlikely(lock_policy_rwsem_write(policy->cpu)))
1529 ret = __cpufreq_driver_target(policy, target_freq, relation);
1531 unlock_policy_rwsem_write(policy->cpu);
1534 cpufreq_cpu_put(policy);
1538 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
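/*
 * Illustrative sketch (not part of the original file): a caller that already
 * holds the policy rwsem in write mode requests a frequency change through
 * __cpufreq_driver_target(); callers that do not hold it use
 * cpufreq_driver_target() above, which takes the lock itself.  The function
 * name is hypothetical.
 */
#if 0
static int example_gov_go_max(struct cpufreq_policy *policy)
{
	return __cpufreq_driver_target(policy, policy->max,
				       CPUFREQ_RELATION_H);
}
#endif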
1540 int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
1544 policy = cpufreq_cpu_get(policy->cpu);
1548 if (cpu_online(cpu) && cpufreq_driver->getavg)
1549 ret = cpufreq_driver->getavg(policy, cpu);
1551 cpufreq_cpu_put(policy);
1554 EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
1557 * when "event" is CPUFREQ_GOV_LIMITS
1560 static int __cpufreq_governor(struct cpufreq_policy *policy,
1565 /* This must only be defined when the default governor is known to have
1566 latency restrictions, e.g. conservative or ondemand.
1567 That this is the case is already ensured in Kconfig
1569 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1570 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1572 struct cpufreq_governor *gov = NULL;
1575 if (policy->governor->max_transition_latency &&
1576 policy->cpuinfo.transition_latency >
1577 policy->governor->max_transition_latency) {
1581 printk(KERN_WARNING "%s governor failed: HW transition"
1582 " latency too long, falling back"
1583 " to %s governor\n",
1584 policy->governor->name,
1586 policy->governor = gov;
1590 if (!try_module_get(policy->governor->owner))
1593 dprintk("__cpufreq_governor for CPU %u, event %u\n",
1594 policy->cpu, event);
1595 ret = policy->governor->governor(policy, event);
1597 /* we keep one module reference alive for
1598 each CPU governed by this governor */
1599 if ((event != CPUFREQ_GOV_START) || ret)
1600 module_put(policy->governor->owner);
1601 if ((event == CPUFREQ_GOV_STOP) && !ret)
1602 module_put(policy->governor->owner);
1608 int cpufreq_register_governor(struct cpufreq_governor *governor)
1615 mutex_lock(&cpufreq_governor_mutex);
1618 if (__find_governor(governor->name) == NULL) {
1620 list_add(&governor->governor_list, &cpufreq_governor_list);
1623 mutex_unlock(&cpufreq_governor_mutex);
1626 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
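/*
 * Illustrative sketch (not part of the original file): the minimal shape of a
 * governor as registered above.  The ->governor() callback is invoked by
 * __cpufreq_governor() with CPUFREQ_GOV_START, CPUFREQ_GOV_STOP and
 * CPUFREQ_GOV_LIMITS events.  All names are hypothetical.
 */
#if 0
static int example_governor_fn(struct cpufreq_policy *policy,
			       unsigned int event)
{
	switch (event) {
	case CPUFREQ_GOV_START:
	case CPUFREQ_GOV_LIMITS:
		/* pin the policy to its maximum as a trivial example */
		return __cpufreq_driver_target(policy, policy->max,
					       CPUFREQ_RELATION_H);
	case CPUFREQ_GOV_STOP:
		break;
	}
	return 0;
}

static struct cpufreq_governor example_governor = {
	.name		= "example",
	.governor	= example_governor_fn,
	.owner		= THIS_MODULE,
};

static int __init example_governor_init(void)
{
	return cpufreq_register_governor(&example_governor);
}
#endif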
1629 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1631 #ifdef CONFIG_HOTPLUG_CPU
1638 #ifdef CONFIG_HOTPLUG_CPU
1639 for_each_present_cpu(cpu) {
1640 if (cpu_online(cpu))
1642 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1643 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1647 mutex_lock(&cpufreq_governor_mutex);
1648 list_del(&governor->governor_list);
1649 mutex_unlock(&cpufreq_governor_mutex);
1652 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1656 /*********************************************************************
1657 * POLICY INTERFACE *
1658 *********************************************************************/
1661 * cpufreq_get_policy - get the current cpufreq_policy
1662 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1665 * Reads the current cpufreq policy.
1667 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1669 struct cpufreq_policy *cpu_policy;
1673 cpu_policy = cpufreq_cpu_get(cpu);
1677 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1679 cpufreq_cpu_put(cpu_policy);
1682 EXPORT_SYMBOL(cpufreq_get_policy);
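/*
 * Illustrative sketch (not part of the original file): cpufreq_get_policy()
 * copies the live policy into caller-provided storage, so the caller does
 * not need to hold any cpufreq lock afterwards.  The function name is
 * hypothetical.
 */
#if 0
static void example_print_limits(unsigned int cpu)
{
	struct cpufreq_policy pol;

	if (cpufreq_get_policy(&pol, cpu))
		return;			/* no policy for this CPU */

	printk(KERN_INFO "cpu%u: %u..%u kHz, governor %s\n",
	       cpu, pol.min, pol.max,
	       pol.governor ? pol.governor->name : "none");
}
#endif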
1686 * data : current policy.
1687 * policy : policy to be set.
1689 static int __cpufreq_set_policy(struct cpufreq_policy *data,
1690 struct cpufreq_policy *policy)
1694 cpufreq_debug_disable_ratelimit();
1695 dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1696 policy->min, policy->max);
1698 memcpy(&policy->cpuinfo, &data->cpuinfo,
1699 sizeof(struct cpufreq_cpuinfo));
1701 if (policy->min > data->max || policy->max < data->min) {
1706 /* verify the cpu speed can be set within this limit */
1707 ret = cpufreq_driver->verify(policy);
1711 /* adjust if necessary - all reasons */
1712 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1713 CPUFREQ_ADJUST, policy);
1715 /* adjust if necessary - hardware incompatibility*/
1716 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1717 CPUFREQ_INCOMPATIBLE, policy);
1719 /* verify the cpu speed can be set within this limit,
1720 which might be different from the first one */
1721 ret = cpufreq_driver->verify(policy);
1725 /* notification of the new policy */
1726 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1727 CPUFREQ_NOTIFY, policy);
1729 data->min = policy->min;
1730 data->max = policy->max;
1732 dprintk("new min and max freqs are %u - %u kHz\n",
1733 data->min, data->max);
1735 if (cpufreq_driver->setpolicy) {
1736 data->policy = policy->policy;
1737 dprintk("setting range\n");
1738 ret = cpufreq_driver->setpolicy(policy);
1740 if (policy->governor != data->governor) {
1741 /* save old, working values */
1742 struct cpufreq_governor *old_gov = data->governor;
1744 dprintk("governor switch\n");
1746 /* end old governor */
1747 if (data->governor) {
1749 * Need to release the rwsem around governor
1750 * stop due to lock dependency between
1751 * cancel_delayed_work_sync and the read lock
1752 * taken in the delayed work handler.
1754 unlock_policy_rwsem_write(data->cpu);
1755 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1756 lock_policy_rwsem_write(data->cpu);
1759 /* start new governor */
1760 data->governor = policy->governor;
1761 if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
1762 /* new governor failed, so re-start old one */
1763 dprintk("starting governor %s failed\n",
1764 data->governor->name);
1766 data->governor = old_gov;
1767 __cpufreq_governor(data,
1773 /* might be a policy change, too, so fall through */
1775 dprintk("governor: change or update limits\n");
1776 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1780 cpufreq_debug_enable_ratelimit();
1785 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
1786 * @cpu: CPU which shall be re-evaluated
1788 * Useful for policy notifiers which have different necessities
1789 * at different times.
1791 int cpufreq_update_policy(unsigned int cpu)
1793 struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
1794 struct cpufreq_policy policy;
1802 if (unlikely(lock_policy_rwsem_write(cpu))) {
1807 dprintk("updating policy for CPU %u\n", cpu);
1808 memcpy(&policy, data, sizeof(struct cpufreq_policy));
1809 policy.min = data->user_policy.min;
1810 policy.max = data->user_policy.max;
1811 policy.policy = data->user_policy.policy;
1812 policy.governor = data->user_policy.governor;
1814 /* BIOS might change freq behind our back
1815 -> ask driver for current freq and notify governors about a change */
1816 if (cpufreq_driver->get) {
1817 policy.cur = cpufreq_driver->get(cpu);
1819 dprintk("Driver did not initialize current freq");
1820 data->cur = policy.cur;
1822 if (data->cur != policy.cur)
1823 cpufreq_out_of_sync(cpu, data->cur,
1828 ret = __cpufreq_set_policy(data, &policy);
1830 unlock_policy_rwsem_write(cpu);
1833 cpufreq_cpu_put(data);
1837 EXPORT_SYMBOL(cpufreq_update_policy);
1839 static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
1840 unsigned long action, void *hcpu)
1842 unsigned int cpu = (unsigned long)hcpu;
1843 struct sys_device *sys_dev;
1845 sys_dev = get_cpu_sysdev(cpu);
1849 case CPU_ONLINE_FROZEN:
1850 cpufreq_add_dev(sys_dev);
1852 case CPU_DOWN_PREPARE:
1853 case CPU_DOWN_PREPARE_FROZEN:
1854 if (unlikely(lock_policy_rwsem_write(cpu)))
1857 __cpufreq_remove_dev(sys_dev);
1859 case CPU_DOWN_FAILED:
1860 case CPU_DOWN_FAILED_FROZEN:
1861 cpufreq_add_dev(sys_dev);
1868 static struct notifier_block __refdata cpufreq_cpu_notifier =
1870 .notifier_call = cpufreq_cpu_callback,
1873 /*********************************************************************
1874 * REGISTER / UNREGISTER CPUFREQ DRIVER *
1875 *********************************************************************/
1878 * cpufreq_register_driver - register a CPU Frequency driver
1879 * @driver_data: A struct cpufreq_driver containing the values
1880 * submitted by the CPU Frequency driver.
1882 * Registers a CPU Frequency driver to this core code. This code
1883 * returns zero on success, -EBUSY when another driver got here first
1884 * (and isn't unregistered in the meantime).
1887 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1889 unsigned long flags;
1892 if (!driver_data || !driver_data->verify || !driver_data->init ||
1893 ((!driver_data->setpolicy) && (!driver_data->target)))
1896 dprintk("trying to register driver %s\n", driver_data->name);
1898 if (driver_data->setpolicy)
1899 driver_data->flags |= CPUFREQ_CONST_LOOPS;
1901 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1902 if (cpufreq_driver) {
1903 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1906 cpufreq_driver = driver_data;
1907 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1909 ret = sysdev_driver_register(&cpu_sysdev_class,
1910 &cpufreq_sysdev_driver);
1912 if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1916 /* check for at least one working CPU */
1917 for (i = 0; i < nr_cpu_ids; i++)
1918 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
1923 /* if all ->init() calls failed, unregister */
1925 dprintk("no CPU initialized for driver %s\n",
1927 sysdev_driver_unregister(&cpu_sysdev_class,
1928 &cpufreq_sysdev_driver);
1930 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1931 cpufreq_driver = NULL;
1932 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1937 register_hotcpu_notifier(&cpufreq_cpu_notifier);
1938 dprintk("driver %s up and running\n", driver_data->name);
1939 cpufreq_debug_enable_ratelimit();
1944 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
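/*
 * Illustrative sketch (not part of the original file): the minimum a scaling
 * driver provides before calling cpufreq_register_driver() -- ->init() and
 * ->verify(), plus either ->target() or ->setpolicy(), as checked above.
 * All names and frequency values are hypothetical.
 */
#if 0
static int example_cpufreq_init(struct cpufreq_policy *policy)
{
	policy->cpuinfo.min_freq = 200000;		/* kHz */
	policy->cpuinfo.max_freq = 800000;		/* kHz */
	policy->cpuinfo.transition_latency = 100000;	/* ns */
	policy->min = policy->cpuinfo.min_freq;
	policy->max = policy->cpuinfo.max_freq;
	policy->cur = 800000;
	return 0;
}

static int example_cpufreq_verify(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);
	return 0;
}

static int example_cpufreq_target(struct cpufreq_policy *policy,
				  unsigned int target_freq,
				  unsigned int relation)
{
	/* program the hardware and send PRE/POSTCHANGE notifications here */
	return 0;
}

static struct cpufreq_driver example_cpufreq_driver = {
	.name	= "example",
	.owner	= THIS_MODULE,
	.init	= example_cpufreq_init,
	.verify	= example_cpufreq_verify,
	.target	= example_cpufreq_target,
};

static int __init example_cpufreq_register(void)
{
	return cpufreq_register_driver(&example_cpufreq_driver);
}
#endif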
1948 * cpufreq_unregister_driver - unregister the current CPUFreq driver
1950 * Unregister the current CPUFreq driver. Only call this if you have
1951 * the right to do so, i.e. if you have succeeded in initialising before!
1952 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
1953 * currently not initialised.
1955 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1957 unsigned long flags;
1959 cpufreq_debug_disable_ratelimit();
1961 if (!cpufreq_driver || (driver != cpufreq_driver)) {
1962 cpufreq_debug_enable_ratelimit();
1966 dprintk("unregistering driver %s\n", driver->name);
1968 sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
1969 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
1971 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1972 cpufreq_driver = NULL;
1973 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1977 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
1979 static int __init cpufreq_core_init(void)
1983 for_each_possible_cpu(cpu) {
1984 per_cpu(policy_cpu, cpu) = -1;
1985 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
1988 cpufreq_global_kobject = kobject_create_and_add("cpufreq",
1989 &cpu_sysdev_class.kset.kobj);
1990 BUG_ON(!cpufreq_global_kobject);
1994 core_initcall(cpufreq_core_init);