/*
 * drivers/base/cpu.c - CPU subsystem support
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/device.h>
#include <linux/node.h>
#include <linux/gfp.h>
#include <linux/percpu.h>

#include "base.h"

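/*
 * The "cpu" subsystem bus; CPU devices registered on it show up under
 * /sys/devices/system/cpu/.
 */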
struct bus_type cpu_subsys = {
        .name = "cpu",
        .dev_name = "cpu",
};
EXPORT_SYMBOL_GPL(cpu_subsys);

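/*
 * Per-CPU pointer to the registered struct device, filled in by
 * register_cpu() and looked up by get_cpu_device().
 */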
static DEFINE_PER_CPU(struct device *, cpu_sys_devices);

#ifdef CONFIG_HOTPLUG_CPU
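/*
 * The per-CPU "online" attribute: reading reports whether the CPU is
 * currently online; writing '0' or '1' offlines or onlines it via
 * cpu_down()/cpu_up() and emits the matching KOBJ_OFFLINE/KOBJ_ONLINE
 * uevent, e.g.
 *
 *	echo 0 > /sys/devices/system/cpu/cpu1/online
 */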
static ssize_t show_online(struct device *dev,
                           struct device_attribute *attr,
                           char *buf)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);

        return sprintf(buf, "%u\n", !!cpu_online(cpu->dev.id));
}

static ssize_t __ref store_online(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);
        ssize_t ret;

        cpu_hotplug_driver_lock();
        switch (buf[0]) {
        case '0':
                ret = cpu_down(cpu->dev.id);
                if (!ret)
                        kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
                break;
        case '1':
                ret = cpu_up(cpu->dev.id);
                if (!ret)
                        kobject_uevent(&dev->kobj, KOBJ_ONLINE);
                break;
        default:
                ret = -EINVAL;
        }
        cpu_hotplug_driver_unlock();

        if (ret >= 0)
                ret = count;
        return ret;
}
static DEVICE_ATTR(online, 0644, show_online, store_online);

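/*
 * Hotpluggable CPUs get the "online" control file created for them by
 * register_cpu(); unregister_cpu() removes it and tears the device down.
 */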
static void __cpuinit register_cpu_control(struct cpu *cpu)
{
        device_create_file(&cpu->dev, &dev_attr_online);
}
void unregister_cpu(struct cpu *cpu)
{
        int logical_cpu = cpu->dev.id;

        unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));

        device_remove_file(&cpu->dev, &dev_attr_online);

        device_unregister(&cpu->dev);
        per_cpu(cpu_sys_devices, logical_cpu) = NULL;
        return;
}

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
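/*
 * Writes to the subsystem-wide "probe" and "release" files are handed
 * straight to the architecture hooks arch_cpu_probe() and
 * arch_cpu_release().
 */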
static ssize_t cpu_probe_store(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf,
                               size_t count)
{
        return arch_cpu_probe(buf, count);
}

static ssize_t cpu_release_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf,
                                 size_t count)
{
        return arch_cpu_release(buf, count);
}

static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

#else /* ... !CONFIG_HOTPLUG_CPU */
static inline void register_cpu_control(struct cpu *cpu)
{
}
#endif /* CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_KEXEC
#include <linux/kexec.h>

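/*
 * "crash_notes" exposes the physical address of this CPU's crash_notes
 * buffer, which user-space kexec/kdump tooling reads when preparing the
 * capture kernel's ELF core headers.
 */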
static ssize_t show_crash_notes(struct device *dev, struct device_attribute *attr,
                                char *buf)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);
        ssize_t rc;
        unsigned long long addr;
        int cpunum;

        cpunum = cpu->dev.id;

        /*
         * We might be reading another CPU's data, depending on which CPU
         * this read is scheduled on.  But the per-cpu crash_notes memory
         * is allocated once during boot and does not change thereafter,
         * so the operation is safe without locking.
         */
        addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
        rc = sprintf(buf, "%Lx\n", addr);
        return rc;
}
static DEVICE_ATTR(crash_notes, 0400, show_crash_notes, NULL);
#endif

/*
 * Print cpu online, possible, present, and system maps
 */

struct cpu_attr {
        struct device_attribute attr;
        const struct cpumask *const * const map;
};

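/*
 * Format the cpumask referenced by a struct cpu_attr as a
 * newline-terminated cpu list (e.g. "0-3,5").
 */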
static ssize_t show_cpus_attr(struct device *dev,
                              struct device_attribute *attr,
                              char *buf)
{
        struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);
        int n = cpulist_scnprintf(buf, PAGE_SIZE-2, *(ca->map));

        buf[n++] = '\n';
        buf[n] = '\0';
        return n;
}

#define _CPU_ATTR(name, map) \
        { __ATTR(name, 0444, show_cpus_attr, NULL), map }

/* Keep in sync with cpu_root_attrs */
static struct cpu_attr cpu_attrs[] = {
        _CPU_ATTR(online, &cpu_online_mask),
        _CPU_ATTR(possible, &cpu_possible_mask),
        _CPU_ATTR(present, &cpu_present_mask),
};

/*
 * Print values for NR_CPUS and offlined cpus
 */
static ssize_t print_cpus_kernel_max(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1);
        return n;
}
static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);

/* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */
unsigned int total_cpus;

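/*
 * List the CPUs that are possible but currently offline, plus (when the
 * architecture has set total_cpus) the CPUs that exist in hardware but
 * lie at or beyond nr_cpu_ids and so were never brought up.
 */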
static ssize_t print_cpus_offline(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        int n = 0, len = PAGE_SIZE-2;
        cpumask_var_t offline;

        /* display offline cpus < nr_cpu_ids */
        if (!alloc_cpumask_var(&offline, GFP_KERNEL))
                return -ENOMEM;
        cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
        n = cpulist_scnprintf(buf, len, offline);
        free_cpumask_var(offline);

        /* display offline cpus >= nr_cpu_ids */
        if (total_cpus && nr_cpu_ids < total_cpus) {
                if (n && n < len)
                        buf[n++] = ',';

                if (nr_cpu_ids == total_cpus-1)
                        n += snprintf(&buf[n], len - n, "%d", nr_cpu_ids);
                else
                        n += snprintf(&buf[n], len - n, "%d-%d",
                                      nr_cpu_ids, total_cpus-1);
        }

        n += snprintf(&buf[n], len - n, "\n");
        return n;
}
static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);

static void cpu_device_release(struct device *dev)
{
        /*
         * This is an empty function to prevent the driver core from spitting a
         * warning at us.  Yes, I know this is directly opposite of what the
         * documentation for the driver core and kobjects say, and the author
         * of this code has already been publicly ridiculed for doing
         * something as foolish as this.  However, at this point in time, it is
         * the only way to handle the issue of statically allocated cpu
         * devices.  The different architectures will have their cpu device
         * code reworked to properly handle this in the near future, so this
         * function will then be changed to correctly free up the memory held
         * by the cpu device.
         *
         * Never copy this way of doing things, or you too will be made fun of
         * on the linux-kernel list; you have been warned.
         */
}

/*
 * register_cpu - Setup a sysfs device for a CPU.
 * @cpu - cpu->hotpluggable field set to 1 will generate a control file in
 *        sysfs for this CPU.
 * @num - CPU number to use when creating the device.
 *
 * Initialize and register the CPU device.
 */
int __cpuinit register_cpu(struct cpu *cpu, int num)
{
        int error;

        cpu->node_id = cpu_to_node(num);
        memset(&cpu->dev, 0x00, sizeof(struct device));
        cpu->dev.id = num;
        cpu->dev.bus = &cpu_subsys;
        cpu->dev.release = cpu_device_release;
        error = device_register(&cpu->dev);
        if (!error && cpu->hotpluggable)
                register_cpu_control(cpu);
        if (!error)
                per_cpu(cpu_sys_devices, num) = &cpu->dev;
        if (!error)
                register_cpu_under_node(num, cpu_to_node(num));

#ifdef CONFIG_KEXEC
        if (!error)
                error = device_create_file(&cpu->dev, &dev_attr_crash_notes);
#endif
        return error;
}

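/*
 * get_cpu_device - return the struct device registered for a possible
 * CPU, or NULL if @cpu is out of range or was never registered.
 */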
struct device *get_cpu_device(unsigned cpu)
{
        if (cpu < nr_cpu_ids && cpu_possible(cpu))
                return per_cpu(cpu_sys_devices, cpu);
        else
                return NULL;
}
EXPORT_SYMBOL_GPL(get_cpu_device);

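/*
 * Attributes attached to the subsystem root itself, i.e. the files
 * visible directly under /sys/devices/system/cpu/.
 */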
static struct attribute *cpu_root_attrs[] = {
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
        &dev_attr_probe.attr,
        &dev_attr_release.attr,
#endif
        &cpu_attrs[0].attr.attr,
        &cpu_attrs[1].attr.attr,
        &cpu_attrs[2].attr.attr,
        &dev_attr_kernel_max.attr,
        &dev_attr_offline.attr,
        NULL
};

static struct attribute_group cpu_root_attr_group = {
        .attrs = cpu_root_attrs,
};

static const struct attribute_group *cpu_root_attr_groups[] = {
        &cpu_root_attr_group,
        NULL,
};

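/* True if a device was registered for @cpu and it was marked hotpluggable. */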
bool cpu_is_hotpluggable(unsigned cpu)
{
        struct device *dev = get_cpu_device(cpu);
        return dev && container_of(dev, struct cpu, dev)->hotpluggable;
}
EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);

#ifdef CONFIG_GENERIC_CPU_DEVICES
static DEFINE_PER_CPU(struct cpu, cpu_devices);
#endif

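/*
 * With CONFIG_GENERIC_CPU_DEVICES the core registers a statically
 * allocated struct cpu for every possible CPU; otherwise the
 * architecture code is expected to call register_cpu() itself.
 */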
static void __init cpu_dev_register_generic(void)
{
#ifdef CONFIG_GENERIC_CPU_DEVICES
        int i;

        for_each_possible_cpu(i) {
                if (register_cpu(&per_cpu(cpu_devices, i), i))
                        panic("Failed to register CPU device");
        }
#endif
}

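/*
 * Register the cpu subsystem root and the per-CPU devices.  Called once
 * by the driver core during early boot.
 */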
void __init cpu_dev_init(void)
{
        if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
                panic("Failed to register CPU subsystem");

        cpu_dev_register_generic();

#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
        sched_create_sysfs_power_savings_entries(cpu_subsys.dev_root);
#endif
}