/*
 * pcpu.c - management of physical CPUs in the dom0 environment
 */
#include <linux/acpi.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/hypervisor.h>
#include <xen/interface/platform.h>
#include <xen/evtchn.h>
#include <xen/pcpu.h>
#include <acpi/processor.h>

struct pcpu {
        struct list_head pcpu_list;
        struct device dev;
        uint32_t apic_id;
        uint32_t acpi_id;
        uint32_t flags;
};

static inline int xen_pcpu_online(uint32_t flags)
{
        return !!(flags & XEN_PCPU_FLAGS_ONLINE);
}

static DEFINE_MUTEX(xen_pcpu_lock);

/* No need to disable IRQs: hotplug notification runs in workqueue context. */
#define get_pcpu_lock() mutex_lock(&xen_pcpu_lock);
#define put_pcpu_lock() mutex_unlock(&xen_pcpu_lock);

static LIST_HEAD(xen_pcpus);

static BLOCKING_NOTIFIER_HEAD(pcpu_chain);

static inline void *notifier_param(const struct pcpu *pcpu)
{
        return (void *)(unsigned long)pcpu->dev.id;
}

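/*
 * Register a notifier for physical CPU state changes.  A newly registered
 * notifier is immediately replayed a CPU_ONLINE event for every pCPU that
 * is already online, so callers do not miss CPUs that came up earlier.
 * The notifier data argument is the Xen CPU id encoded as a pointer (see
 * notifier_param() above).
 *
 * Illustrative caller sketch (hypothetical names, not part of this file):
 *
 *      static int my_pcpu_callback(struct notifier_block *nb,
 *                                  unsigned long action, void *data)
 *      {
 *              unsigned int xen_cpuid = (unsigned long)data;
 *
 *              if (action == CPU_ONLINE)
 *                      pr_info("pCPU %u is online\n", xen_cpuid);
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block my_pcpu_nb = {
 *              .notifier_call = my_pcpu_callback,
 *      };
 *
 *      register_pcpu_notifier(&my_pcpu_nb);
 */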
int register_pcpu_notifier(struct notifier_block *nb)
{
        int err;

        get_pcpu_lock();

        err = blocking_notifier_chain_register(&pcpu_chain, nb);

        if (!err) {
                struct pcpu *pcpu;

                list_for_each_entry(pcpu, &xen_pcpus, pcpu_list)
                        if (xen_pcpu_online(pcpu->flags))
                                nb->notifier_call(nb, CPU_ONLINE,
                                                  notifier_param(pcpu));
        }

        put_pcpu_lock();

        return err;
}
EXPORT_SYMBOL_GPL(register_pcpu_notifier);

void unregister_pcpu_notifier(struct notifier_block *nb)
{
        get_pcpu_lock();
        blocking_notifier_chain_unregister(&pcpu_chain, nb);
        put_pcpu_lock();
}
EXPORT_SYMBOL_GPL(unregister_pcpu_notifier);

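/*
 * Ask the hypervisor to offline/online the given physical CPU via the
 * XENPF_cpu_offline/XENPF_cpu_online platform hypercalls.  "xen_id" is
 * the CPU index as seen by Xen, not a Linux (virtual) CPU number.
 */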
static int xen_pcpu_down(uint32_t xen_id)
{
        xen_platform_op_t op;

        op.cmd = XENPF_cpu_offline;
        op.u.cpu_ol.cpuid = xen_id;
        return HYPERVISOR_platform_op(&op);
}

static int xen_pcpu_up(uint32_t xen_id)
{
        xen_platform_op_t op;

        op.cmd = XENPF_cpu_online;
        op.u.cpu_ol.cpuid = xen_id;
        return HYPERVISOR_platform_op(&op);
}

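/*
 * sysfs interface: each pCPU gets an "online" (read/write) attribute plus
 * read-only "apic_id" and "acpi_id" attributes under the xen_pcpu
 * subsystem (typically /sys/devices/system/xen_pcpu/xen_pcpu<N>/).
 * Writing '0' or '1' to "online" requests an offline/online transition
 * through the hypervisor.
 */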
static ssize_t show_online(struct device *dev,
                           struct device_attribute *attr,
                           char *buf)
{
        struct pcpu *cpu = container_of(dev, struct pcpu, dev);

        return sprintf(buf, "%d\n", xen_pcpu_online(cpu->flags));
}

static ssize_t store_online(struct device *dev,
                            struct device_attribute *attr,
                            const char *buf, size_t count)
{
        ssize_t ret;

        if (!count)
                return -EINVAL;

        switch (buf[0]) {
        case '0':
                ret = xen_pcpu_down(dev->id);
                break;
        case '1':
                ret = xen_pcpu_up(dev->id);
                break;
        default:
                ret = -EINVAL;
        }

        if (ret >= 0)
                ret = count;
        return ret;
}

static DEVICE_ATTR(online, 0644, show_online, store_online);

static ssize_t show_apicid(struct device *dev,
                           struct device_attribute *attr,
                           char *buf)
{
        struct pcpu *cpu = container_of(dev, struct pcpu, dev);

        return sprintf(buf, "%#x\n", cpu->apic_id);
}
static DEVICE_ATTR(apic_id, 0444, show_apicid, NULL);

static ssize_t show_acpiid(struct device *dev,
                           struct device_attribute *attr,
                           char *buf)
{
        struct pcpu *cpu = container_of(dev, struct pcpu, dev);

        return sprintf(buf, "%#x\n", cpu->acpi_id);
}
static DEVICE_ATTR(acpi_id, 0444, show_acpiid, NULL);

static struct bus_type xen_pcpu_subsys = {
        .name = "xen_pcpu",
        .dev_name = "xen_pcpu",
};

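/*
 * Tear down a pCPU: remove its sysfs attributes, unregister the device,
 * unlink it from xen_pcpus and free it.  Safe to call with NULL.
 */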
static int xen_pcpu_free(struct pcpu *pcpu)
{
        if (!pcpu)
                return 0;

        device_remove_file(&pcpu->dev, &dev_attr_online);
        device_remove_file(&pcpu->dev, &dev_attr_apic_id);
        device_remove_file(&pcpu->dev, &dev_attr_acpi_id);
        device_unregister(&pcpu->dev);
        list_del(&pcpu->pcpu_list);
        kfree(pcpu);

        return 0;
}

static inline int same_pcpu(struct xenpf_pcpuinfo *info,
                            struct pcpu *pcpu)
{
        return (pcpu->apic_id == info->apic_id) &&
                (pcpu->dev.id == info->xen_cpuid);
}

/*
 * Return 1 if the online status changed
 */
static int xen_pcpu_online_check(struct xenpf_pcpuinfo *info,
                                 struct pcpu *pcpu)
{
        int result = 0;

        if (info->xen_cpuid != pcpu->dev.id)
                return 0;

        if (xen_pcpu_online(info->flags) && !xen_pcpu_online(pcpu->flags)) {
                /* The pcpu has come online */
                pcpu->flags |= XEN_PCPU_FLAGS_ONLINE;
                blocking_notifier_call_chain(&pcpu_chain, CPU_ONLINE,
                                             notifier_param(pcpu));
                kobject_uevent(&pcpu->dev.kobj, KOBJ_ONLINE);
                result = 1;
        } else if (!xen_pcpu_online(info->flags) &&
                   xen_pcpu_online(pcpu->flags)) {
                /* The pcpu has gone offline */
                pcpu->flags &= ~XEN_PCPU_FLAGS_ONLINE;
                blocking_notifier_call_chain(&pcpu_chain, CPU_DEAD,
                                             notifier_param(pcpu));
                kobject_uevent(&pcpu->dev.kobj, KOBJ_OFFLINE);
                result = 1;
        }

        return result;
}

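/*
 * Register the pCPU device with the driver core and create its sysfs
 * attribute files.
 */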
static int pcpu_dev_init(struct pcpu *cpu)
{
        int err = device_register(&cpu->dev);

        if (!err) {
                device_create_file(&cpu->dev, &dev_attr_online);
                device_create_file(&cpu->dev, &dev_attr_apic_id);
                device_create_file(&cpu->dev, &dev_attr_acpi_id);
        }
        return err;
}

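/* Look up an already known pCPU by its Xen CPU id; NULL if not tracked. */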
static struct pcpu *get_pcpu(unsigned int xen_id)
{
        struct pcpu *pcpu;

        list_for_each_entry(pcpu, &xen_pcpus, pcpu_list)
                if (pcpu->dev.id == xen_id)
                        return pcpu;

        return NULL;
}

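/*
 * Allocate and register a struct pcpu for a newly reported physical CPU
 * and add it to the global xen_pcpus list.  Returns ERR_PTR() on failure.
 */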
static struct pcpu *init_pcpu(struct xenpf_pcpuinfo *info)
{
        struct pcpu *pcpu;
        int err;

        if (info->flags & XEN_PCPU_FLAGS_INVALID)
                return ERR_PTR(-EINVAL);

        /* The pCPU has just been added */
        pcpu = kzalloc(sizeof(struct pcpu), GFP_KERNEL);
        if (!pcpu)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&pcpu->pcpu_list);
        pcpu->apic_id = info->apic_id;
        pcpu->acpi_id = info->acpi_id;
        pcpu->flags = info->flags;

        pcpu->dev.bus = &xen_pcpu_subsys;
        pcpu->dev.id = info->xen_cpuid;

        err = pcpu_dev_init(pcpu);
        if (err) {
                kfree(pcpu);
                return ERR_PTR(err);
        }

        list_add_tail(&pcpu->pcpu_list, &xen_pcpus);
        return pcpu;
}

#define PCPU_NO_CHANGE                  0
#define PCPU_ADDED                      1
#define PCPU_ONLINE_OFFLINE             2
#define PCPU_REMOVED                    3
/*
 * Caller must hold the pcpu lock.
 * < 0: an error occurred
 *   0: no changes
 * > 0: state changed
 */
static int _sync_pcpu(unsigned int cpu_num, unsigned int *max_id)
{
        struct pcpu *pcpu;
        struct xenpf_pcpuinfo *info;
        xen_platform_op_t op;
        int ret;

        op.cmd = XENPF_get_cpuinfo;
        info = &op.u.pcpu_info;
        info->xen_cpuid = cpu_num;

        do {
                ret = HYPERVISOR_platform_op(&op);
        } while (ret == -EBUSY);
        if (ret)
                return ret;

        if (max_id)
                *max_id = op.u.pcpu_info.max_present;

        pcpu = get_pcpu(cpu_num);

        if (info->flags & XEN_PCPU_FLAGS_INVALID) {
                /* The pcpu has been removed */
                if (pcpu) {
                        xen_pcpu_free(pcpu);
                        return PCPU_REMOVED;
                }
                return PCPU_NO_CHANGE;
        }

        if (!pcpu) {
                pcpu = init_pcpu(info);
                if (!IS_ERR(pcpu))
                        return PCPU_ADDED;
                pr_warn("Failed to init pCPU %#x (%ld)\n",
                        info->xen_cpuid, PTR_ERR(pcpu));
                return PTR_ERR(pcpu);
        }

        if (!same_pcpu(info, pcpu)) {
                /*
                 * The old pCPU was replaced by a new one, which means
                 * several vIRQs were missed - can this happen?
                 */
                pr_warn("pCPU %#x changed!\n", pcpu->dev.id);
                pcpu->apic_id = info->apic_id;
                pcpu->acpi_id = info->acpi_id;
        }
        if (xen_pcpu_online_check(info, pcpu))
                return PCPU_ONLINE_OFFLINE;
        return PCPU_NO_CHANGE;
}

/*
 * Sync dom0's pCPU information with the Xen hypervisor's view.
 */
static int xen_sync_pcpus(void)
{
        /*
         * The boot CPU always has cpu_id 0 in Xen.
         */
        unsigned int cpu_num = 0, max_id = 0;
        int result = 0;

        get_pcpu_lock();

        while ((result >= 0) && (cpu_num <= max_id)) {
                result = _sync_pcpu(cpu_num, &max_id);

                switch (result) {
                case PCPU_NO_CHANGE:
                case PCPU_ADDED:
                case PCPU_ONLINE_OFFLINE:
                case PCPU_REMOVED:
                        break;
                default:
                        pr_warn("Failed to sync pcpu %#x\n", cpu_num);
                        break;
                }
                cpu_num++;
        }

        if (result < 0) {
                struct pcpu *pcpu, *tmp;

                list_for_each_entry_safe(pcpu, tmp, &xen_pcpus, pcpu_list)
                        xen_pcpu_free(pcpu);
        }

        put_pcpu_lock();

        return result;
}

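/* Deferred work: resynchronize pCPU state outside interrupt context. */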
static void xen_pcpu_dpc(struct work_struct *work)
{
        if (xen_sync_pcpus() < 0)
                pr_warn("xen_pcpu_dpc: Failed to sync pcpu information\n");
}
static DECLARE_WORK(xen_pcpu_work, xen_pcpu_dpc);

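/*
 * VIRQ_PCPU_STATE handler: the hypervisor signals that a physical CPU
 * changed state; defer the (blocking) resync to a workqueue.
 */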
static irqreturn_t xen_pcpu_interrupt(int irq, void *dev_id)
{
        schedule_work(&xen_pcpu_work);

        return IRQ_HANDLED;
}

#ifdef CONFIG_ACPI_HOTPLUG_CPU

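/*
 * Intended to be called from ACPI CPU hotplug handling (hence the
 * CONFIG_ACPI_HOTPLUG_CPU guard); it simply kicks the resync work.
 * The "type" argument is currently unused.
 */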
int xen_pcpu_hotplug(int type)
{
        schedule_work(&xen_pcpu_work);

        return 0;
}
EXPORT_SYMBOL_GPL(xen_pcpu_hotplug);

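/*
 * Map an ACPI id (is_acpiid == true) or APIC id to the corresponding Xen
 * CPU index by querying the hypervisor, or return -1 if no match is found.
 */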
int xen_pcpu_index(uint32_t id, bool is_acpiid)
{
        unsigned int cpu_num, max_id;
        xen_platform_op_t op;
        struct xenpf_pcpuinfo *info = &op.u.pcpu_info;

        op.cmd = XENPF_get_cpuinfo;
        for (max_id = cpu_num = 0; cpu_num <= max_id; ++cpu_num) {
                int ret;

                info->xen_cpuid = cpu_num;
                do {
                        ret = HYPERVISOR_platform_op(&op);
                } while (ret == -EBUSY);
                if (ret)
                        continue;

                if (info->max_present > max_id)
                        max_id = info->max_present;
                if (id == (is_acpiid ? info->acpi_id : info->apic_id))
                        return cpu_num;
        }

        return -1;
}
EXPORT_SYMBOL_GPL(xen_pcpu_index);

#endif /* CONFIG_ACPI_HOTPLUG_CPU */

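/*
 * Driver init (dom0 only): register the xen_pcpu subsystem, take an
 * initial snapshot of the physical CPUs and, if any were found, bind
 * VIRQ_PCPU_STATE so later state changes are picked up.
 */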
static int __init xen_pcpu_init(void)
{
        int err;

        if (!is_initial_xendomain())
                return 0;

        err = subsys_system_register(&xen_pcpu_subsys, NULL);
        if (err) {
                pr_warn("xen_pcpu_init: "
                        "Failed to register subsys (%d)\n", err);
                return err;
        }

        xen_sync_pcpus();

        if (!list_empty(&xen_pcpus))
                err = bind_virq_to_irqhandler(VIRQ_PCPU_STATE, 0,
                                              xen_pcpu_interrupt, 0,
                                              "pcpu", NULL);
        if (err < 0)
                pr_warn("xen_pcpu_init: "
                        "Failed to bind virq (%d)\n", err);

        return err;
}
subsys_initcall(xen_pcpu_init);