 * pcpu.c - management of physical CPUs in a dom0 environment
4 #include <linux/acpi.h>
6 #include <linux/export.h>
7 #include <linux/interrupt.h>
8 #include <linux/kobject.h>
9 #include <linux/list.h>
10 #include <linux/spinlock.h>
11 #include <asm/hypervisor.h>
12 #include <xen/interface/platform.h>
13 #include <xen/evtchn.h>
15 #include <acpi/processor.h>
18 struct list_head pcpu_list;
25 static inline int xen_pcpu_online(uint32_t flags)
27 return !!(flags & XEN_PCPU_FLAGS_ONLINE);
static DEFINE_MUTEX(xen_pcpu_lock);

/* No need for irq disable since hotplug notify is in workqueue context */
/*
 * No trailing semicolon in the expansions: call sites supply their own ';',
 * and a semicolon here would expand to two statements and break unbraced
 * if/else bodies.
 */
#define get_pcpu_lock()	mutex_lock(&xen_pcpu_lock)
#define put_pcpu_lock()	mutex_unlock(&xen_pcpu_lock)

/* All known pCPUs, linked via struct pcpu::pcpu_list; guarded by the lock */
static LIST_HEAD(xen_pcpus);

/* Subscribers notified on pCPU online/offline transitions */
static BLOCKING_NOTIFIER_HEAD(pcpu_chain);
40 static inline void *notifier_param(const struct pcpu *pcpu)
42 return (void *)(unsigned long)pcpu->dev.id;
45 int register_pcpu_notifier(struct notifier_block *nb)
51 err = blocking_notifier_chain_register(&pcpu_chain, nb);
56 list_for_each_entry(pcpu, &xen_pcpus, pcpu_list)
57 if (xen_pcpu_online(pcpu->flags))
58 nb->notifier_call(nb, CPU_ONLINE,
59 notifier_param(pcpu));
66 EXPORT_SYMBOL_GPL(register_pcpu_notifier);
68 void unregister_pcpu_notifier(struct notifier_block *nb)
71 blocking_notifier_chain_unregister(&pcpu_chain, nb);
74 EXPORT_SYMBOL_GPL(unregister_pcpu_notifier);
76 static int xen_pcpu_down(uint32_t xen_id)
80 op.cmd = XENPF_cpu_offline;
81 op.u.cpu_ol.cpuid = xen_id;
82 return HYPERVISOR_platform_op(&op);
85 static int xen_pcpu_up(uint32_t xen_id)
89 op.cmd = XENPF_cpu_online;
90 op.u.cpu_ol.cpuid = xen_id;
91 return HYPERVISOR_platform_op(&op);
94 static ssize_t show_online(struct device *dev,
95 struct device_attribute *attr,
98 struct pcpu *cpu = container_of(dev, struct pcpu, dev);
100 return sprintf(buf, "%d\n", xen_pcpu_online(cpu->flags));
103 static ssize_t store_online(struct device *dev,
104 struct device_attribute *attr,
105 const char *buf, size_t count)
114 ret = xen_pcpu_down(dev->id);
117 ret = xen_pcpu_up(dev->id);
128 static DEVICE_ATTR(online, 0644, show_online, store_online);
130 static ssize_t show_apicid(struct device *dev,
131 struct device_attribute *attr,
134 struct pcpu *cpu = container_of(dev, struct pcpu, dev);
136 return sprintf(buf, "%#x\n", cpu->apic_id);
138 static DEVICE_ATTR(apic_id, 0444, show_apicid, NULL);
140 static ssize_t show_acpiid(struct device *dev,
141 struct device_attribute *attr,
144 struct pcpu *cpu = container_of(dev, struct pcpu, dev);
146 return sprintf(buf, "%#x\n", cpu->acpi_id);
148 static DEVICE_ATTR(acpi_id, 0444, show_acpiid, NULL);
150 static struct bus_type xen_pcpu_subsys = {
152 .dev_name = "xen_pcpu",
155 static int xen_pcpu_free(struct pcpu *pcpu)
160 device_remove_file(&pcpu->dev, &dev_attr_online);
161 device_remove_file(&pcpu->dev, &dev_attr_apic_id);
162 device_remove_file(&pcpu->dev, &dev_attr_acpi_id);
163 device_unregister(&pcpu->dev);
164 list_del(&pcpu->pcpu_list);
170 static inline int same_pcpu(struct xenpf_pcpuinfo *info,
173 return (pcpu->apic_id == info->apic_id) &&
174 (pcpu->dev.id == info->xen_cpuid);
178 * Return 1 if online status changed
180 static int xen_pcpu_online_check(struct xenpf_pcpuinfo *info,
185 if (info->xen_cpuid != pcpu->dev.id)
188 if (xen_pcpu_online(info->flags) && !xen_pcpu_online(pcpu->flags)) {
189 /* the pcpu is onlined */
190 pcpu->flags |= XEN_PCPU_FLAGS_ONLINE;
191 blocking_notifier_call_chain(&pcpu_chain, CPU_ONLINE,
192 notifier_param(pcpu));
193 kobject_uevent(&pcpu->dev.kobj, KOBJ_ONLINE);
195 } else if (!xen_pcpu_online(info->flags) &&
196 xen_pcpu_online(pcpu->flags)) {
197 /* The pcpu is offlined now */
198 pcpu->flags &= ~XEN_PCPU_FLAGS_ONLINE;
199 blocking_notifier_call_chain(&pcpu_chain, CPU_DEAD,
200 notifier_param(pcpu));
201 kobject_uevent(&pcpu->dev.kobj, KOBJ_OFFLINE);
208 static int pcpu_dev_init(struct pcpu *cpu)
210 int err = device_register(&cpu->dev);
213 device_create_file(&cpu->dev, &dev_attr_online);
214 device_create_file(&cpu->dev, &dev_attr_apic_id);
215 device_create_file(&cpu->dev, &dev_attr_acpi_id);
220 static struct pcpu *get_pcpu(unsigned int xen_id)
224 list_for_each_entry(pcpu, &xen_pcpus, pcpu_list)
225 if (pcpu->dev.id == xen_id)
231 static struct pcpu *init_pcpu(struct xenpf_pcpuinfo *info)
236 if (info->flags & XEN_PCPU_FLAGS_INVALID)
237 return ERR_PTR(-EINVAL);
239 /* The PCPU is just added */
240 pcpu = kzalloc(sizeof(struct pcpu), GFP_KERNEL);
242 return ERR_PTR(-ENOMEM);
244 INIT_LIST_HEAD(&pcpu->pcpu_list);
245 pcpu->apic_id = info->apic_id;
246 pcpu->acpi_id = info->acpi_id;
247 pcpu->flags = info->flags;
249 pcpu->dev.bus = &xen_pcpu_subsys;
250 pcpu->dev.id = info->xen_cpuid;
252 err = pcpu_dev_init(pcpu);
258 list_add_tail(&pcpu->pcpu_list, &xen_pcpus);
/* Return codes of _sync_pcpu() (non-negative cases) */
#define PCPU_NO_CHANGE		0
#define PCPU_ADDED		1
#define PCPU_ONLINE_OFFLINE	2
#define PCPU_REMOVED		3
267 * Caller should hold the pcpu lock
268 * < 0: Something wrong
272 static int _sync_pcpu(unsigned int cpu_num, unsigned int *max_id)
275 struct xenpf_pcpuinfo *info;
276 xen_platform_op_t op;
279 op.cmd = XENPF_get_cpuinfo;
280 info = &op.u.pcpu_info;
281 info->xen_cpuid = cpu_num;
284 ret = HYPERVISOR_platform_op(&op);
285 } while (ret == -EBUSY);
290 *max_id = op.u.pcpu_info.max_present;
292 pcpu = get_pcpu(cpu_num);
294 if (info->flags & XEN_PCPU_FLAGS_INVALID) {
295 /* The pcpu has been removed */
300 return PCPU_NO_CHANGE;
305 pcpu = init_pcpu(info);
308 pr_warn("Failed to init pCPU %#x (%ld)\n",
309 info->xen_cpuid, PTR_ERR(pcpu));
310 return PTR_ERR(pcpu);
313 if (!same_pcpu(info, pcpu)) {
315 * Old pCPU is replaced by a new one, which means
316 * several vIRQ-s were missed - can this happen?
318 pr_warn("pCPU %#x changed!\n", pcpu->dev.id);
319 pcpu->apic_id = info->apic_id;
320 pcpu->acpi_id = info->acpi_id;
322 if (xen_pcpu_online_check(info, pcpu))
323 return PCPU_ONLINE_OFFLINE;
324 return PCPU_NO_CHANGE;
328 * Sync dom0's pcpu information with xen hypervisor's
330 static int xen_sync_pcpus(void)
333 * Boot cpu always have cpu_id 0 in xen
335 unsigned int cpu_num = 0, max_id = 0;
340 while ((result >= 0) && (cpu_num <= max_id)) {
341 result = _sync_pcpu(cpu_num, &max_id);
346 case PCPU_ONLINE_OFFLINE:
350 pr_warn("Failed to sync pcpu %#x\n", cpu_num);
357 struct pcpu *pcpu, *tmp;
359 list_for_each_entry_safe(pcpu, tmp, &xen_pcpus, pcpu_list)
/* Workqueue callback: resync all pCPU state outside interrupt context. */
static void xen_pcpu_dpc(struct work_struct *work)
{
	if (xen_sync_pcpus() < 0)
		pr_warn("xen_pcpu_dpc: Failed to sync pcpu information\n");
}
static DECLARE_WORK(xen_pcpu_work, xen_pcpu_dpc);
375 static irqreturn_t xen_pcpu_interrupt(int irq, void *dev_id)
377 schedule_work(&xen_pcpu_work);
382 #ifdef CONFIG_ACPI_HOTPLUG_CPU
384 int xen_pcpu_hotplug(int type)
386 schedule_work(&xen_pcpu_work);
390 EXPORT_SYMBOL_GPL(xen_pcpu_hotplug);
392 int xen_pcpu_index(uint32_t id, bool is_acpiid)
394 unsigned int cpu_num, max_id;
395 xen_platform_op_t op;
396 struct xenpf_pcpuinfo *info = &op.u.pcpu_info;
398 op.cmd = XENPF_get_cpuinfo;
399 for (max_id = cpu_num = 0; cpu_num <= max_id; ++cpu_num) {
402 info->xen_cpuid = cpu_num;
404 ret = HYPERVISOR_platform_op(&op);
405 } while (ret == -EBUSY);
409 if (info->max_present > max_id)
410 max_id = info->max_present;
411 if (id == (is_acpiid ? info->acpi_id : info->apic_id))
417 EXPORT_SYMBOL_GPL(xen_pcpu_index);
419 #endif /* CONFIG_ACPI_HOTPLUG_CPU */
421 static int __init xen_pcpu_init(void)
425 if (!is_initial_xendomain())
428 err = subsys_system_register(&xen_pcpu_subsys, NULL);
430 pr_warn("xen_pcpu_init: "
431 "Failed to register subsys (%d)\n", err);
437 if (!list_empty(&xen_pcpus))
438 err = bind_virq_to_irqhandler(VIRQ_PCPU_STATE, 0,
439 xen_pcpu_interrupt, 0,
442 pr_warn("xen_pcpu_init: "
443 "Failed to bind virq (%d)\n", err);
447 subsys_initcall(xen_pcpu_init);