2 * Machine specific setup for generic
6 #include <linux/init.h>
7 #include <linux/interrupt.h>
8 #include <linux/module.h>
10 #include <asm/arch_hooks.h>
12 #include <asm/setup.h>
13 #include <asm/fixmap.h>
15 #include <xen/interface/callback.h>
16 #include <xen/interface/memory.h>
/*
 * Whether to suppress IPI shortcut broadcasts (see the "No IPI
 * Broadcast" printk strings below).  Defaults to on when CPU hotplug
 * is configured, off otherwise.
 * NOTE(review): the #else and #endif between/after the two
 * DEFAULT_SEND_IPI definitions are not visible in this extract — the
 * preprocessor block appears truncated here.
 */
#ifdef CONFIG_HOTPLUG_CPU
#define DEFAULT_SEND_IPI (1)
#define DEFAULT_SEND_IPI (0)

int no_broadcast=DEFAULT_SEND_IPI;
/*
 * Parse the "no_ipi_broadcast=" kernel command-line option into
 * no_broadcast and report the chosen mode.
 * NOTE(review): the function's braces, the tail of the printk()
 * argument list and the return statement are not visible in this
 * extract — the body appears truncated here.
 */
static __init int no_ipi_broadcast(char *str)
	get_option(&str, &no_broadcast);
	printk ("Using %s mode\n", no_broadcast ? "No IPI Broadcast" :

__setup("no_ipi_broadcast", no_ipi_broadcast);
/*
 * Log the selected IPI mode once, late in boot (after command-line
 * parsing has had a chance to change no_broadcast).
 * NOTE(review): the function's braces, the tail of the printk()
 * argument list and the return statement are not visible in this
 * extract — the body appears truncated here.
 */
static int __init print_ipi_mode(void)
	printk ("Using IPI %s mode\n", no_broadcast ? "No-Shortcut" :

late_initcall(print_ipi_mode);
/*
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 *
 * This is included late in kernel/setup.c so that it can make
 * use of all of the static functions.
 *
 * Builds the guest's e820 map: asks the hypervisor via the
 * XENMEM_memory_map hypercall; if that op is unsupported (-ENOSYS,
 * pre-3.0.3 hypervisors) falls back to a single RAM region sized from
 * xen_start_info->nr_pages plus 8MB of slack.
 *
 * NOTE(review): several interior lines (the declaration of rc, the
 * map[0].addr assignment, any non-ENOSYS error handling, the return
 * statement and the closing braces) are not visible in this extract —
 * the body appears truncated here.
 */
char * __init machine_specific_memory_setup(void)
	struct xen_memory_map memmap;
	/*
	 * This is rather large for a stack variable but this early in
	 * the boot process we know we have plenty slack space.
	 */
	struct e820entry map[E820MAX];

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
	if ( rc == -ENOSYS ) {
		memmap.nr_entries = 1;
		map[0].size = PFN_PHYS((unsigned long long)xen_start_info->nr_pages);
		/* 8MB slack (to balance backend allocations). */
		map[0].size += 8ULL << 20;
		map[0].type = E820_RAM;

	/* nr_entries is passed by reference so sanitizing can shrink it. */
	sanitize_e820_map(map, (char *)&memmap.nr_entries);

	BUG_ON(copy_e820_map(map, (char)memmap.nr_entries) < 0);
/* Assembly entry points — presumably defined in entry.S; confirm. */
extern void hypervisor_callback(void);
extern void failsafe_callback(void);
extern void nmi(void);

/*
 * Machine-to-physical frame translation table; initialized (and
 * possibly relocated via XENMEM_machphys_mapping) in
 * machine_specific_arch_setup().
 */
unsigned long *machine_to_phys_mapping;
EXPORT_SYMBOL(machine_to_phys_mapping);
/* Computed so that (1UL << machine_to_phys_order) covers the table. */
unsigned int machine_to_phys_order;
EXPORT_SYMBOL(machine_to_phys_order);
/*
 * machine_specific_arch_setup - early Xen-specific arch initialization.
 *
 * Registers the event, failsafe and NMI callbacks with the hypervisor
 * via CALLBACKOP_register.  Under CONFIG_XEN_COMPAT <= 0x030002 it
 * falls back to the legacy HYPERVISOR_set_callbacks /
 * HYPERVISOR_nmi_op interfaces for older hypervisors.  It then
 * reserves the hypervisor's hole at the top of the virtual address
 * space and locates the machine-to-physical translation table,
 * computing machine_to_phys_order from its entry count.
 *
 * NOTE(review): several interior lines (the declaration of ret, the
 * -ENOSYS checks guarding the fallbacks, closing braces and #endif
 * directives) are not visible in this extract — the body appears
 * truncated here.
 */
void __init machine_specific_arch_setup(void)
	struct xen_machphys_mapping mapping;
	unsigned long machine_to_phys_nr_ents;
	struct xen_platform_parameters pp;
	static struct callback_register __initdata event = {
		.type = CALLBACKTYPE_event,
		.address = { __KERNEL_CS, (unsigned long)hypervisor_callback },
	static struct callback_register __initdata failsafe = {
		.type = CALLBACKTYPE_failsafe,
		.address = { __KERNEL_CS, (unsigned long)failsafe_callback },
	static struct callback_register __initdata nmi_cb = {
		.type = CALLBACKTYPE_nmi,
		.address = { __KERNEL_CS, (unsigned long)nmi },

	ret = HYPERVISOR_callback_op(CALLBACKOP_register, &event);
	ret = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
#if CONFIG_XEN_COMPAT <= 0x030002
	/* Legacy (pre-3.0.3) interface takes raw cs:eip pairs. */
	ret = HYPERVISOR_set_callbacks(
		event.address.cs, event.address.eip,
		failsafe.address.cs, failsafe.address.eip);

	ret = HYPERVISOR_callback_op(CALLBACKOP_register, &nmi_cb);
#if CONFIG_XEN_COMPAT <= 0x030002
	if (ret == -ENOSYS) {
		/* Legacy NMI registration for pre-3.0.3 hypervisors. */
		static struct xennmi_callback __initdata cb = {
			.handler_address = (unsigned long)nmi

		HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);

	/* Keep the hypervisor's region out of the kernel's address space. */
	if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0) {
		hypervisor_virt_start = pp.virt_start;
		reserve_top_address(0UL - pp.virt_start);

	/* Default M2P location; newer hypervisors report the real one. */
	machine_to_phys_mapping = (unsigned long *)MACH2PHYS_VIRT_START;
	machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
	if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
		machine_to_phys_mapping = (unsigned long *)mapping.v_start;
		machine_to_phys_nr_ents = mapping.max_mfn + 1;

	/* Smallest power of two covering the M2P entry count. */
	while ((1UL << machine_to_phys_order) < machine_to_phys_nr_ents )
		machine_to_phys_order++;