arch/i386/mach-xen/setup.c
/*
 *      Machine specific setup for generic
 */

#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <asm/acpi.h>
#include <asm/arch_hooks.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/fixmap.h>

#include <xen/interface/callback.h>
#include <xen/interface/memory.h>

#ifdef CONFIG_HOTPLUG_CPU
#define DEFAULT_SEND_IPI        (1)
#else
#define DEFAULT_SEND_IPI        (0)
#endif

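/*
 * "no_ipi_broadcast" command line option: non-zero means IPIs are sent to
 * each CPU individually rather than via an APIC shortcut broadcast.  CPU
 * hotplug needs the per-CPU variant, hence the default follows
 * CONFIG_HOTPLUG_CPU.
 */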
int no_broadcast = DEFAULT_SEND_IPI;

static __init int no_ipi_broadcast(char *str)
{
        get_option(&str, &no_broadcast);
        printk("Using %s mode\n",
               no_broadcast ? "No IPI Broadcast" : "IPI Broadcast");
        return 1;
}

__setup("no_ipi_broadcast", no_ipi_broadcast);

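/* Report the IPI mode that ended up selected once initcalls run. */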
static int __init print_ipi_mode(void)
{
        printk("Using IPI %s mode\n",
               no_broadcast ? "No-Shortcut" : "Shortcut");
        return 0;
}

late_initcall(print_ipi_mode);

/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 *
 * Description:
 *      This is included late in kernel/setup.c so that it can make
 *      use of all of the static functions.
 **/

char * __init machine_specific_memory_setup(void)
{
        int rc;
        struct xen_memory_map memmap;
        /*
         * This is rather large for a stack variable but this early in
         * the boot process we know we have plenty of slack space.
         */
        struct e820entry map[E820MAX];

        memmap.nr_entries = E820MAX;
        set_xen_guest_handle(memmap.buffer, map);

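        /*
         * Ask Xen for the pseudo-physical memory map.  Hypervisors that
         * predate XENMEM_memory_map return -ENOSYS; in that case fake a
         * single RAM region covering the initial allocation reported in
         * xen_start_info->nr_pages.
         */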
        rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
        if (rc == -ENOSYS) {
                memmap.nr_entries = 1;
                map[0].addr = 0ULL;
                map[0].size = PFN_PHYS((unsigned long long)xen_start_info->nr_pages);
                /* 8MB slack (to balance backend allocations). */
                map[0].size += 8ULL << 20;
                map[0].type = E820_RAM;
                rc = 0;
        }
        BUG_ON(rc);

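        /*
         * Sort and merge overlapping entries, then install the result as
         * the kernel's e820 table; a negative return from copy_e820_map()
         * indicates a map the kernel cannot use.
         */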
        sanitize_e820_map(map, (char *)&memmap.nr_entries);

        BUG_ON(copy_e820_map(map, (char)memmap.nr_entries) < 0);

        return "Xen";
}


extern void hypervisor_callback(void);
extern void failsafe_callback(void);
extern void nmi(void);

unsigned long *machine_to_phys_mapping;
EXPORT_SYMBOL(machine_to_phys_mapping);
unsigned int machine_to_phys_order;
EXPORT_SYMBOL(machine_to_phys_order);

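/**
 * machine_specific_arch_setup - Xen-specific early arch setup.
 *
 * Registers the event, failsafe and NMI entry points with the hypervisor,
 * reserves the hypervisor hole at the top of the virtual address space and
 * locates the machine-to-physical translation table.
 */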
void __init machine_specific_arch_setup(void)
{
        int ret;
        struct xen_machphys_mapping mapping;
        unsigned long machine_to_phys_nr_ents;
        struct xen_platform_parameters pp;
        static struct callback_register __initdata event = {
                .type = CALLBACKTYPE_event,
                .address = { __KERNEL_CS, (unsigned long)hypervisor_callback },
        };
        static struct callback_register __initdata failsafe = {
                .type = CALLBACKTYPE_failsafe,
                .address = { __KERNEL_CS, (unsigned long)failsafe_callback },
        };
        static struct callback_register __initdata nmi_cb = {
                .type = CALLBACKTYPE_nmi,
                .address = { __KERNEL_CS, (unsigned long)nmi },
        };

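        /*
         * Register the event and failsafe callbacks.  Hypervisors at or
         * below 3.0.2 lack the callback_op hypercall and return -ENOSYS;
         * when built with that compatibility level, fall back to the older
         * set_callbacks interface.
         */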
        ret = HYPERVISOR_callback_op(CALLBACKOP_register, &event);
        if (ret == 0)
                ret = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
#if CONFIG_XEN_COMPAT <= 0x030002
        if (ret == -ENOSYS)
                ret = HYPERVISOR_set_callbacks(
                        event.address.cs, event.address.eip,
                        failsafe.address.cs, failsafe.address.eip);
#endif
        BUG_ON(ret);

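        /*
         * NMI callback likewise: prefer CALLBACKOP_register, falling back
         * to the legacy XENNMI_register_callback nmi_op on old hypervisors.
         */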
        ret = HYPERVISOR_callback_op(CALLBACKOP_register, &nmi_cb);
#if CONFIG_XEN_COMPAT <= 0x030002
        if (ret == -ENOSYS) {
                static struct xennmi_callback __initdata cb = {
                        .handler_address = (unsigned long)nmi
                };

                HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
        }
#endif

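        /*
         * The hypervisor reserves the top of the virtual address space for
         * itself.  Query where that region begins and keep the kernel's
         * own top-of-memory mappings (fixmap etc.) below it.
         */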
        if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0) {
                hypervisor_virt_start = pp.virt_start;
                reserve_top_address(0UL - pp.virt_start);
        }

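        /*
         * Default to the compile-time machine-to-physical table location;
         * newer hypervisors report the actual mapping (and its size) via
         * XENMEM_machphys_mapping.  machine_to_phys_order ends up as the
         * ceiling log2 of the number of entries.
         */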
        machine_to_phys_mapping = (unsigned long *)MACH2PHYS_VIRT_START;
        machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
        if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
                machine_to_phys_mapping = (unsigned long *)mapping.v_start;
                machine_to_phys_nr_ents = mapping.max_mfn + 1;
        }
        while ((1UL << machine_to_phys_order) < machine_to_phys_nr_ents)
                machine_to_phys_order++;
}