1 #include <linux/kernel.h>
2 #include <linux/init.h>
3 #include <linux/memblock.h>
8 #include <asm/bios_ebda.h>
10 #define BIOS_LOWMEM_KILOBYTES 0x413
/*
 * The BIOS places the EBDA/XBDA at the top of conventional
 * memory, and usually decreases the reported amount of
 * conventional memory (int 0x12) too. This also contains a
 * workaround for Dell systems that neglect to reserve EBDA.
 * The same workaround also avoids a problem with the AMD768MPX
 * chipset: reserve a page before VGA to prevent PCI prefetch
 * into it (errata #56). Usually the page is reserved anyways,
 * unless you have no PS/2 mouse plugged in.
 */
/*
 * reserve_ebda_region() - keep the BIOS Extended BIOS Data Area and the
 * remainder of conventional memory below the 1 MB mark out of the
 * kernel's usable-RAM pool, via memblock_reserve().
 *
 * NOTE(review): this chunk appears to be a sampled extraction -- the
 * opening brace, the early "return;" for the paravirt case, the
 * kilobyte-to-byte conversion of lowmem, and the assignment bodies of
 * the two fixup "if"s are missing from view. Confirm against the
 * original file before relying on this text verbatim.
 */
22 void __init reserve_ebda_region(void)
24 	unsigned int lowmem, ebda_addr;
26 	/* To determine the position of the EBDA and the */
27 	/* end of conventional memory, we need to look at */
28 	/* the BIOS data area. In a paravirtual environment */
29 	/* that area is absent. We'll just have to assume */
30 	/* that the paravirt case can handle memory setup */
31 	/* correctly, without our help. */
32 	if (paravirt_enabled())
	/* NOTE(review): the early-return body for the paravirt case was
	 * dropped by the extraction -- presumably "return;". */
35 	/* end of low (conventional) memory */
36 	lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES);
	/* NOTE(review): the BIOS reports this value in kilobytes; the
	 * original converts it to bytes (a "lowmem <<= 10;" appears to be
	 * missing here) -- verify, since the comparisons below treat
	 * lowmem as a byte address. */
39 	/* start of EBDA area */
40 	ebda_addr = get_bios_ebda();
42 	/* Fixup: bios puts an EBDA in the top 64K segment */
43 	/* of conventional memory, but does not adjust lowmem. */
44 	if ((lowmem - ebda_addr) <= 0x10000)
	/* NOTE(review): fixup assignment (presumably "lowmem = ebda_addr;")
	 * missing from this extraction. */
47 	/* Fixup: bios does not report an EBDA at all. */
48 	/* Some old Dells seem to need 4k anyhow (bugzilla 2990) */
49 	if ((ebda_addr == 0) && (lowmem >= 0x9f000))
	/* NOTE(review): fixup assignment (presumably "lowmem = 0x9f000;")
	 * missing from this extraction. */
52 	/* Paranoia: should never happen, but... */
53 	if ((lowmem == 0) || (lowmem >= 0x100000))
	/* NOTE(review): fallback assignment for the paranoia case missing
	 * from this extraction. */
56 	/* reserve all memory between lowmem and the 1MB mark */
57 	memblock_reserve(lowmem, 0x100000 - lowmem);
59 #else /* CONFIG_XEN */
60 #include <linux/export.h>
61 #include <asm/fixmap.h>
62 #include <asm/mc146818rtc.h>
63 #include <asm/pgtable.h>
64 #include <asm/sections.h>
65 #include <xen/interface/callback.h>
66 #include <xen/interface/memory.h>
68 extern void hypervisor_callback(void);
69 extern void failsafe_callback(void);
70 extern void nmi(void);
73 #include <asm/proto.h>
74 #define CALLBACK_ADDR(fn) ((unsigned long)(fn))
76 #define CALLBACK_ADDR(fn) { __KERNEL_CS, (unsigned long)(fn) }
/* Start address of the initial ramdisk; set up during early boot
 * (presumably from xen_start_info -- confirm against the full file). */
79 unsigned long __initdata xen_initrd_start;
/*
 * Machine-to-physical translation table. Defaults to the fixed
 * MACH2PHYS_VIRT_START window and may be replaced with the range the
 * hypervisor reports via XENMEM_machphys_mapping in xen_start_kernel().
 * Exported because modules perform M2P lookups through it.
 */
81 unsigned long *__read_mostly machine_to_phys_mapping =
82 	(void *)MACH2PHYS_VIRT_START;
83 EXPORT_SYMBOL(machine_to_phys_mapping);
/* Number of valid entries in machine_to_phys_mapping; initialized in
 * xen_start_kernel() (max_mfn + 1, or MACH2PHYS_NR_ENTRIES fallback). */
84 unsigned long __read_mostly machine_to_phys_nr;
85 EXPORT_SYMBOL(machine_to_phys_nr);
/*
 * xen_start_kernel() - early Xen-specific boot setup: discover the
 * machine-to-physical mapping, enable writable page tables, reserve the
 * boot page tables in memblock, wire up the early fixmap, switch to the
 * real shared_info page, map the low 1MB ISA range, and hook dom0
 * wallclock/ROM-probing callbacks.
 *
 * NOTE(review): sampled extraction -- braces, "else" lines, some
 * statements and #ifdef structure are missing from view; confirm
 * against the original file.
 */
87 void __init xen_start_kernel(void)
90 	struct xen_machphys_mapping mapping;
	/* Ask the hypervisor where the M2P table lives; on failure fall
	 * back to the compile-time default range (else-branch line appears
	 * dropped by the extraction). */
94 	if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
95 		machine_to_phys_mapping = (unsigned long *)mapping.v_start;
96 		machine_to_phys_nr = mapping.max_mfn + 1;
98 		machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
	/* Sanity check: the table must not wrap the address space. */
100 	WARN_ON(machine_to_phys_mapping + (machine_to_phys_nr - 1)
101 		< machine_to_phys_mapping);
	/* P2M table is provided by the domain builder unless the guest is
	 * auto-translated. */
104 	if (!xen_feature(XENFEAT_auto_translated_physmap))
105 		phys_to_machine_mapping =
106 			(unsigned long *)xen_start_info->mfn_list;
108 	WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
109 				     VMASST_TYPE_writable_pagetables));
	/* Reserve everything the domain builder placed after the kernel
	 * image up to the end of the boot page tables. */
111 	memblock_reserve(ALIGN(__pa_symbol(&_end), PAGE_SIZE),
112 			 __pa(xen_start_info->pt_base)
113 			 + PFN_PHYS(xen_start_info->nr_pt_frames));
117 	extern pte_t swapper_pg_fixmap[PTRS_PER_PTE];
120 	/* Do an early initialization of the fixmap area */
121 	make_lowmem_page_readonly(swapper_pg_fixmap, XENFEAT_writable_page_tables);
122 	addr = __fix_to_virt(FIX_EARLYCON_MEM_BASE);
	/* NOTE(review): intermediate argument lines of this set_pmd() call
	 * are missing from the extraction. */
123 	set_pmd(pmd_offset(pud_offset(swapper_pg_dir + pgd_index(addr),
126 		__pmd(__pa_symbol(swapper_pg_fixmap) | _PAGE_TABLE));
	/* Compile-time check that all fixmap slots used here live in the
	 * single PMD covered by swapper_pg_fixmap. */
133 #define __FIXADDR_TOP (-PAGE_SIZE)
134 #define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
135 #define FIX_BUG_ON(fix) BUILD_BUG_ON(pmd_index(__fix_to_virt(FIX_##fix)) \
136 			!= pmd_index(__fix_to_virt(FIX_EARLYCON_MEM_BASE)))
137 	FIX_BUG_ON(SHARED_INFO);
138 	FIX_BUG_ON(ISAMAP_BEGIN);
139 	FIX_BUG_ON(ISAMAP_END);
143 	/* Switch to the real shared_info page, and clear the dummy page. */
144 	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
145 	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
146 	clear_page(empty_zero_page);
150 	/* Set up mapping of lowest 1MB of physical memory. */
	/* dom0 maps the real low 1MB; other domains map the zero page
	 * instead (else-branch line appears dropped by the extraction). */
151 	for (i = 0; i < NR_FIX_ISAMAPS; i++)
152 		if (is_initial_xendomain())
153 			set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
155 			__set_fixmap(FIX_ISAMAP_BEGIN - i,
156 				     virt_to_machine(empty_zero_page),
	/* Only dom0 can touch the CMOS RTC and probe adapter ROMs. */
159 	if (is_initial_xendomain()) {
160 		x86_platform.get_wallclock = mach_get_cmos_time;
161 		x86_platform.set_wallclock = mach_set_rtc_mmss;
165 		x86_init.resources.probe_roms = x86_init_noop;
/*
 * xen_arch_setup() - register the kernel's entry points (event,
 * failsafe, syscall and NMI callbacks) with the hypervisor, with
 * fallbacks to the older hypercalls for pre-3.0.2 Xen when
 * CONFIG_XEN_COMPAT permits.
 *
 * NOTE(review): sampled extraction -- struct-initializer closers,
 * error-handling lines between the hypercalls, and the matching
 * #else/#endif lines are missing from view; confirm against the
 * original file.
 */
168 void __init xen_arch_setup(void)
	/* Callback descriptors; CALLBACK_ADDR expands differently for
	 * 32-bit (cs:eip pair) vs 64-bit (plain address) -- see the
	 * #define pair near the top of this section. */
171 	static const struct callback_register __initconst event = {
172 		.type = CALLBACKTYPE_event,
173 		.address = CALLBACK_ADDR(hypervisor_callback)
175 	static const struct callback_register __initconst failsafe = {
176 		.type = CALLBACKTYPE_failsafe,
177 		.address = CALLBACK_ADDR(failsafe_callback)
180 	static const struct callback_register __initconst syscall = {
181 		.type = CALLBACKTYPE_syscall,
182 		.address = CALLBACK_ADDR(system_call)
185 	static const struct callback_register __initconst nmi_cb = {
186 		.type = CALLBACKTYPE_nmi,
187 		.address = CALLBACK_ADDR(nmi)
	/* Register the callbacks one by one; intermediate checks of "ret"
	 * between these calls appear dropped by the extraction. */
190 	ret = HYPERVISOR_callback_op(CALLBACKOP_register, &event);
192 	ret = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
195 	ret = HYPERVISOR_callback_op(CALLBACKOP_register, &syscall);
	/* Fallback for hypervisors that predate CALLBACKOP_register:
	 * use the legacy set_callbacks hypercall instead. */
197 #if CONFIG_XEN_COMPAT <= 0x030002
200 		ret = HYPERVISOR_set_callbacks(
201 			event.address.cs, event.address.eip,
202 			failsafe.address.cs, failsafe.address.eip);
	/* NOTE(review): argument lines of this second (presumably 64-bit)
	 * set_callbacks variant are missing from the extraction. */
204 		ret = HYPERVISOR_set_callbacks(
212 	ret = HYPERVISOR_callback_op(CALLBACKOP_register, &nmi_cb);
213 #if CONFIG_XEN_COMPAT <= 0x030002
	/* Old hypervisor: fall back to the dedicated NMI hypercall. */
214 	if (ret == -ENOSYS) {
215 		static struct xennmi_callback __initdata cb = {
216 			.handler_address = (unsigned long)nmi
219 		HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
223 #endif /* CONFIG_XEN */