1 #include <linux/kernel.h>
2 #include <linux/init.h>
6 #include <asm/bios_ebda.h>
8 #define BIOS_LOWMEM_KILOBYTES 0x413
/*
 * The BIOS places the EBDA/XBDA at the top of conventional
 * memory, and usually decreases the reported amount of
 * conventional memory (int 0x12) too. This also contains a
 * workaround for Dell systems that neglect to reserve EBDA.
 * The same workaround also avoids a problem with the AMD768MPX
 * chipset: reserve a page before VGA to prevent PCI prefetch
 * into it (errata #56). Usually the page is reserved anyways,
 * unless you have no PS/2 mouse plugged in.
 */
/*
 * Reserve the EBDA/XBDA region — and, as a workaround, everything
 * between the end of conventional memory and the 1MB mark — so the
 * early allocator never hands it out as usable RAM.
 *
 * NOTE(review): this excerpt is line-elided; the early `return` for
 * the paravirt case, the bodies of the two fixup `if`s, and the
 * closing brace are not visible here.
 */
void __init reserve_ebda_region(void)
	unsigned int lowmem, ebda_addr;

	/* To determine the position of the EBDA and the */
	/* end of conventional memory, we need to look at */
	/* the BIOS data area. In a paravirtual environment */
	/* that area is absent. We'll just have to assume */
	/* that the paravirt case can handle memory setup */
	/* correctly, without our help. */
	if (paravirt_enabled())

	/* end of low (conventional) memory */
	/* BIOS word at 0x413 holds conventional-memory size in KiB;
	 * presumably converted to a byte address in an elided line —
	 * TODO confirm against the full source. */
	lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES);

	/* start of EBDA area */
	ebda_addr = get_bios_ebda();

	/* Fixup: bios puts an EBDA in the top 64K segment */
	/* of conventional memory, but does not adjust lowmem. */
	/* NOTE(review): both operands are unsigned, so if ebda_addr
	 * ever exceeds lowmem this difference wraps and the test
	 * misfires — verify the elided fixup body handles that. */
	if ((lowmem - ebda_addr) <= 0x10000)

	/* Fixup: bios does not report an EBDA at all. */
	/* Some old Dells seem to need 4k anyhow (bugzilla 2990) */
	if ((ebda_addr == 0) && (lowmem >= 0x9f000))

	/* Paranoia: should never happen, but... */
	if ((lowmem == 0) || (lowmem >= 0x100000))

	/* reserve all memory between lowmem and the 1MB mark */
	reserve_early_overlap_ok(lowmem, 0x100000, "BIOS reserved");
57 #else /* CONFIG_XEN */
58 #include <linux/module.h>
59 #include <asm/fixmap.h>
60 #include <asm/pgtable.h>
61 #include <asm/sections.h>
62 #include <xen/interface/callback.h>
63 #include <xen/interface/memory.h>
/* Entry points (defined in assembly) registered with the hypervisor. */
extern void hypervisor_callback(void);
extern void failsafe_callback(void);
extern void nmi(void);

#include <asm/proto.h>
/* 64-bit: a callback is just a flat virtual address. */
#define CALLBACK_ADDR(fn) ((unsigned long)(fn))
/* 32-bit: a callback is a segment:offset pair.
 * NOTE(review): the #ifdef CONFIG_X86_64 / #else wrapping these two
 * alternative definitions is elided in this excerpt. */
#define CALLBACK_ADDR(fn) { __KERNEL_CS, (unsigned long)(fn) }

/* Machine-frame -> pseudo-physical-frame translation table; starts at
 * the hypervisor-provided default and may be rebased from the
 * XENMEM_machphys_mapping query in xen_start_kernel(). */
unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START;
EXPORT_SYMBOL(machine_to_phys_mapping);
/* log2 (rounded up) of the number of entries in that table. */
unsigned int machine_to_phys_order;
EXPORT_SYMBOL(machine_to_phys_order);
/*
 * Early Xen guest bring-up: discover the machine-to-phys table,
 * enable writable pagetables, reserve the hypervisor-built page
 * tables, wire up an early fixmap, and map the shared_info page
 * and (for dom0) the low ISA region.
 *
 * NOTE(review): this excerpt is line-elided — closing braces, #ifdef
 * lines (32/64-bit split), the reserve_early() label argument, and
 * several else-branch markers are missing from view.
 */
void __init xen_start_kernel(void)
	struct xen_machphys_mapping mapping;
	unsigned long machine_to_phys_nr_ents;

	struct xen_platform_parameters pp;
	extern pte_t swapper_pg_fixmap[PTRS_PER_PTE];

	/* Ask the hypervisor where the M2P table really lives; fall back
	 * to the compile-time default if the op is unsupported.
	 * NOTE(review): the `} else` between these assignments is elided. */
	if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
		machine_to_phys_mapping = (unsigned long *)mapping.v_start;
		machine_to_phys_nr_ents = mapping.max_mfn + 1;
		machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
	/* Round the entry count up to a power of two. */
	while ((1UL << machine_to_phys_order) < machine_to_phys_nr_ents )
		machine_to_phys_order++;

	/* Non-auto-translated guests manage their own P2M list, supplied
	 * by the domain builder via start_info. */
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		phys_to_machine_mapping =
			(unsigned long *)xen_start_info->mfn_list;

	/* Let the hypervisor emulate writes to pagetable pages. */
	WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
				     VMASST_TYPE_writable_pagetables));

	/* Keep the domain-builder-provided pagetables (which sit right
	 * after the kernel image) out of the free-memory pool.
	 * NOTE(review): the reserve_early() description argument is
	 * elided here. */
	reserve_early(ALIGN(__pa_symbol(&_end), PAGE_SIZE),
		      __pa(xen_start_info->pt_base)
		      + (xen_start_info->nr_pt_frames << PAGE_SHIFT),

	WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
				     VMASST_TYPE_4gb_segments));

	/* Adopt the builder's pagetable as the kernel's swapper_pg_dir. */
	init_mm.pgd = swapper_pg_dir = (pgd_t *)xen_start_info->pt_base;

	/* Learn where the hypervisor hole starts and keep the fixmap
	 * (and everything else) below it. */
	if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0) {
		hypervisor_virt_start = pp.virt_start;
		reserve_top_address(0UL - pp.virt_start);

	/* The hypervisor boundary must be PTE-aligned. */
	BUG_ON(pte_index(hypervisor_virt_start));

	/* Do an early initialization of the fixmap area */
	make_lowmem_page_readonly(swapper_pg_fixmap, XENFEAT_writable_page_tables);
	addr = __fix_to_virt(FIX_EARLYCON_MEM_BASE);
	set_pmd(pmd_offset(pud_offset(swapper_pg_dir + pgd_index(addr),
		__pmd(__pa_symbol(swapper_pg_fixmap) | _PAGE_TABLE));

/* Compile-time proof that every fixmap slot used below lives in the
 * same PMD as FIX_EARLYCON_MEM_BASE, i.e. is covered by the single
 * pagetable page installed above. */
#define __FIXADDR_TOP (-PAGE_SIZE)
#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
#define FIX_BUG_ON(fix) BUILD_BUG_ON(pmd_index(__fix_to_virt(FIX_##fix)) \
				     != pmd_index(__fix_to_virt(FIX_EARLYCON_MEM_BASE)))
	FIX_BUG_ON(SHARED_INFO);
	FIX_BUG_ON(ISAMAP_BEGIN);
	FIX_BUG_ON(ISAMAP_END);

	/* Switch to the real shared_info page, and clear the dummy page. */
	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
	memset(empty_zero_page, 0, sizeof(empty_zero_page));

	/* Set up mapping of lowest 1MB of physical memory. */
	/* dom0 maps the real ISA frames; domU gets the zero page instead.
	 * NOTE(review): the `else` and the __set_fixmap flags argument
	 * are elided in this excerpt. */
	for (i = 0; i < NR_FIX_ISAMAPS; i++)
		if (is_initial_xendomain())
			set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
			__set_fixmap(FIX_ISAMAP_BEGIN - i,
				     virt_to_machine(empty_zero_page),
/*
 * Register the guest's event, failsafe, syscall and NMI entry points
 * with the hypervisor, falling back to the pre-3.0.3 hypercalls for
 * old Xen when CONFIG_XEN_COMPAT allows.
 *
 * NOTE(review): this excerpt is line-elided — the `int ret;`
 * declaration, struct-initializer closers (`};`), the BUG_ON/ret
 * checks after each hypercall, and several #if/#endif lines are
 * missing from view.
 */
void __init xen_arch_setup(void)
	static const struct callback_register __initconst event = {
		.type = CALLBACKTYPE_event,
		.address = CALLBACK_ADDR(hypervisor_callback)
	static const struct callback_register __initconst failsafe = {
		.type = CALLBACKTYPE_failsafe,
		.address = CALLBACK_ADDR(failsafe_callback)
	/* NOTE(review): `system_call` is 64-bit only; the #ifdef guarding
	 * this initializer is presumably elided — confirm in full source. */
	static const struct callback_register __initconst syscall = {
		.type = CALLBACKTYPE_syscall,
		.address = CALLBACK_ADDR(system_call)
	static const struct callback_register __initconst nmi_cb = {
		.type = CALLBACKTYPE_nmi,
		.address = CALLBACK_ADDR(nmi)

	/* Register each callback via the modern CALLBACKOP_register op. */
	ret = HYPERVISOR_callback_op(CALLBACKOP_register, &event);
	ret = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
	ret = HYPERVISOR_callback_op(CALLBACKOP_register, &syscall);

#if CONFIG_XEN_COMPAT <= 0x030002
	/* Old (pre-3.0.3) hypervisors only speak set_callbacks; the
	 * 32-bit variant takes cs:eip pairs. */
	ret = HYPERVISOR_set_callbacks(
		event.address.cs, event.address.eip,
		failsafe.address.cs, failsafe.address.eip);
	ret = HYPERVISOR_set_callbacks(

	ret = HYPERVISOR_callback_op(CALLBACKOP_register, &nmi_cb);
#if CONFIG_XEN_COMPAT <= 0x030002
	if (ret == -ENOSYS) {
		/* Fallback: dedicated NMI-registration hypercall. */
		static struct xennmi_callback __initdata cb = {
			.handler_address = (unsigned long)nmi

		HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
222 #endif /* CONFIG_XEN */