/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *
 * Memory region support
 *	David Parsons <orc@pell.chi.il.us>, July-August 1999
 *
 * Added E820 sanitization routine (removes overlapping memory regions);
 *	Brian Moyle <bmoyle@mvista.com>, February 2001
 *
 * Moved CPU detection code to cpu/${cpu}.c
 *	Patrick Mochel <mochel@osdl.org>, March 2002
 *
 * Provisions for empty E820 memory regions (reported by certain BIOSes).
 *	Alex Achenbach <xela@slit.de>, December 2002.
 *
 * This file handles the architecture-dependent parts of initialization
 */
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/sfi.h>
#include <linux/apm_bios.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/console.h>
#include <linux/mca.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/init.h>
#include <linux/edd.h>
#include <linux/iscsi_ibft.h>
#include <linux/nodemask.h>
#include <linux/kexec.h>
#include <linux/dmi.h>
#include <linux/pfn.h>
#include <linux/pci.h>
#include <asm/pci-direct.h>
#include <linux/init_ohci1394_dma.h>
#include <linux/kvm_para.h>

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/delay.h>

#include <linux/kallsyms.h>
#include <linux/cpufreq.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>

#include <linux/percpu.h>
#include <linux/crash_dump.h>
#include <linux/tboot.h>

#include <video/edid.h>

#include <asm/trampoline.h>
#include <asm/mpspec.h>
#include <asm/setup.h>
#include <asm/timer.h>
#include <asm/i8259.h>
#include <asm/sections.h>
#include <asm/io_apic.h>
#include <asm/setup_arch.h>
#include <asm/bios_ebda.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/vsyscall.h>
#include <asm/iommu.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/paravirt.h>
#include <asm/hypervisor.h>
#include <asm/percpu.h>
#include <asm/topology.h>
#include <asm/apicdef.h>
#include <asm/numa_64.h>

#include <asm/hypervisor.h>
#include <xen/interface/kexec.h>
#include <xen/interface/memory.h>
#include <xen/interface/nmi.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
#include <xen/firmware.h>
#include <xen/xencons.h>
static int xen_panic_event(struct notifier_block *, unsigned long, void *);
static struct notifier_block xen_panic_block = {
	xen_panic_event, NULL, 0 /* try to go last */
};
unsigned long *phys_to_machine_mapping;
EXPORT_SYMBOL(phys_to_machine_mapping);
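
/*
 * Two-level index of the p2m table below: each pfn_to_mfn_frame_list[k]
 * points to a page filled with the machine frame numbers of p2m pages,
 * and pfn_to_mfn_frame_list_list records the frames of those list pages.
 * Both are handed to the hypervisor in setup_arch() for save/restore.
 */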
unsigned long *pfn_to_mfn_frame_list_list, **pfn_to_mfn_frame_list;

/* Raw start-of-day parameters from the hypervisor. */
start_info_t *xen_start_info;
EXPORT_SYMBOL(xen_start_info);
/*
 * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
 * The direct mapping extends to max_pfn_mapped, so that we can directly access
 * apertures, ACPI and other tables without having to play with fixmaps.
 */
unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;

RESERVE_BRK(dmi_alloc, 65536);
unsigned int boot_cpu_id __read_mostly;
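
/*
 * Boundaries of the early boot "brk" allocator: extend_brk() below hands
 * out zeroed memory from [_brk_end, __brk_limit) and reserve_brk() later
 * marks the consumed range [_brk_start, _brk_end) as reserved.
 */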
static __initdata unsigned long _brk_start = (unsigned long)__brk_base;
unsigned long _brk_end = (unsigned long)__brk_base;

int default_cpu_present_to_apicid(int mps_cpu)
{
	return __default_cpu_present_to_apicid(mps_cpu);
}

int default_check_phys_apicid_present(int phys_apicid)
{
	return __default_check_phys_apicid_present(phys_apicid);
}
#ifndef CONFIG_DEBUG_BOOT_PARAMS
struct boot_params __initdata boot_params;
#else
struct boot_params boot_params;
#endif
static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource bss_resource = {
	.name	= "Kernel bss",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};
/* cpu data as detected by the assembly code in head.S */
struct cpuinfo_x86 new_cpu_data __cpuinitdata = {0, 0, 0, 0, -1, 1, 0, 0, -1};
/* common cpu data for all cpus */
struct cpuinfo_x86 boot_cpu_data __read_mostly = {0, 0, 0, 0, -1, 1, 0, 0, -1};
EXPORT_SYMBOL(boot_cpu_data);

static void set_mca_bus(int x)

unsigned int def_to_bigsmp;

/* for MCA, but anyone else can use it if they want */
unsigned int machine_id;
unsigned int machine_submodel_id;
unsigned int BIOS_revision;

struct apm_info apm_info;
EXPORT_SYMBOL(apm_info);
#if defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
struct ist_info ist_info;
EXPORT_SYMBOL(ist_info);
#elif defined(CONFIG_X86_SPEEDSTEP_SMI)
struct ist_info ist_info;
#endif

struct cpuinfo_x86 boot_cpu_data __read_mostly = {
	.x86_phys_bits = MAX_PHYSMEM_BITS,
};
EXPORT_SYMBOL(boot_cpu_data);
#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
unsigned long mmu_cr4_features;
#else
unsigned long mmu_cr4_features = X86_CR4_PAE;
#endif
/* Boot loader ID and version as integers, for the benefit of proc_dointvec */
int bootloader_type, bootloader_version;

struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

unsigned long saved_video_mode;

#define RAMDISK_IMAGE_START_MASK	0x07FF
#define RAMDISK_PROMPT_FLAG		0x8000
#define RAMDISK_LOAD_FLAG		0x4000

static char __initdata command_line[COMMAND_LINE_SIZE];
#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
#ifdef CONFIG_EDD_MODULE
/*
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 */
static inline void __init copy_edd(void)
{
	memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
	       sizeof(edd.mbr_signature));
	memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
	edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
	edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
static inline void __init copy_edd(void)
{
}
#endif
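
/*
 * extend_brk(): a boot-time bump allocator over the brk area reserved via
 * RESERVE_BRK(). It rounds _brk_end up to the requested alignment, returns
 * a zeroed chunk, and BUG()s if the request overruns __brk_limit or the
 * area has already been locked down by reserve_brk().
 */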
void * __init extend_brk(size_t size, size_t align)
{
	size_t mask = align - 1;
	void *ret;

	BUG_ON(_brk_start == 0);
	BUG_ON(align & mask);
	_brk_end = (_brk_end + mask) & ~mask;
	BUG_ON((char *)(_brk_end + size) > __brk_limit);
	ret = (void *)_brk_end;
	_brk_end += size;
	memset(ret, 0, size);
	return ret;
}
#if defined(CONFIG_X86_64) && !defined(CONFIG_XEN)
static void __init init_gbpages(void)
{
	if (direct_gbpages && cpu_has_gbpages)
		printk(KERN_INFO "Using GB pages for direct mapping\n");
}
#else
static inline void init_gbpages(void) { }
#endif
static void __init reserve_brk(void)
{
	if (_brk_end > _brk_start)
		reserve_early(__pa(_brk_start), __pa(_brk_end), "BRK");
	/* Mark brk area as locked down and no longer taking any
	   new allocations */
	_brk_start = 0;
}
#ifdef CONFIG_BLK_DEV_INITRD

#define MAX_MAP_CHUNK	(NR_FIX_BTMAPS << PAGE_SHIFT)
static void __init relocate_initrd(void)
{
	/* Assume only end is not page aligned */
	u64 ramdisk_image = boot_params.hdr.ramdisk_image;
	u64 ramdisk_size  = boot_params.hdr.ramdisk_size;
	u64 area_size     = PAGE_ALIGN(ramdisk_size);
	u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT;
	u64 ramdisk_here;
	unsigned long slop, clen, mapaddr;
	char *p, *q;

	/* We need to move the initrd down into lowmem */
	ramdisk_here = find_e820_area(0, end_of_lowmem, area_size,
					 PAGE_SIZE);

	if (ramdisk_here == -1ULL)
		panic("Cannot find place for new RAMDISK of size %lld\n",
		      ramdisk_size);

	/* Note: this includes all the lowmem currently occupied by
	   the initrd, we rely on that fact to keep the data intact. */
	reserve_early(ramdisk_here, ramdisk_here + area_size,
			 "NEW RAMDISK");
	initrd_start = ramdisk_here + PAGE_OFFSET;
	initrd_end   = initrd_start + ramdisk_size;
	printk(KERN_INFO "Allocated new RAMDISK: %08llx - %08llx\n",
	       ramdisk_here, ramdisk_here + ramdisk_size);

	q = (char *)initrd_start;

	/* Copy any lowmem portion of the initrd */
	if (ramdisk_image < end_of_lowmem) {
		clen = end_of_lowmem - ramdisk_image;
		p = (char *)__va(ramdisk_image);
		memcpy(q, p, clen);
		q += clen;
		ramdisk_image += clen;
		ramdisk_size  -= clen;
	}
	/* Copy the highmem portion of the initrd */
	while (ramdisk_size) {
		slop = ramdisk_image & ~PAGE_MASK;
		clen = ramdisk_size;
		if (clen > MAX_MAP_CHUNK-slop)
			clen = MAX_MAP_CHUNK-slop;
		mapaddr = ramdisk_image & PAGE_MASK;
		p = early_memremap(mapaddr, clen+slop);
		memcpy(q, p+slop, clen);
		q += clen;
		early_iounmap(p, clen+slop);
		ramdisk_image += clen;
		ramdisk_size  -= clen;
	}

	/* high pages are not converted by early_res_to_bootmem */
	ramdisk_image = boot_params.hdr.ramdisk_image;
	ramdisk_size  = boot_params.hdr.ramdisk_size;
	printk(KERN_INFO "Move RAMDISK from %016llx - %016llx to"
		" %08llx - %08llx\n",
		ramdisk_image, ramdisk_image + ramdisk_size - 1,
		ramdisk_here, ramdisk_here + ramdisk_size - 1);
	printk(KERN_ERR "initrd extends beyond end of memory "
	       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
	       __pa(xen_start_info->mod_start) + xen_start_info->mod_len,
	       max_low_pfn_mapped << PAGE_SHIFT);
static void __init reserve_initrd(void)
{
	/* Assume only end is not page aligned */
	u64 ramdisk_image = boot_params.hdr.ramdisk_image;
	u64 ramdisk_size  = boot_params.hdr.ramdisk_size;
	u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
	u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT;

	if (!boot_params.hdr.type_of_loader ||
	    !ramdisk_image || !ramdisk_size)
		return;		/* No initrd provided by bootloader */

	unsigned long ramdisk_image = __pa(xen_start_info->mod_start);
	unsigned long ramdisk_size  = xen_start_info->mod_len;
	unsigned long ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
	unsigned long end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT;

	if (!xen_start_info->mod_start || !ramdisk_size)
		return;		/* No initrd provided by bootloader */
	if (ramdisk_size >= (end_of_lowmem>>1)) {
		free_early(ramdisk_image, ramdisk_end);
		printk(KERN_ERR "initrd too large to handle, "
		       "disabling initrd\n");
		return;
	}

	printk(KERN_INFO "RAMDISK: %08lx - %08lx\n", ramdisk_image,
	       ramdisk_end);

	if (ramdisk_end <= end_of_lowmem) {
		/* All in lowmem, easy case */
		/*
		 * don't need to reserve again, already reserved early
		 * in i386_start_kernel
		 */
		initrd_start = ramdisk_image + PAGE_OFFSET;
		initrd_end   = initrd_start + ramdisk_size;
#ifdef CONFIG_X86_64_XEN
		initrd_below_start_ok = 1;
#endif
		return;
	}

	free_early(ramdisk_image, ramdisk_end);
}
#else
static void __init reserve_initrd(void)
{
}
#endif /* CONFIG_BLK_DEV_INITRD */
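
/*
 * The setup_data helpers below walk boot_params.hdr.setup_data, a singly
 * linked list of extended boot parameters (boot protocol 2.09 and later)
 * chained by physical address, so each node is mapped with early_memremap()
 * and unmapped again before following the ->next link.
 */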
static void __init parse_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data;

	if (boot_params.hdr.version < 0x0209)
		return;
	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		data = early_memremap(pa_data, PAGE_SIZE);
		switch (data->type) {
		case SETUP_E820_EXT:
			parse_e820_ext(data, pa_data);
			break;
		}
		pa_data = data->next;
		early_iounmap(data, PAGE_SIZE);
	}
}
static void __init e820_reserve_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data;

	if (boot_params.hdr.version < 0x0209)
		return;
	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		data = early_memremap(pa_data, sizeof(*data));
		e820_update_range(pa_data, sizeof(*data)+data->len,
				  E820_RAM, E820_RESERVED_KERN);
		pa_data = data->next;
		early_iounmap(data, sizeof(*data));
	}

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
	memcpy(&e820_saved, &e820, sizeof(struct e820map));
	printk(KERN_INFO "extended physical RAM map:\n");
	e820_print_map("reserve setup_data");
}
static void __init reserve_early_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data;
	char buf[32];

	if (boot_params.hdr.version < 0x0209)
		return;
	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		data = early_memremap(pa_data, sizeof(*data));
		sprintf(buf, "setup data %x", data->type);
		reserve_early(pa_data, pa_data+sizeof(*data)+data->len, buf);
		pa_data = data->next;
		early_iounmap(data, sizeof(*data));
	}
}
/*
 * --------- Crashkernel reservation ------------------------------
 */
static inline unsigned long long get_total_mem(void)
{
	unsigned long long total;

	total = max_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}
static void __init reserve_crashkernel(void)
{
	unsigned long long total_mem;
	unsigned long long crash_size, crash_base;
	int ret;

	total_mem = get_total_mem();

	ret = parse_crashkernel(boot_command_line, total_mem,
			&crash_size, &crash_base);
	if (ret != 0 || crash_size <= 0)
		return;

	/* 0 means: find the address automatically */
	if (crash_base <= 0) {
		const unsigned long long alignment = 16<<20;	/* 16M */

		crash_base = find_e820_area(alignment, ULONG_MAX, crash_size,
				 alignment);
		if (crash_base == -1ULL) {
			pr_info("crashkernel reservation failed - No suitable area found.\n");
			return;
		}
	} else {
		unsigned long long start;

		start = find_e820_area(crash_base, ULONG_MAX, crash_size,
				 1<<20);
		if (start != crash_base) {
			pr_info("crashkernel reservation failed - memory is in use.\n");
			return;
		}
	}
	reserve_early(crash_base, crash_base + crash_size, "CRASH KERNEL");

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
			"for crashkernel (System RAM: %ldMB)\n",
			(unsigned long)(crash_size >> 20),
			(unsigned long)(crash_base >> 20),
			(unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end   = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
static void __init reserve_crashkernel(void)
{
}
#endif /* CONFIG_XEN */
static struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x60,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x64, .end = 0x64,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};
void __init reserve_standard_io_resources(void)
{
	int i;

	/* Nothing to do if not running in dom0. */
	if (!is_initial_xendomain())
		return;

	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
		request_resource(&ioport_resource, &standard_io_resources[i]);
}
/*
 * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
 * is_kdump_kernel() to determine if we are booting after a panic. Hence
 * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
 */
#ifdef CONFIG_CRASH_DUMP
/* elfcorehdr= specifies the location of elf core header
 * stored by the crashed kernel. This option will be passed
 * by kexec loader to the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
	char *end;

	elfcorehdr_addr = memparse(arg, &end);
	return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif
static __init void reserve_ibft_region(void)
{
	unsigned long addr, size = 0;

	addr = find_ibft_region(&size);
	if (size)
		reserve_early_overlap_ok(addr, addr + size, "ibft");
}
#ifdef CONFIG_X86_RESERVE_LOW_64K
static int __init dmi_low_memory_corruption(const struct dmi_system_id *d)
{
	printk(KERN_NOTICE "%s detected: BIOS may corrupt low RAM, working around it.\n",
	       d->ident);
	e820_update_range(0, 0x10000, E820_RAM, E820_RESERVED);
	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
	return 0;
}
#endif
/* List of systems that have known low memory corruption BIOS problems */
static struct dmi_system_id __initdata bad_bios_dmi_table[] = {
#ifdef CONFIG_X86_RESERVE_LOW_64K
		.callback = dmi_low_memory_corruption,
			DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
		.callback = dmi_low_memory_corruption,
		.ident = "Phoenix BIOS",
			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies"),
		.callback = dmi_low_memory_corruption,
		.ident = "Phoenix/MSC BIOS",
			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix/MSC"),
	/*
	 * AMI BIOS with low memory corruption was found on Intel DG45ID and
	 * DG45FC boards.
	 * It has a different DMI_BIOS_VENDOR = "Intel Corp.", for now we will
	 * match only DMI_BOARD_NAME and see if there are more bad products
	 */
		.callback = dmi_low_memory_corruption,
			DMI_MATCH(DMI_BOARD_NAME, "DG45ID"),
		.callback = dmi_low_memory_corruption,
			DMI_MATCH(DMI_BOARD_NAME, "DG45FC"),
static void __init trim_bios_range(void)
{
	/*
	 * A special case is the first 4Kb of memory;
	 * This is a BIOS owned area, not kernel ram, but generally
	 * not listed as such in the E820 table.
	 */
	e820_update_range(0, PAGE_SIZE, E820_RAM, E820_RESERVED);
	/*
	 * special case: Some BIOSen report the PC BIOS
	 * area (640->1Mb) as ram even though it is not.
	 */
	e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
}
/*
 * Determine if we were loaded by an EFI loader. If so, then we have also been
 * passed the efi memmap, systab, etc., so we should use these data structures
 * for initialization. Note, the efi init code path is determined by the
 * global efi_enabled. This allows the same kernel image to be used on existing
 * systems (with a traditional BIOS) as well as on EFI systems.
 */

/*
 * setup_arch - architecture-specific boot-time initializations
 *
 * Note: On x86_64, fixmaps are ready for use even before this is called.
 */
void __init setup_arch(char **cmdline_p)
{
#ifdef CONFIG_XEN
	unsigned long p2m_pages;
	struct physdev_set_iopl set_iopl;

	if (!is_initial_xendomain()) {
		/* Force a quick death if the kernel panics (not domain 0). */
		extern int panic_timeout;
	}

	/* Register a call for panic conditions. */
	atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
#endif /* CONFIG_XEN */
	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
	visws_early_detect();

	printk(KERN_INFO "Command line: %s\n", boot_command_line);

	/* VMI may relocate the fixmap; do this before touching ioremap area */
	early_ioremap_init();

	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
	screen_info = boot_params.screen_info;
	edid_info = boot_params.edid_info;
	apm_info.bios = boot_params.apm_bios_info;
	ist_info = boot_params.ist_info;
	if (boot_params.sys_desc_table.length != 0) {
		set_mca_bus(boot_params.sys_desc_table.table[3] & 0x2);
		machine_id = boot_params.sys_desc_table.table[0];
		machine_submodel_id = boot_params.sys_desc_table.table[1];
		BIOS_revision = boot_params.sys_desc_table.table[2];
	}
	saved_video_mode = boot_params.hdr.vid_mode;
	bootloader_type = boot_params.hdr.type_of_loader;
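	/*
	 * type_of_loader keeps the loader ID in the high nibble and its
	 * version in the low one; a high nibble of 0xe means the real ID
	 * is (ext_loader_type + 0x10), taken from the extended fields below.
	 */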
	if ((bootloader_type >> 4) == 0xe) {
		bootloader_type &= 0xf;
		bootloader_type |= (boot_params.hdr.ext_loader_type+0x10) << 4;
	}
	bootloader_version  = bootloader_type & 0xf;
	bootloader_version |= boot_params.hdr.ext_loader_ver << 4;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif

	if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
#else /* CONFIG_XEN */

	/* This must be initialized to UNNAMED_MAJOR for ipconfig to work
	   properly.  Setting ROOT_DEV to default to /dev/ram0 breaks initrd.
	*/
	ROOT_DEV = MKDEV(UNNAMED_MAJOR,0);

	ROOT_DEV = MKDEV(RAMDISK_MAJOR,0);
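
	/*
	 * Dom0 receives a description of the VGA console from the hypervisor
	 * via the start_info page; unprivileged domains fall back to a
	 * non-VGA screen_info.
	 */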
	if (is_initial_xendomain()) {
		const struct dom0_vga_console_info *info =
			(void *)((char *)xen_start_info +
				 xen_start_info->console.dom0.info_off);

		dom0_init_screen_info(info,
				      xen_start_info->console.dom0.info_size);
		xen_start_info->console.domU.mfn = 0;
		xen_start_info->console.domU.evtchn = 0;
	} else
		screen_info.orig_video_isVGA = 0;
#endif /* CONFIG_XEN */
	x86_init.oem.arch_setup();

	/* update the e820_saved too */
	e820_reserve_setup_data();

	if (!boot_params.hdr.root_flags)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = _brk_end;

	code_resource.start = virt_to_phys(_text);
	code_resource.end = virt_to_phys(_etext)-1;
	data_resource.start = virt_to_phys(_etext);
	data_resource.end = virt_to_phys(_edata)-1;
	bss_resource.start = virt_to_phys(&__bss_start);
	bss_resource.end = virt_to_phys(&__bss_stop)-1;
#ifdef CONFIG_CMDLINE_BOOL
#ifdef CONFIG_CMDLINE_OVERRIDE
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	if (builtin_cmdline[0]) {
		/* append boot loader cmdline to builtin */
		strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
		strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
		strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
	}
#endif
#endif
	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;

	/*
	 * x86_configure_nx() is called before parse_early_param() to detect
	 * whether hardware doesn't support NX (so that the early EHCI debug
	 * console setup can safely call set_fixmap()). It may then be called
	 * again from within noexec_setup() during parsing early parameters
	 * to honor the respective command line option.
	 */
	/* Must be before kernel pagetables are setup */

	/* after early param, so could get panic from serial */
	reserve_early_setup_data();
	if (acpi_mps_check()) {
#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_XEN)
		setup_clear_cpu_cap(X86_FEATURE_APIC);
#endif
	}
	if (pci_early_dump_regs)
		early_dump_pci_devices();

	finish_e820_parsing();
	if (is_initial_xendomain()) {
		dmi_check_system(bad_bios_dmi_table);
	}

	/*
	 * VMware detection requires dmi to be available, so this
	 * needs to be done after dmi_scan_machine, for the BP.
	 */
	init_hypervisor_platform();

	x86_init.resources.probe_roms();
	/* after parse_early_param, so could debug it */
	insert_resource(&iomem_resource, &code_resource);
	insert_resource(&iomem_resource, &data_resource);
	insert_resource(&iomem_resource, &bss_resource);

	if (ppro_with_ram_bug()) {
		e820_update_range(0x70000000ULL, 0x40000ULL, E820_RAM,
				  E820_RESERVED);
		sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
		printk(KERN_INFO "fixed physical RAM map:\n");
		e820_print_map("bad_ppro");
	}

	early_gart_iommu_check();
#endif /* CONFIG_XEN */

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	max_pfn = e820_end_of_ram_pfn();

	/* preallocate 4k for mptable mpc */
	early_reserve_e820_mpc_new();
	/* update e820 for memory not covered by WB MTRRs */
	if (mtrr_trim_uncached_memory(max_pfn))
		max_pfn = e820_end_of_ram_pfn();
#ifdef CONFIG_X86_32
	/* max_low_pfn gets updated here */
	find_low_pfn_range();
#else
	num_physpages = max_pfn;
	max_mapnr = max_pfn;

#ifdef CONFIG_X86_LOCAL_APIC
	/* How many end-of-memory variables you have, grandma! */
	/* need this before calling reserve_initrd */
	if (max_pfn > (1UL<<(32 - PAGE_SHIFT)))
		max_low_pfn = e820_end_of_low_ram_pfn();
	else
		max_low_pfn = max_pfn;

	high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;

	max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT;
#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
	setup_bios_corruption_check();
#endif

	printk(KERN_DEBUG "initial memory mapped : 0 - %08lx\n",
			max_pfn_mapped<<PAGE_SHIFT);
	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	reserve_ibft_region();

	reserve_trampoline_memory();

#ifdef CONFIG_ACPI_SLEEP
	/*
	 * Reserve low memory region for sleep support,
	 * even before init_memory_mapping.
	 */
	acpi_reserve_wakeup_memory();
#endif
	/* max_pfn_mapped is updated here */
	max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
	max_pfn_mapped = max_low_pfn_mapped;
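
	/*
	 * On 64-bit, RAM above 4GB is direct-mapped in a second pass now that
	 * the low mapping is in place.
	 */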
#ifdef CONFIG_X86_64
	if (max_pfn > max_low_pfn) {
		max_pfn_mapped = init_memory_mapping(1UL<<32,
						     max_pfn<<PAGE_SHIFT);
		/* can we preserve max_low_pfn ? */
		max_low_pfn = max_pfn;
	}
#endif

	/*
	 * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
	 */
#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
	if (init_ohci1394_dma_early)
		init_ohci1394_dma_on_all_controllers();
#endif

	reserve_crashkernel();
	if (!is_initial_xendomain()) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
	}

	/*
	 * Parse the ACPI tables for possible boot-time SMP configuration.
	 */
	acpi_boot_table_init();

	early_acpi_boot_init();
#ifdef CONFIG_ACPI_NUMA
	/*
	 * Parse SRAT to discover nodes.
	 */
	acpi = acpi_numa_init();
#endif
#ifdef CONFIG_K8_NUMA
	if (!acpi)
		k8 = !k8_numa_init(0, max_pfn);
#endif
	initmem_init(0, max_pfn, acpi, k8);
#ifndef CONFIG_NO_BOOTMEM
	early_res_to_bootmem(0, max_low_pfn<<PAGE_SHIFT);
#endif

	dma32_reserve_bootmem();

#ifdef CONFIG_KVM_CLOCK
	kvmclock_init();
#endif
	x86_init.paging.pagetable_setup_start(swapper_pg_dir);

	x86_init.paging.pagetable_setup_done(swapper_pg_dir);

#ifdef CONFIG_X86_64

	xen_machine_kexec_setup_resources();
	p2m_pages = max_pfn;
	if (xen_start_info->nr_pages > max_pfn) {
		/*
		 * the max_pfn was shrunk (probably by mem= or highmem=
		 * kernel parameter); shrink reservation with the HV
		 */
		struct xen_memory_reservation reservation = {
		};
		unsigned int difference;
		int ret;

		difference = xen_start_info->nr_pages - max_pfn;

		set_xen_guest_handle(reservation.extent_start,
				     phys_to_machine_mapping + max_pfn);
		reservation.nr_extents = difference;
		ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
					   &reservation);
		BUG_ON(ret != difference);
	} else if (max_pfn > xen_start_info->nr_pages)
		p2m_pages = xen_start_info->nr_pages;
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		unsigned long i, j, size;
		unsigned int k, fpp;

		/* Make sure we have a large enough P->M table. */
		phys_to_machine_mapping = alloc_bootmem_pages(
			max_pfn * sizeof(unsigned long));
		memcpy(phys_to_machine_mapping,
		       __va(__pa(xen_start_info->mfn_list)),
		       p2m_pages * sizeof(unsigned long));
		memset(phys_to_machine_mapping + p2m_pages, ~0,
		       (max_pfn - p2m_pages) * sizeof(unsigned long));
#ifdef CONFIG_X86_64
		if (xen_start_info->mfn_list == VMEMMAP_START) {
			/*
			 * Since it is well isolated we can (and since it is
			 * perhaps large we should) also free the page tables
			 * mapping the initial P->M table.
			 */
			unsigned long va = VMEMMAP_START, pa;
			pgd_t *pgd = pgd_offset_k(va);
			pud_t *pud_page = pud_offset(pgd, 0);

			BUILD_BUG_ON(VMEMMAP_START & ~PGDIR_MASK);
			xen_l4_entry_update(pgd, __pgd(0));
			pud_t *pud = pud_page + pud_index(va);

			else if (pud_large(*pud)) {
				pa = pud_val(*pud) & PHYSICAL_PAGE_MASK;
				make_pages_writable(__va(pa),
						    PUD_SIZE >> PAGE_SHIFT,
						    XENFEAT_writable_page_tables);
				free_bootmem(pa, PUD_SIZE);

				pmd_t *pmd = pmd_offset(pud, va);

				if (pmd_large(*pmd)) {
					pa = pmd_val(*pmd) & PHYSICAL_PAGE_MASK;
					make_pages_writable(__va(pa),
							    PMD_SIZE >> PAGE_SHIFT,
							    XENFEAT_writable_page_tables);
					free_bootmem(pa, PMD_SIZE);
				} else if (!pmd_none(*pmd)) {
					pte_t *pte = pte_offset_kernel(pmd, va);

					for (i = 0; i < PTRS_PER_PTE; ++i) {
						if (pte_none(pte[i]))
						pa = pte_pfn(pte[i]) << PAGE_SHIFT;
						make_page_writable(__va(pa),
							XENFEAT_writable_page_tables);
						free_bootmem(pa, PAGE_SIZE);

					ClearPagePinned(virt_to_page(pte));
					make_page_writable(pte,
						XENFEAT_writable_page_tables);
					free_bootmem(__pa(pte), PAGE_SIZE);

				ClearPagePinned(virt_to_page(pmd));
				make_page_writable(pmd,
					XENFEAT_writable_page_tables);
				free_bootmem(__pa((unsigned long)pmd

			ClearPagePinned(virt_to_page(pud_page));
			make_page_writable(pud_page,
				XENFEAT_writable_page_tables);
			free_bootmem(__pa((unsigned long)pud_page & PAGE_MASK),
		} else if (!WARN_ON(xen_start_info->mfn_list
				    < __START_KERNEL_map))
			free_bootmem(__pa(xen_start_info->mfn_list),
				     PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
						     sizeof(unsigned long))));
		/*
		 * Initialise the list of the frames that specify the list of
		 * frames that make up the p2m table. Used by save/restore.
		 */
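		/*
		 * fpp is the number of p2m entries per page; the first
		 * division counts the p2m frames, the second the pages
		 * needed to list those frames, plus one zero terminator.
		 */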
		fpp = PAGE_SIZE/sizeof(unsigned long);
		size = (max_pfn + fpp - 1) / fpp;
		size = (size + fpp - 1) / fpp;
		++size; /* include a zero terminator for crash tools */
		size *= sizeof(unsigned long);
		pfn_to_mfn_frame_list_list = alloc_bootmem_pages(size);
		if (size > PAGE_SIZE
		    && xen_create_contiguous_region((unsigned long)
						    pfn_to_mfn_frame_list_list,
						    get_order(size), 0))
			BUG();
		size -= sizeof(unsigned long);
		pfn_to_mfn_frame_list = alloc_bootmem(size);
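
		/*
		 * Fill both levels: every fpp-th iteration starts a new
		 * frame-list page whose machine frame is recorded in the
		 * top-level list; each entry then records the frame of one
		 * p2m page.
		 */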
		for (i = j = 0, k = -1; i < max_pfn; i += fpp, j++) {
			BUG_ON(k * sizeof(unsigned long) >= size);
			pfn_to_mfn_frame_list[k] =
				alloc_bootmem_pages(PAGE_SIZE);
			pfn_to_mfn_frame_list_list[k] =
				virt_to_mfn(pfn_to_mfn_frame_list[k]);

			pfn_to_mfn_frame_list[k][j] =
				virt_to_mfn(&phys_to_machine_mapping[i]);
		}

		HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
		HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
			virt_to_mfn(pfn_to_mfn_frame_list_list);
	/* Mark all ISA DMA channels in-use - using them wouldn't work. */
	for (i = 0; i < MAX_DMA_CHANNELS; ++i)
		if (i != 4 && request_dma(i, "xen") != 0)
			BUG();
#else /* CONFIG_XEN */
	generic_apic_probe();

	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
	prefill_possible_map();
#ifdef CONFIG_X86_64

	init_apic_mappings();
	ioapic_init_mappings();

	/* need to wait until io_apic is mapped */
	probe_nr_irqs_gsi();
	e820_reserve_resources();
	e820_mark_nosave_regions(max_low_pfn);

	if (is_initial_xendomain())
		e820_reserve_resources();

	x86_init.resources.reserve_resources();
#if defined(CONFIG_VGA_CONSOLE)
	if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
		conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#else /* CONFIG_XEN */
	if (is_initial_xendomain()) {
		set_iopl.iopl = 1;
		WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl));
	}
#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif
#ifdef CONFIG_VGA_CONSOLE
	if (is_initial_xendomain())
		conswitchp = &vga_con;
#endif
#endif /* CONFIG_XEN */

	x86_init.oem.banner();
}
#ifdef CONFIG_X86_32

static struct resource video_ram_resource = {
	.name	= "Video RAM area",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};
void __init i386_reserve_resources(void)
{
	if (is_initial_xendomain())
		request_resource(&iomem_resource, &video_ram_resource);
	reserve_standard_io_resources();
}

#endif /* CONFIG_X86_32 */
static int
xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	HYPERVISOR_shutdown(SHUTDOWN_crash);
	/* we're never actually going to get here... */
	return NOTIFY_DONE;
}
#endif /* !CONFIG_XEN */