Updated to 2.6.22-rc2-git7
arch/x86_64/kernel/setup-xen.c (linux-flexiantxendom0-3.2.10.git)
/*
 *  linux/arch/x86-64/kernel/setup.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Nov 2001 Dave Jones <davej@suse.de>
 *  Forked from i386 setup code.
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <linux/edd.h>
#include <linux/mmzone.h>
#include <linux/kexec.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>

#include <asm/mtrr.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <video/edid.h>
#include <asm/e820.h>
#include <asm/dma.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>
#include <asm/bootsetup.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/mach_apic.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/dmi.h>
#ifdef CONFIG_XEN
#include <linux/percpu.h>
#include <xen/interface/physdev.h>
#include "setup_arch_pre.h"
#include <asm/hypervisor.h>
#include <xen/interface/nmi.h>
#include <xen/features.h>
#include <xen/xencons.h>
#define PFN_UP(x)       (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_PHYS(x)     ((x) << PAGE_SHIFT)
#include <asm/mach-xen/setup_arch_post.h>
#include <xen/interface/memory.h>

#ifdef CONFIG_XEN
#include <xen/interface/kexec.h>
#endif

extern unsigned long start_pfn;
extern struct edid_info edid_info;

shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
EXPORT_SYMBOL(HYPERVISOR_shared_info);

extern char hypercall_page[PAGE_SIZE];
EXPORT_SYMBOL(hypercall_page);

static int xen_panic_event(struct notifier_block *, unsigned long, void *);
static struct notifier_block xen_panic_block = {
        xen_panic_event, NULL, 0 /* try to go last */
};

unsigned long *phys_to_machine_mapping;
unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[512];

EXPORT_SYMBOL(phys_to_machine_mapping);

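/*
 * Per-CPU batch of pending Xen multicall entries; nr_multicall_ents counts
 * how many are queued before being flushed to the hypervisor in a single
 * HYPERVISOR_multicall(), amortising hypercall overhead.
 */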
DEFINE_PER_CPU(multicall_entry_t, multicall_list[8]);
DEFINE_PER_CPU(int, nr_multicall_ents);

/* Raw start-of-day parameters from the hypervisor. */
start_info_t *xen_start_info;
EXPORT_SYMBOL(xen_start_info);
#endif

/*
 * Machine setup..
 */

struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);

unsigned long mmu_cr4_features;

/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;

unsigned long saved_video_mode;

/*
 * Early DMI memory
 */
int dmi_alloc_index;
char dmi_alloc_data[DMI_MAX_DATA];

/*
 * Setup options
 */
struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct sys_desc_table_struct {
        unsigned short length;
        unsigned char table[0];
};

struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

char __initdata command_line[COMMAND_LINE_SIZE];

struct resource standard_io_resources[] = {
        { .name = "dma1", .start = 0x00, .end = 0x1f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic1", .start = 0x20, .end = 0x21,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer0", .start = 0x40, .end = 0x43,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer1", .start = 0x50, .end = 0x53,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "keyboard", .start = 0x60, .end = 0x6f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma page reg", .start = 0x80, .end = 0x8f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic2", .start = 0xa0, .end = 0xa1,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma2", .start = 0xc0, .end = 0xdf,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "fpu", .start = 0xf0, .end = 0xff,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)

struct resource data_resource = {
        .name = "Kernel data",
        .start = 0,
        .end = 0,
        .flags = IORESOURCE_RAM,
};
struct resource code_resource = {
        .name = "Kernel code",
        .start = 0,
        .end = 0,
        .flags = IORESOURCE_RAM,
};

#ifdef CONFIG_PROC_VMCORE
/* elfcorehdr= specifies the location of elf core header
 * stored by the crashed kernel. This option will be passed
 * by kexec loader to the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
        char *end;
        if (!arg)
                return -EINVAL;
        elfcorehdr_addr = memparse(arg, &end);
        return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif

#ifndef CONFIG_NUMA
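/*
 * Set up the boot-time bootmem allocator for a flat (non-NUMA) physical
 * memory layout: find room for the bootmem bitmap in e820, register and
 * free the usable ranges, then reserve the bitmap itself.
 */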
static void __init
contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long bootmap_size, bootmap;

        bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
        bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
        if (bootmap == -1L)
                panic("Cannot find bootmem map of size %ld\n", bootmap_size);
        bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
        e820_register_active_regions(0, start_pfn, end_pfn);
#ifdef CONFIG_XEN
        free_bootmem_with_active_regions(0, xen_start_info->nr_pages);
#else
        free_bootmem_with_active_regions(0, end_pfn);
#endif
        reserve_bootmem(bootmap, bootmap_size);
}
#endif

#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 *
 */
static inline void copy_edd(void)
{
        memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
        memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
        edd.mbr_signature_nr = EDD_MBR_SIG_NR;
        edd.edd_info_nr = EDD_NR;
}
#else
static inline void copy_edd(void)
{
}
#endif

#ifndef CONFIG_XEN
#define EBDA_ADDR_POINTER 0x40E

unsigned __initdata ebda_addr;
unsigned __initdata ebda_size;

static void discover_ebda(void)
{
        /*
         * there is a real-mode segmented pointer pointing to the
         * 4K EBDA area at 0x40E
         */
        ebda_addr = *(unsigned short *)EBDA_ADDR_POINTER;
        ebda_addr <<= 4;

        ebda_size = *(unsigned short *)(unsigned long)ebda_addr;

        /* Round EBDA up to pages */
        if (ebda_size == 0)
                ebda_size = 1;
        ebda_size <<= 10;
        ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
        if (ebda_size > 64*1024)
                ebda_size = 64*1024;
}
#else
#define discover_ebda() ((void)0)
#endif

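/*
 * Architecture-specific boot-time setup: take over the boot parameters
 * (or the Xen start_info on a paravirtual boot), size memory, set up the
 * bootmem allocator, and reserve the special regions before generic
 * kernel init continues.
 */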
void __init setup_arch(char **cmdline_p)
{
#ifdef CONFIG_XEN
        extern struct e820map machine_e820;

        printk(KERN_INFO "Command line: %s\n", boot_command_line);

        ROOT_DEV = MKDEV(RAMDISK_MAJOR, 0);
        screen_info = SCREEN_INFO;

        if (is_initial_xendomain()) {
                /* This is drawn from a dump from vgacon:startup in
                 * standard Linux. */
                screen_info.orig_video_mode = 3;
                screen_info.orig_video_isVGA = 1;
                screen_info.orig_video_lines = 25;
                screen_info.orig_video_cols = 80;
                screen_info.orig_video_ega_bx = 3;
                screen_info.orig_video_points = 16;
                screen_info.orig_y = screen_info.orig_video_lines - 1;
                if (xen_start_info->console.dom0.info_size >=
                    sizeof(struct dom0_vga_console_info)) {
                        const struct dom0_vga_console_info *info =
                                (struct dom0_vga_console_info *)(
                                        (char *)xen_start_info +
                                        xen_start_info->console.dom0.info_off);
                        dom0_init_screen_info(info);
                }
                xen_start_info->console.domU.mfn = 0;
                xen_start_info->console.domU.evtchn = 0;
        } else {
                screen_info.orig_video_isVGA = 0;

                /* Register a call for panic conditions. */
                atomic_notifier_chain_register(&panic_notifier_list,
                                               &xen_panic_block);
        }

        edid_info = EDID_INFO;
        saved_video_mode = SAVED_VIDEO_MODE;
        bootloader_type = LOADER_TYPE;

#ifdef CONFIG_BLK_DEV_RAM
        rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
        rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
        rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif

        HYPERVISOR_vm_assist(VMASST_CMD_enable,
                             VMASST_TYPE_writable_pagetables);

        ARCH_SETUP
#else
        printk(KERN_INFO "Command line: %s\n", boot_command_line);

        ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
        screen_info = SCREEN_INFO;
        edid_info = EDID_INFO;
        saved_video_mode = SAVED_VIDEO_MODE;
        bootloader_type = LOADER_TYPE;

#ifdef CONFIG_BLK_DEV_RAM
        rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
        rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
        rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif
#endif  /* !CONFIG_XEN */
        setup_memory_region();
        copy_edd();

        if (!MOUNT_ROOT_RDONLY)
                root_mountflags &= ~MS_RDONLY;
        init_mm.start_code = (unsigned long) &_text;
        init_mm.end_code = (unsigned long) &_etext;
        init_mm.end_data = (unsigned long) &_edata;
        init_mm.brk = (unsigned long) &_end;

        code_resource.start = virt_to_phys(&_text);
        code_resource.end = virt_to_phys(&_etext)-1;
        data_resource.start = virt_to_phys(&_etext);
        data_resource.end = virt_to_phys(&_edata)-1;

        early_identify_cpu(&boot_cpu_data);

        strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = command_line;

        parse_early_param();

        finish_e820_parsing();

        e820_register_active_regions(0, 0, -1UL);
        /*
         * partially used pages are not usable - thus
         * we are rounding upwards:
         */
        end_pfn = e820_end_of_ram();
        num_physpages = end_pfn;

        check_efer();

        discover_ebda();

        init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));

        if (is_initial_xendomain())
                dmi_scan_machine();

        /* How many end-of-memory variables you have, grandma! */
        max_low_pfn = end_pfn;
        max_pfn = end_pfn;
        high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;

        /* Remove active ranges so rediscovery with NUMA-awareness happens */
        remove_all_active_ranges();

#ifdef CONFIG_ACPI_NUMA
        /*
         * Parse SRAT to discover nodes.
         */
        acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
        numa_initmem_init(0, end_pfn);
#else
        contig_initmem_init(0, end_pfn);
#endif

#ifdef CONFIG_XEN
        /*
         * Reserve kernel, physmap, start info, initial page tables, and
         * direct mapping.
         */
        reserve_bootmem_generic(__pa_symbol(&_text),
                                (table_end << PAGE_SHIFT) - __pa_symbol(&_text));
#else
        /* Reserve direct mapping */
        reserve_bootmem_generic(table_start << PAGE_SHIFT,
                                (table_end - table_start) << PAGE_SHIFT);

        /* reserve kernel */
        reserve_bootmem_generic(__pa_symbol(&_text),
                                __pa_symbol(&_end) - __pa_symbol(&_text));

        /*
         * reserve physical page 0 - it's a special BIOS page on many boxes,
         * enabling clean reboots, SMP operation, laptop functions.
         */
        reserve_bootmem_generic(0, PAGE_SIZE);

        /* reserve ebda region */
        if (ebda_addr)
                reserve_bootmem_generic(ebda_addr, ebda_size);
#ifdef CONFIG_NUMA
        /* reserve nodemap region */
        if (nodemap_addr)
                reserve_bootmem_generic(nodemap_addr, nodemap_size);
#endif

#ifdef CONFIG_SMP
        /*
         * But first pinch a few for the stack/trampoline stuff
         * FIXME: Don't need the extra page at 4K, but need to fix
         * trampoline before removing it. (see the GDT stuff)
         */
        reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);

        /* Reserve SMP trampoline */
        reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
#endif
#endif

#ifdef CONFIG_ACPI_SLEEP
        /*
         * Reserve low memory region for sleep support.
         */
        acpi_reserve_bootmem();
#endif
#ifdef CONFIG_BLK_DEV_INITRD
#ifndef CONFIG_XEN
        if (LOADER_TYPE && INITRD_START) {
#else
        if (xen_start_info->mod_start) {
#endif
                if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
#ifndef CONFIG_XEN
                        reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
#else
                        initrd_below_start_ok = 1;
#endif
                        initrd_start = INITRD_START + PAGE_OFFSET;
                        initrd_end = initrd_start+INITRD_SIZE;
                }
                else {
                        printk(KERN_ERR "initrd extends beyond end of memory "
                            "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                            (unsigned long)(INITRD_START + INITRD_SIZE),
                            (unsigned long)(end_pfn << PAGE_SHIFT));
                        initrd_start = 0;
                }
        }
#endif
#ifdef CONFIG_KEXEC
#ifdef CONFIG_XEN
        xen_machine_kexec_setup_resources();
#else
        if (crashk_res.start != crashk_res.end) {
                reserve_bootmem_generic(crashk_res.start,
                        crashk_res.end - crashk_res.start + 1);
        }
#endif
#endif

        paging_init();
#ifdef CONFIG_X86_LOCAL_APIC
        /*
         * Find and reserve possible boot-time SMP configuration:
         */
        find_smp_config();
#endif
#ifdef CONFIG_XEN
        {
                int i, j, k, fpp;

                if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                        /* Make sure we have a large enough P->M table. */
                        phys_to_machine_mapping = alloc_bootmem_pages(
                                end_pfn * sizeof(unsigned long));
                        memset(phys_to_machine_mapping, ~0,
                               end_pfn * sizeof(unsigned long));
                        memcpy(phys_to_machine_mapping,
                               (unsigned long *)xen_start_info->mfn_list,
                               xen_start_info->nr_pages * sizeof(unsigned long));
                        free_bootmem(
                                __pa(xen_start_info->mfn_list),
                                PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
                                                sizeof(unsigned long))));

                        /*
                         * Initialise the list of the frames that specify the
                         * list of frames that make up the p2m table. Used by
                         * save/restore.
                         */
                        pfn_to_mfn_frame_list_list = alloc_bootmem_pages(PAGE_SIZE);

                        fpp = PAGE_SIZE/sizeof(unsigned long);
                        for (i = 0, j = 0, k = -1; i < end_pfn; i += fpp, j++) {
                                if ((j % fpp) == 0) {
                                        k++;
                                        BUG_ON(k >= fpp);
                                        pfn_to_mfn_frame_list[k] =
                                                alloc_bootmem_pages(PAGE_SIZE);
                                        pfn_to_mfn_frame_list_list[k] =
                                                virt_to_mfn(pfn_to_mfn_frame_list[k]);
                                        j = 0;
                                }
                                pfn_to_mfn_frame_list[k][j] =
                                        virt_to_mfn(&phys_to_machine_mapping[i]);
                        }
                        HYPERVISOR_shared_info->arch.max_pfn = end_pfn;
                        HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
                                virt_to_mfn(pfn_to_mfn_frame_list_list);
                }
        }

#ifdef CONFIG_ACPI
        if (!is_initial_xendomain()) {
                acpi_disabled = 1;
                acpi_ht = 0;
        }
#endif
#endif

#if defined(CONFIG_PCI) && !defined(CONFIG_XEN)
        early_quirks();
#endif

        zap_low_mappings(0);

        /*
         * set this early, so we don't allocate cpu0
         * if the MADT doesn't list the BSP first
         * mpparse.c/MP_processor_info() allocates logical cpu numbers.
         */
        cpu_set(0, cpu_present_map);
#ifdef CONFIG_ACPI
        /*
         * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
         * Call this early for SRAT node setup.
         */
        acpi_boot_table_init();

        /*
         * Read APIC and some other early information from ACPI tables.
         */
        acpi_boot_init();
#endif

        init_cpu_to_node();

#ifdef CONFIG_X86_LOCAL_APIC
        /*
         * get boot-time SMP configuration:
         */
        if (smp_found_config)
                get_smp_config();
#ifndef CONFIG_XEN
        init_apic_mappings();
#endif
#endif

        /*
         * We trust e820 completely. No explicit ROM probing in memory.
         */
#ifdef CONFIG_XEN
        if (is_initial_xendomain()) {
                struct xen_memory_map memmap;

                memmap.nr_entries = E820MAX;
                set_xen_guest_handle(memmap.buffer, machine_e820.map);

                if (HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap))
                        BUG();
                machine_e820.nr_map = memmap.nr_entries;

                e820_reserve_resources(machine_e820.map, machine_e820.nr_map);
        }
#else
        e820_reserve_resources(e820.map, e820.nr_map);
        e820_mark_nosave_regions();
#endif

        {
                unsigned i;
                /* request I/O space for devices used on all i[345]86 PCs */
                for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
                        request_resource(&ioport_resource,
                                         &standard_io_resources[i]);
        }

#ifdef CONFIG_XEN
        if (is_initial_xendomain())
                e820_setup_gap(machine_e820.map, machine_e820.nr_map);
#else
        e820_setup_gap(e820.map, e820.nr_map);
#endif

#ifdef CONFIG_XEN
        {
                struct physdev_set_iopl set_iopl;

                set_iopl.iopl = 1;
                HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);

                if (is_initial_xendomain()) {
#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
                        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
                        conswitchp = &dummy_con;
#endif
#endif
                } else {
#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
                        conswitchp = &dummy_con;
#endif
                }
        }
        xencons_early_setup();
#else   /* CONFIG_XEN */

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif

#endif /* !CONFIG_XEN */
}

#ifdef CONFIG_XEN
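/*
 * Panic notifier: report this domain as crashed to the hypervisor so the
 * toolstack can record the failure (and, if configured, take a dump).
 */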
static int
xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        HYPERVISOR_shutdown(SHUTDOWN_crash);
        /* we're never actually going to get here... */
        return NOTIFY_DONE;
}
#endif /* CONFIG_XEN */

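/* Fetch the 48-byte CPU brand string from CPUID leaves 0x80000002-0x80000004. */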
static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
        unsigned int *v;

        if (c->extended_cpuid_level < 0x80000004)
                return 0;

        v = (unsigned int *) c->x86_model_id;
        cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
        cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
        cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
        c->x86_model_id[48] = 0;
        return 1;
}

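/*
 * Report L1/L2 cache and TLB geometry from the AMD-style extended CPUID
 * leaves (0x80000005/0x80000006), plus power-management bits and
 * physical/virtual address sizes.
 */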
static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
        unsigned int n, dummy, eax, ebx, ecx, edx;

        n = c->extended_cpuid_level;

        if (n >= 0x80000005) {
                cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
                printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
                       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
                c->x86_cache_size = (ecx>>24) + (edx>>24);
                /* On K8 L1 TLB is inclusive, so don't count it */
                c->x86_tlbsize = 0;
        }

        if (n >= 0x80000006) {
                cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
                ecx = cpuid_ecx(0x80000006);
                c->x86_cache_size = ecx >> 16;
                c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

                printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
                       c->x86_cache_size, ecx & 0xFF);
        }

        if (n >= 0x80000007)
                cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
        if (n >= 0x80000008) {
                cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
        }
}

#ifdef CONFIG_NUMA
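/*
 * Pick the nearest online node by scanning neighbouring APIC IDs; used as
 * a fallback when a CPU's own APIC ID has no usable node mapping.
 */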
static int nearby_node(int apicid)
{
        int i;
        for (i = apicid - 1; i >= 0; i--) {
                int node = apicid_to_node[i];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
        for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
                int node = apicid_to_node[i];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
        return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * On an AMD dual-core setup the lower bits of the APIC id distinguish the
 * cores. Assumes the number of cores is a power of two.
 */
static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        unsigned bits;
#ifdef CONFIG_NUMA
        int cpu = smp_processor_id();
        int node = 0;
        unsigned apicid = hard_smp_processor_id();
#endif
        unsigned ecx = cpuid_ecx(0x80000008);

        c->x86_max_cores = (ecx & 0xff) + 1;

        /* CPU telling us the core id bits shift? */
        bits = (ecx >> 12) & 0xF;

        /* Otherwise recompute */
        if (bits == 0) {
                while ((1 << bits) < c->x86_max_cores)
                        bits++;
        }

        /* Low order bits define the core id (index of core in socket) */
        c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
        /* Convert the APIC ID into the socket ID */
        c->phys_proc_id = phys_pkg_id(bits);

#ifdef CONFIG_NUMA
        node = c->phys_proc_id;
        if (apicid_to_node[apicid] != NUMA_NO_NODE)
                node = apicid_to_node[apicid];
        if (!node_online(node)) {
                /* Two possibilities here:
                   - The CPU is missing memory and no node was created.
                   In that case try picking one from a nearby CPU
                   - The APIC IDs differ from the HyperTransport node IDs
                   which the K8 northbridge parsing fills in.
                   Assume they are all increased by a constant offset,
                   but in the same order as the HT nodeids.
                   If that doesn't result in a usable node fall back to the
                   path for the previous case.  */
                int ht_nodeid = apicid - (cpu_data[0].phys_proc_id << bits);
                if (ht_nodeid >= 0 &&
                    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
                        node = apicid_to_node[ht_nodeid];
                /* Pick a nearby node */
                if (!node_online(node))
                        node = nearby_node(apicid);
        }
        numa_set_node(cpu, node);

        printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
#endif
}

static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
        unsigned level;

#ifdef CONFIG_SMP
        unsigned long value;

        /*
         * Disable TLB flush filter by setting HWCR.FFDIS on K8
         * bit 6 of msr C001_0015
         *
         * Errata 63 for SH-B3 steppings
         * Errata 122 for all steppings (F+ have it disabled by default)
         */
        if (c->x86 == 15) {
                rdmsrl(MSR_K8_HWCR, value);
                value |= 1 << 6;
                wrmsrl(MSR_K8_HWCR, value);
        }
#endif

        /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
           3DNow! is identified by bit 31 in extended CPUID (1*32+31) anyway */
        clear_bit(0*32+31, &c->x86_capability);

        /* On C+ stepping K8 rep microcode works well for copy/memset */
        level = cpuid_eax(1);
        if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
                set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);

        /* Enable workaround for FXSAVE leak */
        if (c->x86 >= 6)
                set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);

        level = get_model_name(c);
        if (!level) {
                switch (c->x86) {
                case 15:
                        /* Should distinguish Models here, but this is only
                           a fallback anyways. */
                        strcpy(c->x86_model_id, "Hammer");
                        break;
                }
        }
        display_cacheinfo(c);

        /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
        if (c->x86_power & (1<<8))
                set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);

        /* Multi core CPU? */
        if (c->extended_cpuid_level >= 0x80000008)
                amd_detect_cmp(c);

        /* Fix cpuid4 emulation for more */
        num_cache_leaves = 3;

        /* RDTSC can be speculated around */
        clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
}

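/*
 * Detect hyper-threading: CPUID leaf 1 EBX[23:16] gives the number of
 * logical processors per package; the physical package and core IDs are
 * then derived from the APIC ID bit layout.
 */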
static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        u32     eax, ebx, ecx, edx;
        int     index_msb, core_bits;

        cpuid(1, &eax, &ebx, &ecx, &edx);

        if (!cpu_has(c, X86_FEATURE_HT))
                return;
        if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
                goto out;

        smp_num_siblings = (ebx & 0xff0000) >> 16;

        if (smp_num_siblings == 1) {
                printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
        } else if (smp_num_siblings > 1) {

                if (smp_num_siblings > NR_CPUS) {
                        printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
                               smp_num_siblings);
                        smp_num_siblings = 1;
                        return;
                }

                index_msb = get_count_order(smp_num_siblings);
                c->phys_proc_id = phys_pkg_id(index_msb);

                smp_num_siblings = smp_num_siblings / c->x86_max_cores;

                index_msb = get_count_order(smp_num_siblings);

                core_bits = get_count_order(c->x86_max_cores);

                c->cpu_core_id = phys_pkg_id(index_msb) &
                                               ((1 << core_bits) - 1);
        }
out:
        if ((c->x86_max_cores * smp_num_siblings) > 1) {
                printk(KERN_INFO "CPU: Physical Processor ID: %d\n", c->phys_proc_id);
                printk(KERN_INFO "CPU: Processor Core ID: %d\n", c->cpu_core_id);
        }

#endif
}

/*
 * find out the number of processor cores on the die
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
        unsigned int eax, t;

        if (c->cpuid_level < 4)
                return 1;

        cpuid_count(4, 0, &eax, &t, &t, &t);

        if (eax & 0x1f)
                return ((eax >> 26) + 1);
        else
                return 1;
}

static void srat_detect_node(void)
{
#ifdef CONFIG_NUMA
        unsigned node;
        int cpu = smp_processor_id();
        int apicid = hard_smp_processor_id();

        /* Don't do the funky fallback heuristics the AMD version employs
           for now. */
        node = apicid_to_node[apicid];
        if (node == NUMA_NO_NODE)
                node = first_node(node_online_map);
        numa_set_node(cpu, node);

        printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
}

static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
        /* Cache sizes */
        unsigned n;

        init_intel_cacheinfo(c);
        if (c->cpuid_level > 9) {
                unsigned eax = cpuid_eax(10);
                /* Check for version and the number of counters */
                if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
                        set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
        }

        if (cpu_has_ds) {
                unsigned int l1, l2;
                rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
                if (!(l1 & (1<<11)))
                        set_bit(X86_FEATURE_BTS, c->x86_capability);
                if (!(l1 & (1<<12)))
                        set_bit(X86_FEATURE_PEBS, c->x86_capability);
        }

        n = c->extended_cpuid_level;
        if (n >= 0x80000008) {
                unsigned eax = cpuid_eax(0x80000008);
                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
                /* CPUID workaround for Intel 0F34 CPU */
                if (c->x86_vendor == X86_VENDOR_INTEL &&
                    c->x86 == 0xF && c->x86_model == 0x3 &&
                    c->x86_mask == 0x4)
                        c->x86_phys_bits = 36;
        }

        if (c->x86 == 15)
                c->x86_cache_alignment = c->x86_clflush_size * 2;
        if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
            (c->x86 == 0x6 && c->x86_model >= 0x0e))
                set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
        if (c->x86 == 6)
                set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
        if (c->x86 == 15)
                set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
        else
                clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
        c->x86_max_cores = intel_num_cpu_cores(c);

        srat_detect_node();
}

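/* Map the 12-byte CPUID vendor string to an X86_VENDOR_* constant. */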
static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
        char *v = c->x86_vendor_id;

        if (!strcmp(v, "AuthenticAMD"))
                c->x86_vendor = X86_VENDOR_AMD;
        else if (!strcmp(v, "GenuineIntel"))
                c->x86_vendor = X86_VENDOR_INTEL;
        else
                c->x86_vendor = X86_VENDOR_UNKNOWN;
}

struct cpu_model_info {
        int vendor;
        int family;
        char *model_names[16];
};

/* Do some early cpuid on the boot CPU to get some parameters that are
   needed before check_bugs. Everything advanced is in identify_cpu
   below. */
void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
        u32 tfms;

        c->loops_per_jiffy = loops_per_jiffy;
        c->x86_cache_size = -1;
        c->x86_vendor = X86_VENDOR_UNKNOWN;
        c->x86_model = c->x86_mask = 0; /* So far unknown... */
        c->x86_vendor_id[0] = '\0'; /* Unset */
        c->x86_model_id[0] = '\0';  /* Unset */
        c->x86_clflush_size = 64;
        c->x86_cache_alignment = c->x86_clflush_size;
        c->x86_max_cores = 1;
        c->extended_cpuid_level = 0;
        memset(&c->x86_capability, 0, sizeof c->x86_capability);

        /* Get vendor name */
        cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
              (unsigned int *)&c->x86_vendor_id[0],
              (unsigned int *)&c->x86_vendor_id[8],
              (unsigned int *)&c->x86_vendor_id[4]);

        get_cpu_vendor(c);

        /* Initialize the standard set of capabilities */
        /* Note that the vendor-specific code below might override */

        /* Intel-defined flags: level 0x00000001 */
        if (c->cpuid_level >= 0x00000001) {
                __u32 misc;
                cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
                      &c->x86_capability[0]);
                c->x86 = (tfms >> 8) & 0xf;
                c->x86_model = (tfms >> 4) & 0xf;
                c->x86_mask = tfms & 0xf;
                if (c->x86 == 0xf)
                        c->x86 += (tfms >> 20) & 0xff;
                if (c->x86 >= 0x6)
                        c->x86_model += ((tfms >> 16) & 0xF) << 4;
                if (c->x86_capability[0] & (1<<19))
                        c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
        } else {
                /* Have CPUID level 0 only - unheard of */
                c->x86 = 4;
        }

#ifdef CONFIG_SMP
        c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
#endif
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
        int i;
        u32 xlvl;

        early_identify_cpu(c);

        /* AMD-defined flags: level 0x80000001 */
        xlvl = cpuid_eax(0x80000000);
        c->extended_cpuid_level = xlvl;
        if ((xlvl & 0xffff0000) == 0x80000000) {
                if (xlvl >= 0x80000001) {
                        c->x86_capability[1] = cpuid_edx(0x80000001);
                        c->x86_capability[6] = cpuid_ecx(0x80000001);
                }
                if (xlvl >= 0x80000004)
                        get_model_name(c); /* Default name */
        }

        /* Transmeta-defined flags: level 0x80860001 */
        xlvl = cpuid_eax(0x80860000);
        if ((xlvl & 0xffff0000) == 0x80860000) {
                /* Don't set x86_cpuid_level here for now to avoid confusion. */
                if (xlvl >= 0x80860001)
                        c->x86_capability[2] = cpuid_edx(0x80860001);
        }

        c->apicid = phys_pkg_id(0);

        /*
         * Vendor-specific initialization.  In this section we
         * canonicalize the feature flags, meaning if there are
         * features a certain CPU supports which CPUID doesn't
         * tell us, CPUID claiming incorrect flags, or other bugs,
         * we handle them here.
         *
         * At the end of this section, c->x86_capability better
         * indicate the features this CPU genuinely supports!
         */
        switch (c->x86_vendor) {
        case X86_VENDOR_AMD:
                init_amd(c);
                break;

        case X86_VENDOR_INTEL:
                init_intel(c);
                break;

        case X86_VENDOR_UNKNOWN:
        default:
                display_cacheinfo(c);
                break;
        }

        select_idle_routine(c);
        detect_ht(c);

        /*
         * On SMP, boot_cpu_data holds the common feature set between
         * all CPUs; so make sure that we indicate which features are
         * common between the CPUs.  The first time this routine gets
         * executed, c == &boot_cpu_data.
         */
        if (c != &boot_cpu_data) {
                /* AND the already accumulated flags with these */
                for (i = 0 ; i < NCAPINTS ; i++)
                        boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
        }

#ifdef CONFIG_X86_MCE
        mcheck_init(c);
#endif
        if (c == &boot_cpu_data)
                mtrr_bp_init();
        else
                mtrr_ap_init();
#ifdef CONFIG_NUMA
        numa_add_cpu(smp_processor_id());
#endif
}

void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
        if (c->x86_model_id[0])
                printk("%s", c->x86_model_id);

        if (c->x86_mask || c->cpuid_level >= 0)
                printk(" stepping %02x\n", c->x86_mask);
        else
                printk("\n");
}

/*
 *      Get CPU information for use by the procfs.
 */

static int show_cpuinfo(struct seq_file *m, void *v)
{
        struct cpuinfo_x86 *c = v;

        /*
         * These flag bits must match the definitions in <asm/cpufeature.h>.
         * NULL means this bit is undefined or reserved; either way it doesn't
         * have meaning as far as Linux is concerned.  Note that it's important
         * to realize there is a difference between this table and CPUID -- if
         * applications want to get the raw CPUID data, they should access
         * /dev/cpu/<cpu_nr>/cpuid instead.
         */
        static char *x86_cap_flags[] = {
                /* Intel-defined */
                "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
                "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
                "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
                "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,

                /* AMD-defined */
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
                NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm",
                "3dnowext", "3dnow",

                /* Transmeta-defined */
                "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* Other (Linux-defined) */
                "cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
                "constant_tsc", NULL, NULL,
                "up", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* Intel-defined (#2) */
                "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
                "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
                NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* VIA/Cyrix/Centaur-defined */
                NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* AMD-defined (#2) */
                "lahf_lm", "cmp_legacy", "svm", "extapic", "cr8_legacy",
                "altmovcr8", "abm", "sse4a",
                "misalignsse", "3dnowprefetch",
                "osvw", "ibs", NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        };
        static char *x86_power_flags[] = {
                "ts",   /* temperature sensor */
                "fid",  /* frequency id control */
                "vid",  /* voltage id control */
                "ttp",  /* thermal trip */
                "tm",
                "stc",
                "100mhzsteps",
                "hwpstate",
                NULL,   /* tsc invariant mapped to constant_tsc */
                NULL,
                /* nothing */   /* constant_tsc - moved to flags */
        };

#ifdef CONFIG_SMP
        if (!cpu_online(c-cpu_data))
                return 0;
#endif

        seq_printf(m, "processor\t: %u\n"
                      "vendor_id\t: %s\n"
                      "cpu family\t: %d\n"
                      "model\t\t: %d\n"
                      "model name\t: %s\n",
                      (unsigned)(c-cpu_data),
                      c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
                      c->x86,
                      (int)c->x86_model,
                      c->x86_model_id[0] ? c->x86_model_id : "unknown");

        if (c->x86_mask || c->cpuid_level >= 0)
                seq_printf(m, "stepping\t: %d\n", c->x86_mask);
        else
                seq_printf(m, "stepping\t: unknown\n");

        if (cpu_has(c, X86_FEATURE_TSC)) {
                unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data));
                if (!freq)
                        freq = cpu_khz;
                seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
                             freq / 1000, (freq % 1000));
        }

        /* Cache size */
        if (c->x86_cache_size >= 0)
                seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);

#ifdef CONFIG_SMP
        if (smp_num_siblings * c->x86_max_cores > 1) {
                int cpu = c - cpu_data;
                seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
                seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
                seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
                seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
        }
#endif

        seq_printf(m,
                "fpu\t\t: yes\n"
                "fpu_exception\t: yes\n"
                "cpuid level\t: %d\n"
                "wp\t\t: yes\n"
                "flags\t\t:",
                   c->cpuid_level);

        {
                int i;
                for (i = 0; i < 32*NCAPINTS; i++)
                        if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
                                seq_printf(m, " %s", x86_cap_flags[i]);
        }

        seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
                   c->loops_per_jiffy/(500000/HZ),
                   (c->loops_per_jiffy/(5000/HZ)) % 100);

        if (c->x86_tlbsize > 0)
                seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
        seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
        seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);

        seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
                   c->x86_phys_bits, c->x86_virt_bits);

        seq_printf(m, "power management:");
        {
                unsigned i;
                for (i = 0; i < 32; i++)
                        if (c->x86_power & (1 << i)) {
                                if (i < ARRAY_SIZE(x86_power_flags) &&
                                        x86_power_flags[i])
                                        seq_printf(m, "%s%s",
                                                x86_power_flags[i][0] ? " " : "",
                                                x86_power_flags[i]);
                                else
                                        seq_printf(m, " [%d]", i);
                        }
        }

        seq_printf(m, "\n\n");

        return 0;
}

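/* seq_file iterator over cpu_data[]: backs /proc/cpuinfo via cpuinfo_op. */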
static void *c_start(struct seq_file *m, loff_t *pos)
{
        return *pos < NR_CPUS ? cpu_data + *pos : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
        .start = c_start,
        .next = c_next,
        .stop = c_stop,
        .show = show_cpuinfo,
};