target: Fix bug in handling of FILEIO + block_device resize ops
[linux-flexiantxendom0-3.2.10.git] / arch / arm / kernel / setup.c
1 /*
2  *  linux/arch/arm/kernel/setup.c
3  *
4  *  Copyright (C) 1995-2001 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/export.h>
11 #include <linux/kernel.h>
12 #include <linux/stddef.h>
13 #include <linux/ioport.h>
14 #include <linux/delay.h>
15 #include <linux/utsname.h>
16 #include <linux/initrd.h>
17 #include <linux/console.h>
18 #include <linux/bootmem.h>
19 #include <linux/seq_file.h>
20 #include <linux/screen_info.h>
21 #include <linux/init.h>
22 #include <linux/kexec.h>
23 #include <linux/of_fdt.h>
24 #include <linux/root_dev.h>
25 #include <linux/cpu.h>
26 #include <linux/interrupt.h>
27 #include <linux/smp.h>
28 #include <linux/fs.h>
29 #include <linux/proc_fs.h>
30 #include <linux/memblock.h>
31 #include <linux/bug.h>
32 #include <linux/compiler.h>
33 #include <linux/sort.h>
34
35 #include <asm/unified.h>
36 #include <asm/cp15.h>
37 #include <asm/cpu.h>
38 #include <asm/cputype.h>
39 #include <asm/elf.h>
40 #include <asm/procinfo.h>
41 #include <asm/sections.h>
42 #include <asm/setup.h>
43 #include <asm/smp_plat.h>
44 #include <asm/mach-types.h>
45 #include <asm/cacheflush.h>
46 #include <asm/cachetype.h>
47 #include <asm/tlbflush.h>
48
49 #include <asm/prom.h>
50 #include <asm/mach/arch.h>
51 #include <asm/mach/irq.h>
52 #include <asm/mach/time.h>
53 #include <asm/system_info.h>
54 #include <asm/system_misc.h>
55 #include <asm/traps.h>
56 #include <asm/unwind.h>
57 #include <asm/memblock.h>
58
59 #if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
60 #include "compat.h"
61 #endif
62 #include "atags.h"
63 #include "tcm.h"
64
65 #ifndef MEM_SIZE
66 #define MEM_SIZE        (16*1024*1024)
67 #endif
68
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

/*
 * Parse the "fpe=" command line option selecting the FP emulator type.
 *
 * Use a bounded, always-NUL-terminated copy rather than a fixed 8-byte
 * memcpy: the old code could read past the end of a shorter option
 * string and could leave fpe_type unterminated for 8+ char input.
 */
static int __init fpe_setup(char *line)
{
	strncpy(fpe_type, line, sizeof(fpe_type) - 1);
	fpe_type[sizeof(fpe_type) - 1] = '\0';
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
80
81 extern void paging_init(struct machine_desc *desc);
82 extern void sanity_check_meminfo(void);
83 extern void reboot_setup(char *str);
84
85 unsigned int processor_id;
86 EXPORT_SYMBOL(processor_id);
87 unsigned int __machine_arch_type __read_mostly;
88 EXPORT_SYMBOL(__machine_arch_type);
89 unsigned int cacheid __read_mostly;
90 EXPORT_SYMBOL(cacheid);
91
92 unsigned int __atags_pointer __initdata;
93
94 unsigned int system_rev;
95 EXPORT_SYMBOL(system_rev);
96
97 unsigned int system_serial_low;
98 EXPORT_SYMBOL(system_serial_low);
99
100 unsigned int system_serial_high;
101 EXPORT_SYMBOL(system_serial_high);
102
103 unsigned int elf_hwcap __read_mostly;
104 EXPORT_SYMBOL(elf_hwcap);
105
106
107 #ifdef MULTI_CPU
108 struct processor processor __read_mostly;
109 #endif
110 #ifdef MULTI_TLB
111 struct cpu_tlb_fns cpu_tlb __read_mostly;
112 #endif
113 #ifdef MULTI_USER
114 struct cpu_user_fns cpu_user __read_mostly;
115 #endif
116 #ifdef MULTI_CACHE
117 struct cpu_cache_fns cpu_cache __read_mostly;
118 #endif
119 #ifdef CONFIG_OUTER_CACHE
120 struct outer_cache_fns outer_cache __read_mostly;
121 EXPORT_SYMBOL(outer_cache);
122 #endif
123
124 /*
125  * Cached cpu_architecture() result for use by assembler code.
126  * C code should use the cpu_architecture() function instead of accessing this
127  * variable directly.
128  */
129 int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
130
/*
 * Small per-CPU stacks for the re-entrant exception modes.  One
 * three-word slot each for IRQ, abort and undefined mode; the stack
 * pointers are installed into the banked sp registers by cpu_init().
 */
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];
138
139 char elf_platform[ELF_PLATFORM_SIZE];
140 EXPORT_SYMBOL(elf_platform);
141
142 static const char *cpu_name;
143 static const char *machine_name;
144 static char __initdata cmd_line[COMMAND_LINE_SIZE];
145 struct machine_desc *machine_desc __initdata;
146
147 static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
148 static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
149 #define ENDIANNESS ((char)endian_test.l)
150
151 DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
152
153 /*
154  * Standard memory resources
155  */
/*
 * Placeholder iomem resources for video RAM and the kernel text/data
 * segments.  The start/end addresses are filled in at boot time by
 * request_standard_resources().
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

/* Convenience aliases; indices must match the mem_res[] order above. */
#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]
180
/*
 * Legacy PC-style parallel-port I/O ranges.  They are only claimed
 * when the machine descriptor sets the corresponding reserve_lp* flag
 * (see request_standard_resources()).
 */
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

/* Convenience aliases; indices must match the io_res[] order above. */
#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
205
/*
 * Human-readable architecture names, indexed by the CPU_ARCH_*
 * constants returned by cpu_architecture() (used for boot messages
 * and /proc/cpuinfo output).
 */
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
225
226 static int __get_cpu_architecture(void)
227 {
228         int cpu_arch;
229
230         if ((read_cpuid_id() & 0x0008f000) == 0) {
231                 cpu_arch = CPU_ARCH_UNKNOWN;
232         } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
233                 cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
234         } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
235                 cpu_arch = (read_cpuid_id() >> 16) & 7;
236                 if (cpu_arch)
237                         cpu_arch += CPU_ARCH_ARMv3;
238         } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
239                 unsigned int mmfr0;
240
241                 /* Revised CPUID format. Read the Memory Model Feature
242                  * Register 0 and check for VMSAv7 or PMSAv7 */
243                 asm("mrc        p15, 0, %0, c0, c1, 4"
244                     : "=r" (mmfr0));
245                 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
246                     (mmfr0 & 0x000000f0) >= 0x00000030)
247                         cpu_arch = CPU_ARCH_ARMv7;
248                 else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
249                          (mmfr0 & 0x000000f0) == 0x00000020)
250                         cpu_arch = CPU_ARCH_ARMv6;
251                 else
252                         cpu_arch = CPU_ARCH_UNKNOWN;
253         } else
254                 cpu_arch = CPU_ARCH_UNKNOWN;
255
256         return cpu_arch;
257 }
258
/*
 * Return the cached CPU architecture constant.  __cpu_architecture is
 * filled in by setup_processor(); calling this before then trips the
 * BUG_ON rather than returning a bogus value.
 */
int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}
265
/*
 * Determine whether the instruction cache can alias, i.e. whether one
 * physical line may appear at more than one virtual index.  PIPT
 * I-caches never alias; for the others it depends on the geometry
 * reported by the CPU's cache registers.
 */
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		/* Select the L1 instruction cache via CSSELR, then read
		 * its line size and set count back from CCSIDR. */
		asm("mcr        p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc        p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		/* Aliasing becomes possible once one way spans more
		 * than a single page. */
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		/* Cache type register bit 11 is used here as the
		 * I-cache aliasing indicator. */
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
298
/*
 * Decode the cache type register into the global 'cacheid' flags
 * (VIVT/VIPT/PIPT, ASID-tagged, I-cache aliasing) and print the
 * detected data/instruction cache types.
 */
static void __init cacheid_init(void)
{
	unsigned int cachetype = read_cpuid_cachetype();
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			/* Bits 15:14 select the L1 I-cache policy. */
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			/* ARMv6 format: bit 23 distinguishes aliasing
			 * from non-aliasing VIPT D-caches. */
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		/* The I-cache may alias even when the D-cache does not. */
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		/* Pre-v6 cores are treated as VIVT. */
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
340
341 /*
342  * These functions re-use the assembly code in head.S, which
343  * already provide the required functionality.
344  */
345 extern struct proc_info_list *lookup_processor_type(unsigned int);
346
/*
 * printf-style output usable before the console is up.  The message
 * is formatted into a fixed 256-byte buffer (longer output is
 * truncated), sent to the low-level debug channel when
 * CONFIG_DEBUG_LL is enabled, and also handed to printk.
 */
void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}
362
363 static void __init feat_v6_fixup(void)
364 {
365         int id = read_cpuid_id();
366
367         if ((id & 0xff0f0000) != 0x41070000)
368                 return;
369
370         /*
371          * HWCAP_TLS is available only on 1136 r1p0 and later,
372          * see also kuser_get_tls_init.
373          */
374         if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
375                 elf_hwcap &= ~HWCAP_TLS;
376 }
377
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	/* Sanity check: the stacks[] array only has NR_CPUS slots. */
	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 *
	 * For each of IRQ, abort and undefined mode in turn: switch to
	 * the mode with IRQ/FIQ masked, point sp at that mode's slot in
	 * this CPU's stacks[] entry, then drop back to SVC mode.
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
430
431 int __cpu_logical_map[NR_CPUS];
432
433 void __init smp_setup_processor_id(void)
434 {
435         int i;
436         u32 cpu = is_smp() ? read_cpuid_mpidr() & 0xff : 0;
437
438         cpu_logical_map(0) = cpu;
439         for (i = 1; i < NR_CPUS; ++i)
440                 cpu_logical_map(i) = i == cpu ? 0 : i;
441
442         printk(KERN_INFO "Booting Linux on physical CPU %d\n", cpu);
443 }
444
/*
 * Identify the CPU, bind the per-CPU-type function tables, fill in
 * the utsname/ELF platform strings and hwcaps, then initialise the
 * cache id and the boot CPU's exception stacks.
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		/* No usable processor entry: spin forever. */
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

	/* Copy the per-CPU-type operation tables when the kernel is
	 * built for more than one CPU/TLB/user/cache implementation. */
#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	/* Machine/platform strings get an endianness suffix character. */
	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_init();
}
495
/*
 * Print every machine id/name the kernel was built with, then halt.
 * Called when the boot-supplied machine number is not recognised;
 * this function does not return.
 */
void __init dump_machine_table(void)
{
	struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}
509
510 int __init arm_add_memory(phys_addr_t start, unsigned long size)
511 {
512         struct membank *bank = &meminfo.bank[meminfo.nr_banks];
513
514         if (meminfo.nr_banks >= NR_BANKS) {
515                 printk(KERN_CRIT "NR_BANKS too low, "
516                         "ignoring memory at 0x%08llx\n", (long long)start);
517                 return -EINVAL;
518         }
519
520         /*
521          * Ensure that start/size are aligned to a page boundary.
522          * Size is appropriately rounded down, start is rounded up.
523          */
524         size -= start & ~PAGE_MASK;
525         bank->start = PAGE_ALIGN(start);
526         bank->size  = size & PAGE_MASK;
527
528         /*
529          * Check whether this memory region has non-zero size or
530          * invalid node number.
531          */
532         if (bank->size == 0)
533                 return -EINVAL;
534
535         meminfo.nr_banks++;
536         return 0;
537 }
538
539 /*
540  * Pick out the memory size.  We look for mem=size@start,
541  * where start and size are "size[KkMm]"
542  */
543 static int __init early_mem(char *p)
544 {
545         static int usermem __initdata = 0;
546         unsigned long size;
547         phys_addr_t start;
548         char *endp;
549
550         /*
551          * If the user specifies memory size, we
552          * blow away any automatically generated
553          * size.
554          */
555         if (usermem == 0) {
556                 usermem = 1;
557                 meminfo.nr_banks = 0;
558         }
559
560         start = PHYS_OFFSET;
561         size  = memparse(p, &endp);
562         if (*endp == '@')
563                 start = memparse(endp + 1, NULL);
564
565         arm_add_memory(start, size);
566
567         return 0;
568 }
569 early_param("mem", early_mem);
570
571 static void __init
572 setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
573 {
574 #ifdef CONFIG_BLK_DEV_RAM
575         extern int rd_size, rd_image_start, rd_prompt, rd_doload;
576
577         rd_image_start = image_start;
578         rd_prompt = prompt;
579         rd_doload = doload;
580
581         if (rd_sz)
582                 rd_size = rd_sz;
583 #endif
584 }
585
/*
 * Claim the standard boot-time resources: one "System RAM" iomem
 * resource per memblock region (with the kernel code/data nested
 * inside), the machine's video RAM window, and the legacy parallel
 * port I/O ranges the machine reserves.
 */
static void __init request_standard_resources(struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	/* Fill in the kernel text/data extents from the linker symbols. */
	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		/* Nest the kernel segments inside the RAM region that
		 * fully contains them. */
		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
630
631 /*
632  *  Tag parsing.
633  *
634  * This is the new way of passing data to the kernel at boot time.  Rather
635  * than passing a fixed inflexible structure to the kernel, we pass a list
636  * of variable-sized tags to the kernel.  The first tag must be a ATAG_CORE
637  * tag for the list to be recognised (to distinguish the tagged list from
638  * a param_struct).  The list is terminated with a zero-length tag (this tag
639  * is not parsed in any way).
640  */
641 static int __init parse_tag_core(const struct tag *tag)
642 {
643         if (tag->hdr.size > 2) {
644                 if ((tag->u.core.flags & 1) == 0)
645                         root_mountflags &= ~MS_RDONLY;
646                 ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
647         }
648         return 0;
649 }
650
651 __tagtable(ATAG_CORE, parse_tag_core);
652
653 static int __init parse_tag_mem32(const struct tag *tag)
654 {
655         return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
656 }
657
658 __tagtable(ATAG_MEM, parse_tag_mem32);
659
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
/* Default text-console geometry, overridden by ATAG_VIDEOTEXT below. */
struct screen_info screen_info = {
 .orig_video_lines      = 30,
 .orig_video_cols       = 80,
 .orig_video_mode       = 0,
 .orig_video_ega_bx     = 0,
 .orig_video_isVGA      = 1,
 .orig_video_points     = 8
};

/* ATAG_VIDEOTEXT: copy the bootloader's text-console state. */
static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif
686
687 static int __init parse_tag_ramdisk(const struct tag *tag)
688 {
689         setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
690                       (tag->u.ramdisk.flags & 2) == 0,
691                       tag->u.ramdisk.start, tag->u.ramdisk.size);
692         return 0;
693 }
694
695 __tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
696
697 static int __init parse_tag_serialnr(const struct tag *tag)
698 {
699         system_serial_low = tag->u.serialnr.low;
700         system_serial_high = tag->u.serialnr.high;
701         return 0;
702 }
703
704 __tagtable(ATAG_SERIAL, parse_tag_serialnr);
705
/* ATAG_REVISION: record the board revision number. */
static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);
713
/*
 * ATAG_CMDLINE: merge the bootloader command line into the default
 * one.  Behaviour depends on the kernel configuration: append
 * (CMDLINE_EXTEND), ignore (CMDLINE_FORCE) or replace (default).
 */
static int __init parse_tag_cmdline(const struct tag *tag)
{
#if defined(CONFIG_CMDLINE_EXTEND)
	strlcat(default_command_line, " ", COMMAND_LINE_SIZE);
	strlcat(default_command_line, tag->u.cmdline.cmdline,
		COMMAND_LINE_SIZE);
#elif defined(CONFIG_CMDLINE_FORCE)
	pr_warning("Ignoring tag cmdline (using the default kernel command line)\n");
#else
	strlcpy(default_command_line, tag->u.cmdline.cmdline,
		COMMAND_LINE_SIZE);
#endif
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
730
731 /*
732  * Scan the tag table for this tag, and call its parse function.
733  * The tag table is built by the linker from all the __tagtable
734  * declarations.
735  */
736 static int __init parse_tag(const struct tag *tag)
737 {
738         extern struct tagtable __tagtable_begin, __tagtable_end;
739         struct tagtable *t;
740
741         for (t = &__tagtable_begin; t < &__tagtable_end; t++)
742                 if (tag->hdr.tag == t->tag) {
743                         t->parse(tag);
744                         break;
745                 }
746
747         return t < &__tagtable_end;
748 }
749
750 /*
751  * Parse all tags in the list, checking both the global and architecture
752  * specific tag tables.
753  */
754 static void __init parse_tags(const struct tag *t)
755 {
756         for (; t->hdr.size; t = tag_next(t))
757                 if (!parse_tag(t))
758                         printk(KERN_WARNING
759                                 "Ignoring unrecognised tag 0x%08x\n",
760                                 t->hdr.tag);
761 }
762
763 /*
764  * This holds our defaults.
765  */
/*
 * Fallback tag list used when the bootloader supplies none: a core
 * tag, one MEM_SIZE memory tag (start patched to PHYS_OFFSET in
 * setup_machine_tags()) and the ATAG_NONE terminator.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE },
	{ 0, ATAG_NONE }
};
779
/*
 * arch_initcall hook giving the board code a chance to register or
 * adjust its platform devices once the core is up.
 */
static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
	return 0;
}
arch_initcall(customize_machine);
788
#ifdef CONFIG_KEXEC
/* Total low memory managed by the kernel, in bytes. */
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	/* %lu matches the explicit unsigned long casts (%ld did not). */
	printk(KERN_INFO "Reserving %luMB of memory at %luMB "
	       "for crashkernel (System RAM: %luMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */
837
838 static void __init squash_mem_tags(struct tag *tag)
839 {
840         for (; tag->hdr.size; tag = tag_next(tag))
841                 if (tag->hdr.tag == ATAG_MEM)
842                         tag->hdr.tag = ATAG_NONE;
843 }
844
/*
 * Select the machine descriptor matching the board number passed by
 * the bootloader (in r1) and process the associated ATAG list,
 * falling back to the built-in init_tags when none is found.
 * Does not return if the machine id is unknown.
 */
static struct machine_desc * __init setup_machine_tags(unsigned int nr)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc = NULL, *p;
	char *from = default_command_line;

	init_tags.mem.start = PHYS_OFFSET;

	/*
	 * locate machine in the list of supported machines.
	 */
	for_each_machine_desc(p)
		if (nr == p->nr) {
			printk("Machine: %s\n", p->name);
			mdesc = p;
			break;
		}

	if (!mdesc) {
		early_print("\nError: unrecognized/unsupported machine ID"
			" (r1 = 0x%08x).\n\n", nr);
		dump_machine_table(); /* does not return */
	}

	/* Prefer the bootloader-supplied atags pointer; fall back to
	 * the machine's static atag offset. */
	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
	else if (mdesc->atag_offset)
		tags = (void *)(PAGE_OFFSET + mdesc->atag_offset);

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
#endif

	if (tags->hdr.tag != ATAG_CORE) {
#if defined(CONFIG_OF)
		/*
		 * If CONFIG_OF is set, then assume this is a reasonably
		 * modern system that should pass boot parameters
		 */
		early_print("Warning: Neither atags nor dtb found\n");
#endif
		tags = (struct tag *)&init_tags;
	}

	/* Give the board code a chance to rewrite tags/cmdline/meminfo. */
	if (mdesc->fixup)
		mdesc->fixup(tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		/* A fixup-populated meminfo overrides any ATAG_MEM tags. */
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		save_atags(tags);
		parse_tags(tags);
	}

	/* parse_early_param needs a boot_command_line */
	strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

	return mdesc;
}
909
910 static int __init meminfo_cmp(const void *_a, const void *_b)
911 {
912         const struct membank *a = _a, *b = _b;
913         long cmp = bank_pfn_start(a) - bank_pfn_start(b);
914         return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
915 }
916
/*
 * Main ARM boot-time setup: identify the CPU and machine, parse the
 * boot parameters, initialise memory/paging, claim standard
 * resources and run the machine's early init hook.
 */
void __init setup_arch(char **cmdline_p)
{
	struct machine_desc *mdesc;

	setup_processor();
	/* Try a flattened device tree first, then classic ATAGs. */
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		extern unsigned long arm_dma_zone_size;
		arm_dma_zone_size = mdesc->dma_zone_size;
	}
#endif
	if (mdesc->restart_mode)
		reboot_setup(&mdesc->restart_mode);

	/* Describe the kernel image layout to the initial mm. */
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	/* Banks must be sorted before the meminfo/memblock setup. */
	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
	sanity_check_meminfo();
	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

#ifdef CONFIG_SMP
	if (is_smp())
		smp_init_cpus();
#endif
	reserve_crashkernel();

	tcm_init();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}
983
984
/*
 * topology_init - register a CPU device for every possible CPU so each
 * appears under the sysfs CPU hierarchy.  Every CPU is marked
 * hotpluggable.  Always returns 0.
 */
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);
998
#ifdef CONFIG_HAVE_PROC_CPU
/*
 * proc_cpu_init - create the /proc/cpu directory (populated elsewhere,
 * e.g. by the CPU alignment handler).  Returns 0 on success, -ENOMEM
 * if the directory could not be created.
 */
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif
1011
/*
 * Names for the elf_hwcap capability bits, printed by c_show() in the
 * /proc/cpuinfo "Features" line.  Index i names bit i of elf_hwcap
 * (c_show tests elf_hwcap & (1 << i)), so the order here is part of
 * the ABI with userspace — append only, never reorder.  NULL-terminated.
 */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	NULL
};
1034
/*
 * c_show - emit the single /proc/cpuinfo record: processor identity,
 * BogoMIPS, hwcap feature names, decoded CPUID fields, and board
 * hardware/revision/serial info.  Always returns 0.
 *
 * The output format is parsed by userspace (notably glibc); changes to
 * field names or layout are ABI-sensitive.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i;

	/* Low nibble of the main ID register is the revision. */
	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	/* hwcap_str[i] names elf_hwcap bit i; print each set bit's name. */
	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	/* Decode the main ID register; field layout differs by CPU era. */
	if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
	} else {
		if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (read_cpuid_id() >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (read_cpuid_id() >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (read_cpuid_id() >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
1097
1098 static void *c_start(struct seq_file *m, loff_t *pos)
1099 {
1100         return *pos < 1 ? (void *)1 : NULL;
1101 }
1102
1103 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1104 {
1105         ++*pos;
1106         return NULL;
1107 }
1108
/* seq_file .stop op: nothing was allocated in c_start, so nothing to free. */
static void c_stop(struct seq_file *m, void *v)
{
}
1112
/* seq_file operations implementing /proc/cpuinfo. */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};