x86: convert to the new dynamic percpu allocator
arch/x86/kernel/setup_percpu.c
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

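/* Per-cpu copy of each CPU's logical id, read by raw_smp_processor_id(). */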
DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

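/*
 * Per-cpu offsets used until setup_per_cpu_areas() switches each CPU to
 * its real area.  On 64-bit the boot CPU initially runs on the per-cpu
 * init data at __per_cpu_load; on 32-bit the boot offset is simply 0.
 */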
#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
        [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);

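/*
 * On 32-bit, per-cpu data is reached through a segment register, so each
 * CPU needs a GDT entry whose base is its per-cpu offset.  64-bit loads
 * the GS base MSR directly and needs no descriptor.
 */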
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
        struct desc_struct gdt;

        pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
                        0x2 | DESCTYPE_S, 0x8);
        gdt.s = 1;
        write_gdt_entry(get_cpu_gdt_table(cpu),
                        GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
#endif
}

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
        ssize_t size = __per_cpu_end - __per_cpu_start;
        unsigned int nr_cpu_pages = DIV_ROUND_UP(size, PAGE_SIZE);
        static struct page **pages;
        size_t pages_size;
        unsigned int cpu, i, j;
        unsigned long delta;
        size_t pcpu_unit_size;

        pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
                NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
        pr_info("PERCPU: Allocating %zd bytes for static per cpu data\n", size);

        pages_size = nr_cpu_pages * num_possible_cpus() * sizeof(pages[0]);
        pages = alloc_bootmem(pages_size);

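        /*
         * Allocate the static per-cpu area one page at a time for each
         * possible CPU, preferring node-local memory, and seed every page
         * with a copy of the per-cpu init data from the kernel image.
         */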
        j = 0;
        for_each_possible_cpu(cpu) {
                void *ptr;

                for (i = 0; i < nr_cpu_pages; i++) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
                        ptr = alloc_bootmem_pages(PAGE_SIZE);
#else
                        int node = early_cpu_to_node(cpu);

                        if (!node_online(node) || !NODE_DATA(node)) {
                                ptr = alloc_bootmem_pages(PAGE_SIZE);
                                pr_info("cpu %d has no node %d or node-local "
                                        "memory\n", cpu, node);
                                pr_debug("per cpu data for cpu%d at %016lx\n",
                                         cpu, __pa(ptr));
                        } else {
                                ptr = alloc_bootmem_pages_node(NODE_DATA(node),
                                                               PAGE_SIZE);
                                pr_debug("per cpu data for cpu%d on node%d "
                                         "at %016lx\n", cpu, node, __pa(ptr));
                        }
#endif
                        memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
                        pages[j++] = virt_to_page(ptr);
                }
        }

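        /*
         * Hand the pages to the percpu allocator, which sets them up as
         * the static first chunk and returns the size of each CPU's unit.
         * The temporary page array is no longer needed afterwards.
         */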
        pcpu_unit_size = pcpu_setup_static(populate_extra_pte, pages, size);

        free_bootmem(__pa(pages), pages_size);

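        /*
         * The static per-cpu data now lives at pcpu_base_addr instead of
         * __per_cpu_start, so recompute each CPU's offset from the new
         * base and re-point early boot state at the new copies.
         */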
        delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
        for_each_possible_cpu(cpu) {
                per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
                per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
                per_cpu(cpu_number, cpu) = cpu;
                setup_percpu_segment(cpu);
                setup_stack_canary_segment(cpu);
                /*
                 * Copy data used in early init routines from the
                 * initial arrays to the per cpu data areas.  These
                 * arrays then become expendable and the *_early_ptr's
                 * are zeroed indicating that the static arrays are
                 * gone.
                 */
#ifdef CONFIG_X86_LOCAL_APIC
                per_cpu(x86_cpu_to_apicid, cpu) =
                        early_per_cpu_map(x86_cpu_to_apicid, cpu);
                per_cpu(x86_bios_cpu_apicid, cpu) =
                        early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
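                /* point irq_stack_ptr near the top of this CPU's IRQ stack */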
                per_cpu(irq_stack_ptr, cpu) =
                        per_cpu(irq_stack_union.irq_stack, cpu) +
                        IRQ_STACK_SIZE - 64;
#ifdef CONFIG_NUMA
                per_cpu(x86_cpu_to_node_map, cpu) =
                        early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
#endif
                /*
                 * Up to this point, the boot CPU has been using .data.init
                 * area.  Reload any changed state for the boot CPU.
                 */
                if (cpu == boot_cpu_id)
                        switch_to_new_gdt(cpu);

                DBG("PERCPU: cpu %4d %p\n", cpu, (void *)per_cpu_offset(cpu));
        }

        /* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
        early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
        early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
        early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

        /* Setup node to cpumask map */
        setup_node_to_cpumask_map();

        /* Setup cpu initialized, callin, callout masks */
        setup_cpu_local_masks();
}