x86: prepare setup_pcpu_lpage() for pageattr fix
arch/x86/kernel/setup_percpu.c
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);
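
/*
 * Roughly speaking, per_cpu(var, cpu) resolves to the address of the
 * variable's template in the kernel image plus __per_cpu_offset[cpu],
 * i.e. something like the simplified sketch below:
 *
 *	val = *(int *)((char *)&per_cpu_var(var) + __per_cpu_offset[cpu]);
 *
 * Until setup_per_cpu_areas() fills this table in, every entry is
 * BOOT_PERCPU_OFFSET, so all accesses land in the boot-time copy of
 * the percpu data.
 */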

/*
 * On x86_64 symbols referenced from code should be reachable using
 * 32bit relocations.  Reserve space for static percpu variables in
 * modules so that they are always served from the first chunk which
 * is located at the percpu segment base.  On x86_32, anything can
 * address anywhere.  No need to reserve space in the first chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE	PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE	0
#endif

/**
 * pcpu_need_numa - determine whether percpu allocation needs to consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.  This function determines
 * whether percpu allocation should consider NUMA.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	pg_data_t *last = NULL;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		int node = early_cpu_to_node(cpu);

		if (node_online(node) && NODE_DATA(node) &&
		    last && last != NODE_DATA(node))
			return true;

		last = NODE_DATA(node);
	}
#endif
	return false;
}

/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size of the allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
					unsigned long align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = early_cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = __alloc_bootmem_nopanic(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
						   size, align, goal);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
			 "%016lx\n", cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return __alloc_bootmem_nopanic(size, align, goal);
#endif
}

/*
 * Large page remap allocator
 *
 * This allocator uses a PMD page as its unit.  A PMD page is
 * allocated for each cpu and remapped into the vmalloc area using a
 * PMD mapping.  As a PMD page is quite large, only part of it is
 * used for the first chunk; the unused part is returned to the
 * bootmem allocator.
 *
 * The PMD pages are thus mapped twice - once in the linear physical
 * mapping and once in the vmalloc area for the first percpu chunk.
 * The double mapping adds the pressure of one more PMD TLB entry but
 * is still much better than using only 4k mappings while remaining
 * NUMA friendly.
 */
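
/*
 * A worked example with hypothetical numbers: on a machine with 4
 * possible cpus and PMD_SIZE = 2MB, each cpu gets its own 2MB page,
 * preferably from its own node.  Only the first pcpul_size bytes of
 * each page are kept; the tail is handed back to bootmem.  A 4 *
 * PMD_SIZE = 8MB virtual area is then reserved and cpu N's page is
 * PMD-mapped at pcpul_vm.addr + N * PMD_SIZE, giving the first chunk
 * a contiguous, large-page-mapped virtual layout.
 */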
#ifdef CONFIG_NEED_MULTIPLE_NODES
struct pcpul_ent {
	unsigned int	cpu;
	void		*ptr;
};

static size_t pcpul_size __initdata;
static struct pcpul_ent *pcpul_map __initdata;
static struct vm_struct pcpul_vm;

static struct page * __init pcpul_get_page(unsigned int cpu, int pageno)
{
	size_t off = (size_t)pageno << PAGE_SHIFT;

	if (off >= pcpul_size)
		return NULL;

	return virt_to_page(pcpul_map[cpu].ptr + off);
}

static ssize_t __init setup_pcpu_lpage(size_t static_size)
{
	size_t map_size, dyn_size;
	unsigned int cpu;
	ssize_t ret;

	/*
	 * If large pages aren't supported, there's no benefit in doing
	 * this.  Also, on non-NUMA, embedding is better.
	 *
	 * NOTE: disabled for now.
	 */
	if (true || !cpu_has_pse || !pcpu_need_numa())
		return -EINVAL;

	/*
	 * Currently only a single PMD page is supported.  Supporting
	 * multiple pages won't be too difficult if it ever becomes
	 * necessary.
	 */
	pcpul_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
			       PERCPU_DYNAMIC_RESERVE);
	if (pcpul_size > PMD_SIZE) {
		pr_warning("PERCPU: static data is larger than large page, "
			   "can't use large page\n");
		return -EINVAL;
	}
	dyn_size = pcpul_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;
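
	/*
	 * Sizing with hypothetical numbers for illustration: if
	 * static_size is 128k and the module and dynamic reserves are
	 * 8k and 12k, pcpul_size = PFN_ALIGN(148k) = 148k, comfortably
	 * below the 2MB PMD_SIZE.  On x86_64, where the first chunk
	 * reserve equals the module reserve, dyn_size works out to
	 * 148k - 128k - 8k = 12k, i.e. exactly the dynamic reserve.
	 */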

	/* allocate pointer array and alloc large pages */
	map_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpul_map[0]));
	pcpul_map = alloc_bootmem(map_size);

	for_each_possible_cpu(cpu) {
		pcpul_map[cpu].cpu = cpu;
		pcpul_map[cpu].ptr = pcpu_alloc_bootmem(cpu, PMD_SIZE,
							PMD_SIZE);
		if (!pcpul_map[cpu].ptr)
			goto enomem;

		/*
		 * Only use pcpul_size bytes and give back the rest.
		 *
		 * Ingo: The 2MB up-rounding bootmem is needed to make
		 * sure the partial 2MB page is still fully RAM - it's
		 * not well-specified to have a PAT-incompatible area
		 * (unmapped RAM, device memory, etc.) in that hole.
		 */
		free_bootmem(__pa(pcpul_map[cpu].ptr + pcpul_size),
			     PMD_SIZE - pcpul_size);

		memcpy(pcpul_map[cpu].ptr, __per_cpu_load, static_size);
	}

	/* allocate address and map */
	pcpul_vm.flags = VM_ALLOC;
	pcpul_vm.size = num_possible_cpus() * PMD_SIZE;
	vm_area_register_early(&pcpul_vm, PMD_SIZE);

	for_each_possible_cpu(cpu) {
		pmd_t *pmd, pmd_v;

		pmd = populate_extra_pmd((unsigned long)pcpul_vm.addr +
					 cpu * PMD_SIZE);
		pmd_v = pfn_pmd(page_to_pfn(virt_to_page(pcpul_map[cpu].ptr)),
				PAGE_KERNEL_LARGE);
		set_pmd(pmd, pmd_v);
	}
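
	/*
	 * At this point each cpu's page is visible at two virtual
	 * addresses: its original spot in the linear mapping and the
	 * PAGE_KERNEL_LARGE alias just installed above.  The first
	 * chunk is served from the alias; keeping the two mappings'
	 * attributes coherent is presumably what the pageattr fix
	 * named in the commit title is about.
	 */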

	/* we're ready, commit */
	pr_info("PERCPU: Remapped at %p with large pages, static data "
		"%zu bytes\n", pcpul_vm.addr, static_size);

	ret = pcpu_setup_first_chunk(pcpul_get_page, static_size,
				     PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
				     PMD_SIZE, pcpul_vm.addr, NULL);
	goto out_free_map;

enomem:
	for_each_possible_cpu(cpu)
		if (pcpul_map[cpu].ptr)
			free_bootmem(__pa(pcpul_map[cpu].ptr), pcpul_size);
	ret = -ENOMEM;
out_free_map:
	free_bootmem(__pa(pcpul_map), map_size);
	return ret;
}
#else
static ssize_t __init setup_pcpu_lpage(size_t static_size)
{
	return -EINVAL;
}
#endif

/*
 * Embedding allocator
 *
 * The first chunk is sized to just contain the static area plus
 * module and dynamic reserves, and is embedded into the linear
 * physical mapping so that it can use a PMD mapping without
 * additional TLB pressure.
 */
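
/*
 * Note how the reserve is split below: PERCPU_FIRST_CHUNK_RESERVE
 * bytes go in as the reserved size and the remainder as the dynamic
 * size.  On x86_64 the module reserve thus stays reserved for static
 * percpu variables in modules while the dynamic reserve becomes the
 * dynamic area; on x86_32 the first chunk reserve is 0, so the whole
 * reserve is dynamic.  The trailing -1 lets the generic code choose
 * the unit size itself.
 */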
static ssize_t __init setup_pcpu_embed(size_t static_size)
{
	size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;

	/*
	 * If large pages aren't supported, there's no benefit in doing
	 * this.  Also, embedding allocation doesn't play well with
	 * NUMA.
	 */
	if (!cpu_has_pse || pcpu_need_numa())
		return -EINVAL;

	return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
				      reserve - PERCPU_FIRST_CHUNK_RESERVE, -1);
}

/*
 * 4k page allocator
 *
 * This is the basic allocator.  The static percpu area is allocated
 * page-by-page and most of the initialization is done by the generic
 * setup function.
 */
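
/*
 * The page pointers are kept in a flat, cpu-major array: the page
 * for (cpu, pageno) lives at index cpu * pcpu4k_nr_static_pages +
 * pageno.  For example, with 3 static pages, cpu 2's page 1 is
 * pcpu4k_pages[2 * 3 + 1] = pcpu4k_pages[7].
 */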
static struct page **pcpu4k_pages __initdata;
static int pcpu4k_nr_static_pages __initdata;

static struct page * __init pcpu4k_get_page(unsigned int cpu, int pageno)
{
	if (pageno < pcpu4k_nr_static_pages)
		return pcpu4k_pages[cpu * pcpu4k_nr_static_pages + pageno];
	return NULL;
}

static void __init pcpu4k_populate_pte(unsigned long addr)
{
	populate_extra_pte(addr);
}

static ssize_t __init setup_pcpu_4k(size_t static_size)
{
	size_t pages_size;
	unsigned int cpu;
	int i, j;
	ssize_t ret;

	pcpu4k_nr_static_pages = PFN_UP(static_size);

	/* unaligned allocations can't be freed, round up to page size */
	pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * num_possible_cpus()
			       * sizeof(pcpu4k_pages[0]));
	pcpu4k_pages = alloc_bootmem(pages_size);

	/* allocate and copy */
	j = 0;
	for_each_possible_cpu(cpu)
		for (i = 0; i < pcpu4k_nr_static_pages; i++) {
			void *ptr;

			ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
			if (!ptr)
				goto enomem;

			memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
			pcpu4k_pages[j++] = virt_to_page(ptr);
		}

	/* we're ready, commit */
	pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
		pcpu4k_nr_static_pages, static_size);

	ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size,
				     PERCPU_FIRST_CHUNK_RESERVE, -1,
				     -1, NULL, pcpu4k_populate_pte);
	goto out_free_ar;

enomem:
	while (--j >= 0)
		free_bootmem(__pa(page_address(pcpu4k_pages[j])), PAGE_SIZE);
	ret = -ENOMEM;
out_free_ar:
	free_bootmem(__pa(pcpu4k_pages), pages_size);
	return ret;
}

static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	struct desc_struct gdt;

	pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
			0x2 | DESCTYPE_S, 0x8);
	gdt.s = 1;
	write_gdt_entry(get_cpu_gdt_table(cpu),
			GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
#endif
}
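
/*
 * On 32-bit, percpu variables are reached through a dedicated
 * segment: the descriptor built above has base = per_cpu_offset(cpu)
 * and a 4GB limit (0xFFFFF pages with the granularity flag, 0x8,
 * set), so a segment-relative access at the address of a percpu
 * template variable lands in this cpu's area.  Type 0x2 makes it a
 * read/write data segment.  x86_64 keeps the percpu base in a model
 * specific register instead and needs no GDT entry here.
 */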

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	size_t static_size = __per_cpu_end - __per_cpu_start;
	unsigned int cpu;
	unsigned long delta;
	size_t pcpu_unit_size;
	ssize_t ret;

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	/*
	 * Allocate percpu area.  If PSE is supported, try to make use
	 * of large page mappings.  Please read comments on top of
	 * each allocator for details.
	 */
	ret = setup_pcpu_lpage(static_size);
	if (ret < 0)
		ret = setup_pcpu_embed(static_size);
	if (ret < 0)
		ret = setup_pcpu_4k(static_size);
	if (ret < 0)
		panic("cannot allocate static percpu area (%zu bytes, err=%zd)",
		      static_size, ret);

	pcpu_unit_size = ret;

	/* alrighty, percpu areas up and running */
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
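
	/*
	 * delta rebases an address in the kernel image's percpu
	 * template onto the first chunk; adding cpu * pcpu_unit_size
	 * then selects that cpu's unit.  For instance, cpu 3's copy of
	 * a variable at __per_cpu_start + 0x40 ends up at
	 * pcpu_base_addr + 3 * pcpu_unit_size + 0x40.
	 */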
	for_each_possible_cpu(cpu) {
		per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
		setup_percpu_segment(cpu);
		setup_stack_canary_segment(cpu);
		/*
		 * Copy data used in early init routines from the
		 * initial arrays to the per cpu data areas.  These
		 * arrays then become expendable and the *_early_ptr's
		 * are zeroed indicating that the static arrays are
		 * gone.
		 */
#ifdef CONFIG_X86_LOCAL_APIC
		per_cpu(x86_cpu_to_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
		per_cpu(irq_stack_ptr, cpu) =
			per_cpu(irq_stack_union.irq_stack, cpu) +
			IRQ_STACK_SIZE - 64;
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
			early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
#endif
		/*
		 * Up to this point, the boot CPU has been using .data.init
		 * area.  Reload any changed state for the boot CPU.
		 */
		if (cpu == boot_cpu_id)
			switch_to_new_gdt(cpu);
	}

	/* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
	/*
	 * Make sure the boot cpu's node_number is right when the boot
	 * cpu is on a node that doesn't have memory installed.
	 */
	per_cpu(node_number, boot_cpu_id) = cpu_to_node(boot_cpu_id);
#endif

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}