arch/x86/mm/init-xen.c (linux-flexiantxendom0-3.2.10.git)
#include <linux/gfp.h>
#include <linux/initrd.h>
#include <linux/ioport.h>
#include <linux/swap.h>
#include <linux/bootmem.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/init.h>
#include <asm/page.h>
#include <asm/page_types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/proto.h>

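/* Per-CPU mmu_gather state used by the generic TLB teardown code. */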
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

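/*
 * Physical page-frame window used for allocating early page tables:
 * [e820_table_start, e820_table_top), with e820_table_end acting as the
 * current allocation cursor.
 */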
unsigned long __meminitdata e820_table_start;
unsigned long __meminitdata e820_table_end;
unsigned long __meminitdata e820_table_top;

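/* Nonzero once the early (bootmem) phase of memory initialization is over. */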
int after_bootmem;

#if !defined(CONFIG_XEN)
int direct_gbpages
#ifdef CONFIG_DIRECT_GBPAGES
                                = 1
#endif
;
#elif defined(CONFIG_X86_32)
#define direct_gbpages 0
extern unsigned long extend_init_mapping(unsigned long tables_space);
#else
extern void xen_finish_init_mapping(void);
#endif

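/*
 * Estimate how much memory the kernel direct-mapping page tables for
 * [0, end) will need and reserve a physical window to allocate them from.
 * Under Xen, the 64-bit case starts the window right after the initial
 * page tables provided by the hypervisor; the 32-bit case extends the
 * initial mapping instead.
 */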
static void __init find_early_table_space(unsigned long end, int use_pse,
                                          int use_gbpages)
{
        unsigned long puds, pmds, ptes, tables;

        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
        tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);

        if (use_gbpages) {
                unsigned long extra;

                extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
                pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
        } else
                pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;

        tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);

        if (use_pse) {
                unsigned long extra;

                extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
#ifdef CONFIG_X86_32
                extra += PMD_SIZE;
#endif
                ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
        } else
                ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;

        tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);

#ifdef CONFIG_X86_32
        /* for fixmap */
        tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
#endif

        /*
         * RED-PEN putting page tables only on node 0 could
         * cause a hotspot and fill up ZONE_DMA. The page tables
         * need roughly 0.5KB per GB.
         */
#ifdef CONFIG_X86_32
        e820_table_start = extend_init_mapping(tables);
        e820_table_end = e820_table_start;
#else /* CONFIG_X86_64 */
        if (!e820_table_top) {
                e820_table_start = (__pa(xen_start_info->pt_base) >> PAGE_SHIFT) +
                        xen_start_info->nr_pt_frames;
                e820_table_end = e820_table_start;
        } else {
                /*
                 * [table_start, table_top) gets passed to reserve_early(),
                 * so we must not use table_end here, despite continuing
                 * to allocate from there. table_end possibly being below
                 * table_start is, on the other hand, not a problem.
                 */
                e820_table_start = e820_table_top;
        }
#endif
        if (e820_table_start == -1UL)
                panic("Cannot find space for the kernel page tables");

        e820_table_top = e820_table_start + (tables >> PAGE_SHIFT);

        printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
                end, e820_table_start << PAGE_SHIFT, e820_table_top << PAGE_SHIFT);
}

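/*
 * A physical address range together with the largest page sizes that may
 * be used to map it.
 */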
struct map_range {
        unsigned long start;
        unsigned long end;
        unsigned page_size_mask;
};

#ifdef CONFIG_X86_32
#define NR_RANGE_MR 3
#else /* CONFIG_X86_64 */
#define NR_RANGE_MR 5
#endif

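/*
 * Record a [start_pfn, end_pfn) range and its page size mask in mr[] and
 * return the updated range count.
 */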
static int __meminit save_mr(struct map_range *mr, int nr_range,
                             unsigned long start_pfn, unsigned long end_pfn,
                             unsigned long page_size_mask)
{
        if (start_pfn < end_pfn) {
                if (nr_range >= NR_RANGE_MR)
                        panic("run out of range for init_memory_mapping\n");
                mr[nr_range].start = start_pfn<<PAGE_SHIFT;
                mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
                mr[nr_range].page_size_mask = page_size_mask;
                nr_range++;
        }

        return nr_range;
}

/*
 * Set up the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them, they are temporarily mapped.
 */
unsigned long __init_refok init_memory_mapping(unsigned long start,
                                               unsigned long end)
{
        unsigned long page_size_mask = 0;
        unsigned long start_pfn, end_pfn;
        unsigned long ret = 0;
        unsigned long pos;

        struct map_range mr[NR_RANGE_MR];
        int nr_range, i;
        int use_pse, use_gbpages;

        printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);

#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
        /*
         * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
         * This will simplify cpa(), which otherwise needs to support splitting
         * large pages into small ones in interrupt context, etc.
         */
        use_pse = use_gbpages = 0;
#else
        use_pse = cpu_has_pse;
        use_gbpages = direct_gbpages;
#endif

        /* Enable PSE if available */
        if (cpu_has_pse)
                set_in_cr4(X86_CR4_PSE);

        /* Enable PGE if available */
        if (cpu_has_pge) {
                set_in_cr4(X86_CR4_PGE);
                __supported_pte_mask |= _PAGE_GLOBAL;
        }

        if (use_gbpages)
                page_size_mask |= 1 << PG_LEVEL_1G;
        if (use_pse)
                page_size_mask |= 1 << PG_LEVEL_2M;

        memset(mr, 0, sizeof(mr));
        nr_range = 0;

        /* head if not big page aligned */
        start_pfn = start >> PAGE_SHIFT;
        pos = start_pfn << PAGE_SHIFT;
#ifdef CONFIG_X86_32
        /*
         * Don't use a large page for the first 2/4MB of memory
         * because there are often fixed size MTRRs in there
         * and overlapping MTRRs into large pages can cause
         * slowdowns.
         */
        if (pos == 0)
                end_pfn = 1<<(PMD_SHIFT - PAGE_SHIFT);
        else
                end_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
                                 << (PMD_SHIFT - PAGE_SHIFT);
#else /* CONFIG_X86_64 */
        end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
                        << (PMD_SHIFT - PAGE_SHIFT);
#endif
        if (end_pfn > (end >> PAGE_SHIFT))
                end_pfn = end >> PAGE_SHIFT;
        if (start_pfn < end_pfn) {
                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
                pos = end_pfn << PAGE_SHIFT;
        }

        /* big page (2M) range */
        start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
                         << (PMD_SHIFT - PAGE_SHIFT);
#ifdef CONFIG_X86_32
        end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
#else /* CONFIG_X86_64 */
        end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
                         << (PUD_SHIFT - PAGE_SHIFT);
        if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)))
                end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT));
#endif

        if (start_pfn < end_pfn) {
                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
                                page_size_mask & (1<<PG_LEVEL_2M));
                pos = end_pfn << PAGE_SHIFT;
        }

#ifdef CONFIG_X86_64
        /* big page (1G) range */
        start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
                         << (PUD_SHIFT - PAGE_SHIFT);
        end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
        if (start_pfn < end_pfn) {
                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
                                page_size_mask &
                                 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
                pos = end_pfn << PAGE_SHIFT;
        }

        /* tail that is not big page (1G) aligned */
        start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
                         << (PMD_SHIFT - PAGE_SHIFT);
        end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
        if (start_pfn < end_pfn) {
                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
                                page_size_mask & (1<<PG_LEVEL_2M));
                pos = end_pfn << PAGE_SHIFT;
        }
#endif

        /* tail that is not big page (2M) aligned */
        start_pfn = pos>>PAGE_SHIFT;
        end_pfn = end>>PAGE_SHIFT;
        nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

        /* try to merge contiguous ranges with the same page size */
        for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
                unsigned long old_start;
                if (mr[i].end != mr[i+1].start ||
                    mr[i].page_size_mask != mr[i+1].page_size_mask)
                        continue;
                /* move it */
                old_start = mr[i].start;
                memmove(&mr[i], &mr[i+1],
                        (nr_range - 1 - i) * sizeof(struct map_range));
                mr[i--].start = old_start;
                nr_range--;
        }

        for (i = 0; i < nr_range; i++)
                printk(KERN_DEBUG " %010lx - %010lx page %s\n",
                                mr[i].start, mr[i].end,
                        (mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
                         (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));

        /*
         * Find space for the kernel direct mapping tables.
         *
         * Later we should allocate these tables in the local node of the
         * memory mapped. Unfortunately this is done currently before the
         * nodes are discovered.
         */
        if (!after_bootmem)
                find_early_table_space(end, use_pse, use_gbpages);

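        /*
         * On Xen/x86-64, addr_to_page() below converts a page-table entry
         * (a machine address) into the kernel virtual address of the page
         * frame it references, so the boot-time page tables can be walked
         * while tearing down stale initial mappings.
         */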
#ifdef CONFIG_X86_64
#define addr_to_page(addr)                                              \
        ((unsigned long *)                                              \
         ((mfn_to_pfn(((addr) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)      \
           << PAGE_SHIFT) + __START_KERNEL_map))

        if (!start) {
                unsigned long addr, va = __START_KERNEL_map;
                unsigned long *page = (unsigned long *)init_level4_pgt;

                /* Kill mapping of memory below _text. */
                while (va < (unsigned long)&_text) {
                        if (HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0))
                                BUG();
                        va += PAGE_SIZE;
                }

                /* Blow away any spurious initial mappings. */
                va = __START_KERNEL_map + (e820_table_start << PAGE_SHIFT);

                addr = page[pgd_index(va)];
                page = addr_to_page(addr);
                addr = page[pud_index(va)];
                page = addr_to_page(addr);
                while (pmd_index(va) | pte_index(va)) {
                        if (pmd_none(*(pmd_t *)&page[pmd_index(va)]))
                                break;
                        if (HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0))
                                BUG();
                        va += PAGE_SIZE;
                }
        }
#undef addr_to_page
#endif

        for (i = 0; i < nr_range; i++)
                ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
                                                   mr[i].page_size_mask);

#ifdef CONFIG_X86_32
        early_ioremap_page_table_range_init();
#endif

#ifdef CONFIG_X86_64
        BUG_ON(e820_table_end > e820_table_top);
        if (!start)
                xen_finish_init_mapping();
        else
#endif
        if (e820_table_end < e820_table_top)
                /* Disable the 'table_end' allocator. */
                e820_table_top = e820_table_end;

        __flush_tlb_all();

        if (!after_bootmem && e820_table_top > e820_table_start) {
#ifdef CONFIG_X86_64
                if (xen_start_info->mfn_list < __START_KERNEL_map
                    && e820_table_start <= xen_start_info->first_p2m_pfn
                    && e820_table_top > xen_start_info->first_p2m_pfn) {
                        reserve_early(e820_table_start << PAGE_SHIFT,
                                      xen_start_info->first_p2m_pfn
                                      << PAGE_SHIFT,
                                      "PGTABLE");
                        e820_table_start = xen_start_info->first_p2m_pfn
                                         + xen_start_info->nr_p2m_frames;
                }
#endif
                reserve_early(e820_table_start << PAGE_SHIFT,
                              e820_table_top << PAGE_SHIFT, "PGTABLE");
        }

        if (!after_bootmem)
                early_memtest(start, end);

        return ret >> PAGE_SHIFT;
}

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area contains BIOS code and data regions used by X and DOSEMU and similar
 * apps. Access has to be given to non-kernel-RAM areas as well, since these
 * contain the PCI MMIO resources as well as potential BIOS/ACPI data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
        if (pagenr <= 256)
                return 1;
        if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
                return 0;
        if (mfn_to_local_pfn(pagenr) >= max_pfn)
                return 1;
        return 0;
}

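/*
 * Free the pages backing an init section and return them to the page
 * allocator; with CONFIG_DEBUG_PAGEALLOC, just unmap them instead so any
 * late access faults.
 */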
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
        unsigned long addr;
        unsigned long begin_aligned, end_aligned;

        /* Make sure boundaries are page aligned */
        begin_aligned = PAGE_ALIGN(begin);
        end_aligned   = end & PAGE_MASK;

        if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
                begin = begin_aligned;
                end   = end_aligned;
        }

        if (begin >= end)
                return;

        addr = begin;

        /*
         * If debugging page accesses then do not free this memory but
         * mark it not present - any buggy init-section access will
         * create a kernel page fault:
         */
#ifdef CONFIG_DEBUG_PAGEALLOC
        printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
                begin, end);
        set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
        /*
         * We just marked the kernel text read-only above; now that
         * we are going to free part of it, we need to make it
         * writable first.
         */
        set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

        printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);

        for (; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
#ifdef CONFIG_X86_64
                if (addr >= __START_KERNEL_map) {
                        /* make_readonly() reports all kernel addresses. */
                        if (HYPERVISOR_update_va_mapping((unsigned long)__va(__pa(addr)),
                                                         pfn_pte(__pa(addr) >> PAGE_SHIFT,
                                                                 PAGE_KERNEL),
                                                         0))
                                BUG();
                        if (HYPERVISOR_update_va_mapping(addr, __pte(0), 0))
                                BUG();
                }
#endif
                free_page(addr);
                totalram_pages++;
        }
#endif
}

void free_initmem(void)
{
        free_init_pages("unused kernel memory",
                        (unsigned long)(&__init_begin),
                        (unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        /*
         * end could be unaligned, and we cannot align it; the decompressor
         * could be confused by an aligned initrd_end. The trailing partial
         * page was already reserved earlier in
         *   - i386_start_kernel()
         *   - x86_64_start_kernel()
         *   - relocate_initrd()
         * so here we can safely use PAGE_ALIGN() to free that partial page.
         */
        free_init_pages("initrd memory", start, PAGE_ALIGN(end));
}
#endif