/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/mmzone.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>

#include <asm/a.out.h>
#include <asm/bitops.h>
#include <asm/machvec.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/* References to section boundaries: */
extern char _stext, _etext, _edata, __init_begin, __init_end, _end;

extern void ia64_tlb_init (void);

unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

#ifdef CONFIG_VIRTUAL_MEM_MAP
# define LARGE_GAP	0x40000000	/* use the virtual mem map if a hole is larger than this */
unsigned long vmalloc_end = VMALLOC_END_INIT;
static struct page *vmem_map;
static unsigned long num_dma_physpages;
#endif

static int pgt_cache_water[2] = { 25, 50 };

struct page *zero_page_memmap_ptr;	/* map entry for zero page */
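
/*
 * Trim the per-CPU page-table quicklists: once the number of cached
 * page-table pages exceeds the high water mark, keep freeing cached pages
 * until the count drops below the low water mark.
 */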
void
check_pgt_cache (void)
{
	int low, high;

	low = pgt_cache_water[0];
	high = pgt_cache_water[1];

	if (pgtable_cache_size > (u64) high) {
		do {
			/* release one cached pgd/pmd page at a time, if available: */
			if (pgd_quicklist)
				free_page((unsigned long)pgd_alloc_one_fast(0));
			if (pmd_quicklist)
				free_page((unsigned long)pmd_alloc_one_fast(0, 0));
		} while (pgtable_cache_size > (u64) low);
	}
}
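
/*
 * Called after a new translation has been installed.  For executable pages,
 * lazily flush the i-cache for the page and use the PG_arch_1 flag to
 * remember that i-cache and d-cache are now coherent, so the flush happens at
 * most once per page.
 */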
void
update_mmu_cache (struct vm_area_struct *vma, unsigned long vaddr, pte_t pte)
{
	unsigned long addr;
	struct page *page;

	if (!pte_exec(pte))
		return;				/* not an executable page... */

	page = pte_page(pte);
	/* don't use VADDR: it may not be mapped on this CPU (or may have just been flushed): */
	addr = (unsigned long) page_address(page);

	if (test_bit(PG_arch_1, &page->flags))
		return;				/* i-cache is already coherent with d-cache */

	flush_icache_range(addr, addr + PAGE_SIZE);
	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
}
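
/*
 * Derive the bottom of the register backing store (RBS) for the current task
 * from the maximum stack size allowed by RLIMIT_STACK, capped at
 * MAX_USER_STACK_SIZE.
 */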
inline void
ia64_set_rbs_bot (void)
{
	unsigned long stack_size = current->rlim[RLIMIT_STACK].rlim_max & -16;

	if (stack_size > MAX_USER_STACK_SIZE)
		stack_size = MAX_USER_STACK_SIZE;
	current->thread.rbs_bot = STACK_TOP - stack_size;
}

/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to setup the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
	struct vm_area_struct *vma;

	ia64_set_rbs_bot();

	/*
	 * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
	 * the problem.  When the process attempts to write to the register backing store
	 * for the first time, it will get a SEGFAULT in this case.
	 */
	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (vma) {
		vma->vm_mm = current->mm;
		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
		vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE|VM_GROWSUP;
		vma->vm_ops = NULL;
		vma->vm_pgoff = 0;
		vma->vm_file = NULL;
		vma->vm_private_data = NULL;
		insert_vm_struct(current->mm, vma);
	}

	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
	if (!(current->personality & MMAP_PAGE_ZERO)) {
		vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
		if (vma) {
			memset(vma, 0, sizeof(*vma));
			vma->vm_mm = current->mm;
			vma->vm_end = PAGE_SIZE;
			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
			insert_vm_struct(current->mm, vma);
		}
	}
}
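
/*
 * Release the memory between __init_begin and __init_end back to the page
 * allocator once boot-time initialization code and data are no longer needed.
 */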
void
free_initmem (void)
{
	unsigned long addr, eaddr;

	addr = (unsigned long) ia64_imva(&__init_begin);
	eaddr = (unsigned long) ia64_imva(&__init_end);
	while (addr < eaddr) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
		free_page(addr);
		++totalram_pages;
		addr += PAGE_SIZE;
	}
	printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
	       (&__init_end - &__init_begin) >> 10);
}
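
/*
 * Release the pages holding the initial ramdisk image; see the comment below
 * for why the start and end addresses are aligned the way they are.
 */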
void
free_initrd_mem (unsigned long start, unsigned long end)
{
	struct page *page;
	/*
	 * EFI uses 4KB pages while the kernel can use 4KB or bigger.
	 * Thus EFI and the kernel may have different page sizes. It is
	 * therefore possible to have the initrd share the same page as
	 * the end of the kernel (given current setup).
	 *
	 * To avoid freeing/using the wrong page (kernel sized) we:
	 *	- align up the beginning of initrd
	 *	- align down the end of initrd
	 *
	 *  |=============| a000
	 *  |             |
	 *  |             | 9000
	 *  |/////////////|
	 *  |=============| 8000
	 *  |///INITRD////|
	 *  |/////////////| 7000
	 *  |KKKKKKKKKKKKK|
	 *  |=============| 6000
	 *  |KKKKKKKKKKKKK|
	 *
	 *  K=kernel using 8KB pages
	 *
	 * In this example, we must free page 8000 ONLY. So we must align up
	 * initrd_start and keep initrd_end as is.
	 */
	start = PAGE_ALIGN(start);
	end = end & PAGE_MASK;

	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

	for (; start < end; start += PAGE_SIZE) {
		if (!virt_addr_valid(start))
			continue;
		page = virt_to_page(start);
		ClearPageReserved(page);
		set_page_count(page, 1);
		free_page(start);
		++totalram_pages;
	}
}
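
/*
 * Dump a summary of memory usage (free swap, RAM/reserved/shared/swap-cached
 * page counts and the size of the page-table cache) to the console; with
 * CONFIG_DISCONTIGMEM the page counts are reported per node.
 */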
void
show_mem (void)
{
	int i, total = 0, reserved = 0;
	int shared = 0, cached = 0;

	printk("Mem-info:\n");
	show_free_areas();

#ifdef CONFIG_DISCONTIGMEM
	{
		pg_data_t *pgdat;

		printk("Free swap: %6dkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
		for_each_pgdat(pgdat) {
			printk("Node ID: %d\n", pgdat->node_id);
			for(i = 0; i < pgdat->node_size; i++) {
				if (PageReserved(pgdat->node_mem_map+i))
					reserved++;
				else if (PageSwapCache(pgdat->node_mem_map+i))
					cached++;
				else if (page_count(pgdat->node_mem_map + i))
					shared += page_count(pgdat->node_mem_map + i) - 1;
			}
			printk("\t%d pages of RAM\n", pgdat->node_size);
			printk("\t%d reserved pages\n", reserved);
			printk("\t%d pages shared\n", shared);
			printk("\t%d pages swap cached\n", cached);
		}
		printk("Total of %ld pages in page table cache\n", pgtable_cache_size);
		printk("%d free buffer pages\n", nr_free_buffer_pages());
	}
#else /* !CONFIG_DISCONTIGMEM */
	printk("Free swap: %6dkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map+i))
			reserved++;
		else if (PageSwapCache(mem_map+i))
			cached++;
		else if (page_count(mem_map + i))
			shared += page_count(mem_map + i) - 1;
	}
	printk("%d pages of RAM\n", total);
	printk("%d reserved pages\n", reserved);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
	printk("%ld pages in page table cache\n", pgtable_cache_size);
#endif /* !CONFIG_DISCONTIGMEM */
}

/*
 * This is like put_dirty_page() but installs a clean page in the kernel's page table.
 */
struct page *
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	if (!PageReserved(page))
		printk(KERN_ERR "put_kernel_page: page at 0x%p not in reserved memory\n",
		       page_address(page));

	pgd = pgd_offset_k(address);		/* note: this is NOT pgd_offset()! */

	spin_lock(&init_mm.page_table_lock);
	pmd = pmd_alloc(&init_mm, pgd, address);
	if (!pmd)
		goto out;
	pte = pte_alloc_map(&init_mm, pmd, address);
	if (!pte)
		goto out;
	if (!pte_none(*pte)) {
		pte_unmap(pte);
		goto out;
	}
	set_pte(pte, mk_pte(page, pgprot));
	pte_unmap(pte);
  out:	spin_unlock(&init_mm.page_table_lock);
	/* no need for flush_tlb */
	return page;
}
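
/*
 * Install the gate page in the kernel's page table.  The gate page holds the
 * signal trampolines and the "epc" entry points used for light-weight system
 * calls; it has to be mapped before boot memory is freed (see mem_init()).
 */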
static void
setup_gate (void)
{
	struct page *page;
	extern char __start_gate_section[];

	/*
	 * Map the gate page twice: once read-only to export the ELF headers etc. and once
	 * execute-only page to enable privilege-promotion via "epc":
	 */
	page = virt_to_page(ia64_imva(__start_gate_section));
	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
	page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
#endif
	ia64_patch_gate();
}
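
/*
 * Per-CPU MMU initialization: pin the per-CPU data area into the TLB with a
 * translation register and program the PTA register for the virtually mapped
 * linear page table (the VHPT walker is enabled unless CONFIG_DISABLE_VHPT).
 */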
void
ia64_mmu_init (void *my_cpu_data)
{
	unsigned long psr, pta, impl_va_bits;
	extern void __init tlb_init (void);
#ifdef CONFIG_DISABLE_VHPT
# define VHPT_ENABLE_BIT	0
#else
# define VHPT_ENABLE_BIT	1
#endif

	/* Pin mapping for percpu area into TLB */
	psr = ia64_clear_ic();
	ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
		 pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)),
		 PERCPU_PAGE_SHIFT);

	ia64_set_psr(psr);
	ia64_srlz_i();

	/*
	 * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
	 * address space.  The IA-64 architecture guarantees that at least 50 bits of
	 * virtual address space are implemented but if we pick a large enough page size
	 * (e.g., 64KB), the mapped address space is big enough that it will overlap with
	 * VMLPT.  I assume that once we run on machines big enough to warrant 64KB pages,
	 * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
	 * problem in practice.  Alternatively, we could truncate the top of the mapped
	 * address space to not permit mappings that would overlap with the VMLPT.
	 */
# define pte_bits		3
# define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
	/*
	 * The virtual page table has to cover the entire implemented address space within
	 * a region even though not all of this space may be mappable.  The reason for
	 * this is that the Access bit and Dirty bit fault handlers perform
	 * non-speculative accesses to the virtual page table, so the address range of the
	 * virtual page table itself needs to be covered by the virtual page table.
	 */
# define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
# define POW2(n)		(1ULL << (n))

	impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

	if (impl_va_bits < 51 || impl_va_bits > 61)
		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);

	/* place the VMLPT at the end of each page-table mapped region: */
	pta = POW2(61) - POW2(vmlpt_bits);

	if (POW2(mapped_space_bits) >= pta)
		panic("mm/init: overlap between virtually mapped linear page table and "
		      "mapped kernel space!");
	/*
	 * Set the (virtually mapped linear) page table address.  Bit
	 * 8 selects between the short and long format, bits 2-7 the
	 * size of the table, and bit 0 whether the VHPT walker is
	 * enabled.
	 */
	ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);

	ia64_tlb_init();
}

#ifdef CONFIG_VIRTUAL_MEM_MAP
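
/*
 * efi_memmap_walk() callback: allocate and populate the pgd/pmd/pte levels
 * needed to back the portion of the virtual mem_map that describes the
 * physical range [start, end), using boot-time page allocations.
 */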
static int
create_mem_map_page_table (u64 start, u64 end, void *arg)
{
	unsigned long address, start_page, end_page;
	struct page *map_start, *map_end;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	start_page = (unsigned long) map_start & PAGE_MASK;
	end_page = PAGE_ALIGN((unsigned long) map_end);

	for (address = start_page; address < end_page; address += PAGE_SIZE) {
		pgd = pgd_offset_k(address);
		if (pgd_none(*pgd))
			pgd_populate(&init_mm, pgd, alloc_bootmem_pages(PAGE_SIZE));
		pmd = pmd_offset(pgd, address);

		if (pmd_none(*pmd))
			pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages(PAGE_SIZE));
		pte = pte_offset_kernel(pmd, address);

		if (pte_none(*pte))
			set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages(PAGE_SIZE)) >> PAGE_SHIFT,
					     PAGE_KERNEL));
	}
	return 0;
}
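
/* Arguments handed from memmap_init() to the virtual_memmap_init() callback: */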
struct memmap_init_callback_data {
	struct page *start;
	struct page *end;
	int nid;
	unsigned long zone;
};

static int
virtual_memmap_init (u64 start, u64 end, void *arg)
{
	struct memmap_init_callback_data *args;
	struct page *map_start, *map_end;

	args = (struct memmap_init_callback_data *) arg;

	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	if (map_start < args->start)
		map_start = args->start;
	if (map_end > args->end)
		map_end = args->end;

	/*
	 * We have to initialize "out of bounds" struct page elements that fit completely
	 * on the same pages that were allocated for the "in bounds" elements because they
	 * may be referenced later (and found to be "reserved").
	 */
	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
	map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
		    / sizeof(struct page));

	if (map_start < map_end)
		memmap_init_zone(map_start, (unsigned long) (map_end - map_start),
				 args->nid, args->zone, page_to_pfn(map_start));
	return 0;
}
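
/*
 * Initialize the struct page entries for a zone.  Without a virtual mem_map
 * this reduces to memmap_init_zone(); with one, only the parts of the mem_map
 * that are actually backed by memory are initialized, by walking the EFI
 * memory map.
 */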
void
memmap_init (struct page *start, unsigned long size, int nid,
	     unsigned long zone, unsigned long start_pfn)
{
	if (!vmem_map)
		memmap_init_zone(start, size, nid, zone, start_pfn);
	else {
		struct memmap_init_callback_data args;

		args.start = start;
		args.end = start + size;
		args.nid = nid;
		args.zone = zone;

		efi_memmap_walk(virtual_memmap_init, &args);
	}
}
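
/*
 * ia64_pfn_valid() probes the mem_map entry for a pfn with __get_user(): a
 * pfn that falls into a memory hole, whose struct page was never mapped, is
 * reported as invalid instead of faulting.
 */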
int
ia64_pfn_valid (unsigned long pfn)
{
	char byte;

	return __get_user(byte, (char *) pfn_to_page(pfn)) == 0;
}

static int
count_dma_pages (u64 start, u64 end, void *arg)
{
	unsigned long *count = arg;

	if (end <= MAX_DMA_ADDRESS)
		*count += (end - start) >> PAGE_SHIFT;
	return 0;
}

static int
find_largest_hole (u64 start, u64 end, void *arg)
{
	u64 *max_gap = arg;
	static u64 last_end = PAGE_OFFSET;

	/* NOTE: this algorithm assumes efi memmap table is ordered */

	if (*max_gap < (start - last_end))
		*max_gap = start - last_end;
	last_end = end;
	return 0;
}
#endif /* CONFIG_VIRTUAL_MEM_MAP */
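
/* efi_memmap_walk() callback: count the physical pages in [start, end). */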
static int
count_pages (u64 start, u64 end, void *arg)
{
	unsigned long *count = arg;

	*count += (end - start) >> PAGE_SHIFT;
	return 0;
}

/*
 * Set up the page tables.
 */
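
/*
 * With CONFIG_DISCONTIGMEM the per-node mem_map setup is done by
 * discontig_paging_init().  Otherwise the DMA/normal zone sizes (and holes)
 * are computed here and, when CONFIG_VIRTUAL_MEM_MAP is enabled, a virtually
 * mapped mem_map is used if the largest hole in the physical address space
 * exceeds LARGE_GAP.
 */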
#ifdef CONFIG_DISCONTIGMEM
void
paging_init (void)
{
	extern void discontig_paging_init(void);

	discontig_paging_init();
	efi_memmap_walk(count_pages, &num_physpages);
	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}
#else /* !CONFIG_DISCONTIGMEM */
void
paging_init (void)
{
	unsigned long max_dma;
	unsigned long zones_size[MAX_NR_ZONES];
# ifdef CONFIG_VIRTUAL_MEM_MAP
	unsigned long zholes_size[MAX_NR_ZONES];
	unsigned long max_gap;
# endif

	/* initialize mem_map[] */

	memset(zones_size, 0, sizeof(zones_size));

	num_physpages = 0;
	efi_memmap_walk(count_pages, &num_physpages);

	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;

# ifdef CONFIG_VIRTUAL_MEM_MAP
	memset(zholes_size, 0, sizeof(zholes_size));

	num_dma_physpages = 0;
	efi_memmap_walk(count_dma_pages, &num_dma_physpages);

	if (max_low_pfn < max_dma) {
		zones_size[ZONE_DMA] = max_low_pfn;
		zholes_size[ZONE_DMA] = max_low_pfn - num_dma_physpages;
	} else {
		zones_size[ZONE_DMA] = max_dma;
		zholes_size[ZONE_DMA] = max_dma - num_dma_physpages;
		if (num_physpages > num_dma_physpages) {
			zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
			zholes_size[ZONE_NORMAL] = ((max_low_pfn - max_dma)
						    - (num_physpages - num_dma_physpages));
		}
	}

	max_gap = 0;
	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
	if (max_gap < LARGE_GAP) {
		vmem_map = (struct page *) 0;
		free_area_init_node(0, &contig_page_data, NULL, zones_size, 0, zholes_size);
		mem_map = contig_page_data.node_mem_map;
	} else {
		unsigned long map_size;

		/* allocate virtual_mem_map */

		map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page));
		vmalloc_end -= map_size;
		vmem_map = (struct page *) vmalloc_end;
		efi_memmap_walk(create_mem_map_page_table, 0);

		free_area_init_node(0, &contig_page_data, vmem_map, zones_size, 0, zholes_size);

		mem_map = contig_page_data.node_mem_map;
		printk("Virtual mem_map starts at 0x%p\n", mem_map);
	}
# else /* !CONFIG_VIRTUAL_MEM_MAP */
	if (max_low_pfn < max_dma)
		zones_size[ZONE_DMA] = max_low_pfn;
	else {
		zones_size[ZONE_DMA] = max_dma;
		zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
	}
	free_area_init(zones_size);
# endif /* !CONFIG_VIRTUAL_MEM_MAP */
	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}
#endif /* !CONFIG_DISCONTIGMEM */
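
/* efi_memmap_walk() callback: count the reserved pages in [start, end). */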
static int
count_reserved_pages (u64 start, u64 end, void *arg)
{
	unsigned long num_reserved = 0;
	unsigned long *count = arg;

	for (; start < end; start += PAGE_SIZE)
		if (PageReserved(virt_to_page(start)))
			++num_reserved;
	*count += num_reserved;
	return 0;
}

/*
 * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
 * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
 * down into the kernel and calling the normal (heavy-weight) syscall handler.  This is
 * useful for performance testing, but conceivably could also come in handy for debugging
 * purposes.
 */
static int nolwsys;

static int __init
nolwsys_setup (char *s)
{
	nolwsys = 1;
	return 1;
}

__setup("nolwsys", nolwsys_setup);
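
/*
 * mem_init() finishes the memory bootstrap: it hands the boot-memory pages to
 * the page allocator, prints the memory banner, sizes the page-table cache,
 * redirects fsyscall entries that lack a light-weight handler to the normal
 * syscall path, and maps the gate page.
 */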
void
mem_init (void)
{
	long reserved_pages, codesize, datasize, initsize;
	unsigned long num_pgt_pages;
	pg_data_t *pgdat;
	int i;
	static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel;

#ifdef CONFIG_PCI
	/*
	 * This needs to be called _after_ the command line has been parsed but _before_
	 * any drivers that may need the PCI DMA interface are initialized or bootmem has
	 * been freed.
	 */
	platform_dma_init();
#endif

#ifndef CONFIG_DISCONTIGMEM
	if (!mem_map)
		BUG();
	max_mapnr = max_low_pfn;
#endif

	high_memory = __va(max_low_pfn * PAGE_SIZE);

	kclist_add(&kcore_mem, __va(0), max_low_pfn * PAGE_SIZE);
	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, &_stext, &_end - &_stext);

	for_each_pgdat(pgdat)
		totalram_pages += free_all_bootmem_node(pgdat);

	reserved_pages = 0;
	efi_memmap_walk(count_reserved_pages, &reserved_pages);

	codesize = (unsigned long) &_etext - (unsigned long) &_stext;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
	       "%luk data, %luk init)\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
	       num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
	       reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);

	/*
	 * Allow for enough (cached) page table pages so that we can map the entire memory
	 * at least once.  Each task also needs a couple of page table pages, so add in a
	 * fudge factor for that (don't use "threads-max" here; that would be wrong!).
	 * Don't allow the cache to be more than 10% of total memory, though.
	 */
# define NUM_TASKS	500	/* typical number of tasks */
	num_pgt_pages = nr_free_pages() / PTRS_PER_PGD + NUM_TASKS;
	if (num_pgt_pages > nr_free_pages() / 10)
		num_pgt_pages = nr_free_pages() / 10;
	if (num_pgt_pages > (u64) pgt_cache_water[1])
		pgt_cache_water[1] = num_pgt_pages;

	/*
	 * For fsyscall entry points with no light-weight handler, use the ordinary
	 * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
	 * code can tell them apart.
	 */
	for (i = 0; i < NR_syscalls; ++i) {
		extern unsigned long fsyscall_table[NR_syscalls];
		extern unsigned long sys_call_table[NR_syscalls];

		if (!fsyscall_table[i] || nolwsys)
			fsyscall_table[i] = sys_call_table[i] | 1;
	}
	setup_gate();	/* setup gate pages before we free up boot memory... */

#ifdef CONFIG_IA32_SUPPORT