/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/efi.h>
#include <linux/mmzone.h>

#include <asm/a.out.h>
#include <asm/bitops.h>
#include <asm/dma.h>
#include <asm/ia32.h>
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
struct mmu_gather mmu_gathers[NR_CPUS];

/* References to section boundaries: */
extern char _stext, _etext, _edata, __init_begin, __init_end;

extern void ia64_tlb_init (void);

/* kernel virtual address corresponding to the 4GB physical DMA limit: */
unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

/* low and high water marks for the page-table cache, in pages: */
static int pgt_cache_water[2] = { 25, 50 };
void
check_pgt_cache (void)
{
	int low, high;

	low = pgt_cache_water[0];
	high = pgt_cache_water[1];

	if (pgtable_cache_size > high) {
		do {
			if (pgd_quicklist)
				free_page((unsigned long)pgd_alloc_one_fast(0));
			if (pmd_quicklist)
				free_page((unsigned long)pmd_alloc_one_fast(0, 0));
		} while (pgtable_cache_size > low);
	}
}
/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to set up the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
	struct vm_area_struct *vma;

	/*
	 * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
	 * the problem.  When the process attempts to write to the register backing store
	 * for the first time, it will get a SEGFAULT in this case.
	 */
	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (vma) {
		vma->vm_mm = current->mm;
		vma->vm_start = IA64_RBS_BOT;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
		vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE|VM_GROWSUP;
		vma->vm_ops = NULL;
		vma->vm_pgoff = 0;
		vma->vm_file = NULL;
		vma->vm_private_data = NULL;
		insert_vm_struct(current->mm, vma);
	}
	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
	if (!(current->personality & MMAP_PAGE_ZERO)) {
		vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
		if (vma) {
			memset(vma, 0, sizeof(*vma));
			vma->vm_mm = current->mm;
			vma->vm_end = PAGE_SIZE;
			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
			insert_vm_struct(current->mm, vma);
		}
	}
}
void
free_initmem (void)
{
	unsigned long addr;

	addr = (unsigned long) &__init_begin;
	for (; addr < (unsigned long) &__init_end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
		free_page(addr);
		++totalram_pages;
	}
	printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
	       (&__init_end - &__init_begin) >> 10);
}
void
free_initrd_mem (unsigned long start, unsigned long end)
{
	struct page *page;
	/*
	 * EFI uses 4KB pages while the kernel can use 4KB or bigger.
	 * Thus EFI and the kernel may have different page sizes. It is
	 * therefore possible to have the initrd share the same page as
	 * the end of the kernel (given current setup).
	 *
	 * To avoid freeing/using the wrong page (kernel sized) we:
	 *	- align up the beginning of initrd
	 *	- align down the end of initrd
	 *
136 * |=============| a000
142 * |=============| 8000
145 * |/////////////| 7000
148 * |=============| 6000
151 * K=kernel using 8KB pages
	 * In this example, we must free page 8000 ONLY. So we must align up
	 * initrd_start and keep initrd_end as is.
	 */
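	/*
	 * A hypothetical walk-through of the arithmetic, assuming 8KB kernel
	 * pages as in the diagram above: PAGE_ALIGN() rounds a start of
	 * 0x7000 up to 0x8000, while an already page-aligned end such as
	 * 0xa000 is left unchanged by the mask below, so only the page at
	 * 0x8000 gets freed.
	 */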
	start = PAGE_ALIGN(start);
	end = end & PAGE_MASK;

	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

	for (; start < end; start += PAGE_SIZE) {
		if (!virt_addr_valid(start))
			continue;
		page = virt_to_page(start);
		ClearPageReserved(page);
		set_page_count(page, 1);
		free_page(start);
		++totalram_pages;
	}
}
void
show_mem (void)
{
	int i, total = 0, reserved = 0;
	int shared = 0, cached = 0;

	printk("Mem-info:\n");
	show_free_areas();
#ifdef CONFIG_DISCONTIGMEM
	{
		pg_data_t *pgdat;

		printk("Free swap:       %6dkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
		for_each_pgdat(pgdat) {
			printk("Node ID: %d\n", pgdat->node_id);
			for (i = 0; i < pgdat->node_size; i++) {
				if (PageReserved(pgdat->node_mem_map+i))
					reserved++;
				else if (PageSwapCache(pgdat->node_mem_map+i))
					cached++;
				else if (page_count(pgdat->node_mem_map + i))
					shared += page_count(pgdat->node_mem_map + i) - 1;
			}
			printk("\t%d pages of RAM\n", pgdat->node_size);
			printk("\t%d reserved pages\n", reserved);
			printk("\t%d pages shared\n", shared);
			printk("\t%d pages swap cached\n", cached);
		}
		printk("Total of %ld pages in page table cache\n", pgtable_cache_size);
		printk("%d free buffer pages\n", nr_free_buffer_pages());
	}
#else /* !CONFIG_DISCONTIGMEM */
	printk("Free swap:       %6dkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map+i))
			reserved++;
		else if (PageSwapCache(mem_map+i))
			cached++;
		else if (page_count(mem_map + i))
			shared += page_count(mem_map + i) - 1;
	}
	printk("%d pages of RAM\n", total);
	printk("%d reserved pages\n", reserved);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
	printk("%ld pages in page table cache\n", pgtable_cache_size);
#endif /* !CONFIG_DISCONTIGMEM */
}
/*
 * This is like put_dirty_page() but installs a clean page with PAGE_GATE protection
 * (execute-only, typically).
 */
static struct page *
put_gate_page (struct page *page, unsigned long address)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	if (!PageReserved(page))
		printk(KERN_ERR "put_gate_page: gate page at 0x%p not in reserved memory\n",
		       page_address(page));

	pgd = pgd_offset_k(address);	/* note: this is NOT pgd_offset()! */

	spin_lock(&init_mm.page_table_lock);
	{
		pmd = pmd_alloc(&init_mm, pgd, address);
		if (!pmd)
			goto out;
		pte = pte_alloc_map(&init_mm, pmd, address);
		if (!pte)
			goto out;
		if (!pte_none(*pte)) {
			pte_unmap(pte);
			goto out;
		}
		set_pte(pte, mk_pte(page, PAGE_GATE));
		pte_unmap(pte);
	}
  out:	spin_unlock(&init_mm.page_table_lock);
	/* no need for flush_tlb */
	return page;
}
void __init
ia64_mmu_init (void *my_cpu_data)
{
	unsigned long psr, rid, pta, impl_va_bits;
	extern void __init tlb_init (void);
#ifdef CONFIG_DISABLE_VHPT
#	define VHPT_ENABLE_BIT	0
#else
#	define VHPT_ENABLE_BIT	1
#endif
	/*
	 * Set up the kernel identity mapping for regions 6 and 5.  The mapping for region
	 * 7 is set up in _start().
	 */
	psr = ia64_clear_ic();
	rid = ia64_rid(IA64_REGION_ID_KERNEL, __IA64_UNCACHED_OFFSET);
	ia64_set_rr(__IA64_UNCACHED_OFFSET, (rid << 8) | (IA64_GRANULE_SHIFT << 2));

	rid = ia64_rid(IA64_REGION_ID_KERNEL, VMALLOC_START);
	ia64_set_rr(VMALLOC_START, (rid << 8) | (PAGE_SHIFT << 2) | 1);
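	/*
	 * Region register encoding (per the IA-64 architecture): bits 31:8
	 * hold the region ID, bits 7:2 the preferred page size, and bit 0
	 * the per-region VHPT walker enable.  Region 6 (uncached) thus gets
	 * granule-sized pages with the walker disabled, while region 5
	 * (vmalloc) gets PAGE_SHIFT-sized pages with the walker enabled.
	 */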
	/* ensure rr6 is up-to-date before inserting the PERCPU_ADDR translation: */
	ia64_srlz_d();

	ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
		 pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)),
		 PERCPU_PAGE_SHIFT);

	ia64_set_psr(psr);
	ia64_srlz_i();
	/*
	 * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
	 * address space.  The IA-64 architecture guarantees that at least 50 bits of
	 * virtual address space are implemented but if we pick a large enough page size
	 * (e.g., 64KB), the mapped address space is big enough that it will overlap with
	 * VMLPT.  I assume that once we run on machines big enough to warrant 64KB pages,
	 * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
	 * problem in practice.  Alternatively, we could truncate the top of the mapped
	 * address space to not permit mappings that would overlap with the VMLPT.
	 */
#	define pte_bits			3
#	define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
	/*
	 * The virtual page table has to cover the entire implemented address space within
	 * a region even though not all of this space may be mappable.  The reason for
	 * this is that the Access bit and Dirty bit fault handlers perform
	 * non-speculative accesses to the virtual page table, so the address range of the
	 * virtual page table itself needs to be covered by the virtual page table.
	 */
#	define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
#	define POW2(n)			(1ULL << (n))
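	/*
	 * A worked example with assumed values (8KB pages, i.e. PAGE_SHIFT == 13,
	 * and impl_va_bits == 51): mapped_space_bits = 3*(13 - 3) + 13 = 43 and
	 * vmlpt_bits = 51 - 13 + 3 = 41, so the VMLPT occupies the top 2^41
	 * bytes of the region and POW2(43) stays well below pta = 2^61 - 2^41,
	 * i.e. no overlap.  The ia64_set_pta() call below then programs a
	 * 2^41-byte, short-format table with the walker enabled (unless
	 * CONFIG_DISABLE_VHPT is set).
	 */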

	impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

	if (impl_va_bits < 51 || impl_va_bits > 61)
		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);

	/* place the VMLPT at the end of each page-table mapped region: */
	pta = POW2(61) - POW2(vmlpt_bits);

	if (POW2(mapped_space_bits) >= pta)
		panic("mm/init: overlap between virtually mapped linear page table and "
		      "mapped kernel space!");
	/*
	 * Set the (virtually mapped linear) page table address.  Bit
	 * 8 selects between the short and long format, bits 2-7 the
	 * size of the table, and bit 0 whether the VHPT walker is
	 * enabled.
	 */
	ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);

	ia64_tlb_init();
}
/*
 * Set up the page tables.
 */
void
paging_init (void)
{
#ifdef CONFIG_DISCONTIGMEM
	extern void discontig_paging_init(void);

	discontig_paging_init();
#else /* !CONFIG_DISCONTIGMEM */
	{
		unsigned long max_dma, zones_size[MAX_NR_ZONES];

		/* initialize mem_map[] */

		memset(zones_size, 0, sizeof(zones_size));

		max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
		if (max_low_pfn < max_dma)
			zones_size[ZONE_DMA] = max_low_pfn;
		else {
			zones_size[ZONE_DMA] = max_dma;
			zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
		}
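		/*
		 * For example (hypothetical machine, 8KB pages): with 16GB of
		 * RAM, max_dma = 4GB >> PAGE_SHIFT = 512K pages, so ZONE_DMA
		 * covers the first 512K pages and ZONE_NORMAL the remaining
		 * 1.5M pages.
		 */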
		free_area_init(zones_size);
	}
#endif /* !CONFIG_DISCONTIGMEM */
}
static int
count_pages (u64 start, u64 end, void *arg)
{
	unsigned long *count = arg;

	*count += (end - start) >> PAGE_SHIFT;
	return 0;
}
static int
count_reserved_pages (u64 start, u64 end, void *arg)
{
	unsigned long num_reserved = 0;
	unsigned long *count = arg;

	for (; start < end; start += PAGE_SIZE)
		if (PageReserved(virt_to_page(start)))
			++num_reserved;
	*count += num_reserved;
	return 0;
}
void
mem_init (void)
{
	extern char __start_gate_section[];
	long reserved_pages, codesize, datasize, initsize;
	unsigned long num_pgt_pages;
	pg_data_t *pgdat;

#ifdef CONFIG_PCI
	/*
	 * This needs to be called _after_ the command line has been parsed but _before_
	 * any drivers that may need the PCI DMA interface are initialized or bootmem has
	 * been freed.
	 */
	platform_pci_dma_init();
#endif
#ifndef CONFIG_DISCONTIGMEM
	if (!mem_map)
		BUG();
	max_mapnr = max_low_pfn;
#endif

	num_physpages = 0;
	efi_memmap_walk(count_pages, &num_physpages);

	high_memory = __va(max_low_pfn * PAGE_SIZE);

	for_each_pgdat(pgdat)
		totalram_pages += free_all_bootmem_node(pgdat);

	reserved_pages = 0;
	efi_memmap_walk(count_reserved_pages, &reserved_pages);
	codesize = (unsigned long) &_etext - (unsigned long) &_stext;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
	       "%luk data, %luk init)\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
	       num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
	       reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);
	/*
	 * Allow for enough (cached) page table pages so that we can map the entire memory
	 * at least once.  Each task also needs a couple of page-table pages, so add in a
	 * fudge factor for that (don't use "threads-max" here; that would be wrong!).
	 * Don't allow the cache to be more than 10% of total memory, though.
	 */
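	/*
	 * Worked example with assumed numbers (8KB pages, PTRS_PER_PGD == 1024,
	 * 1GB free): nr_free_pages() is ~128K, so the target below is
	 * 128K/1024 + 500 = 628 pages, comfortably under the 10% cap of
	 * ~13K pages.
	 */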
#	define NUM_TASKS	500	/* typical number of tasks */
	num_pgt_pages = nr_free_pages() / PTRS_PER_PGD + NUM_TASKS;
	if (num_pgt_pages > nr_free_pages() / 10)
		num_pgt_pages = nr_free_pages() / 10;
	if (num_pgt_pages > pgt_cache_water[1])
		pgt_cache_water[1] = num_pgt_pages;
	/* install the gate page in the global page table: */
	put_gate_page(virt_to_page(__start_gate_section), GATE_ADDR);

#ifdef CONFIG_IA32_SUPPORT
	ia32_gdt_init();
#endif
}