2 * linux/arch/arm/mm/mm-armo.c
4 * Copyright (C) 1998-2000 Russell King
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
10 * Page table sludge for older ARM processor architectures.
12 #include <linux/sched.h>
14 #include <linux/init.h>
15 #include <linux/bootmem.h>
17 #include <asm/pgtable.h>
18 #include <asm/pgalloc.h>
20 #include <asm/arch/memory.h>
22 #include <asm/mach/map.h>
24 #define MEMC_TABLE_SIZE (256*sizeof(unsigned long))
25 #define PGD_TABLE_SIZE (PTRS_PER_PGD * BYTES_PER_PTR)
29 extern unsigned long get_page_2k(int prio);
30 extern void free_page_2k(unsigned long);
31 extern pte_t *get_bad_pte_table(void);
34 * Allocate a page table. Note that we place the MEMC
35 * table before the page directory. This means we can
36 * easily get to both tightly-associated data structures
37 * with a single pointer.
39 * We actually only need 1152 bytes, 896 bytes is wasted.
40 * We could try to fit 7 PTEs into that slot somehow.
/*
 * alloc_pgd_table() - carve a page directory out of a single 2K page.
 * The MEMC hardware table occupies the first MEMC_TABLE_SIZE bytes of
 * the allocation; the returned pointer is advanced past it so the pgd
 * and its MEMC table travel together behind one pointer (the inverse
 * adjustment is done in free_pgd_slow()).
 * NOTE(review): this listing is elided — the braces, the declaration
 * of pg2k, the failure check on get_page_2k(), and the return
 * statement are not visible here; confirm against the full source.
 */
42 static inline void *alloc_pgd_table(int priority)
46 pg2k = get_page_2k(priority);
48 pg2k += MEMC_TABLE_SIZE;
/*
 * free_pgd_slow() - release a page directory obtained from
 * alloc_pgd_table().  Steps the pointer back over the MEMC table that
 * precedes the pgd so the whole 2K page can be returned as one unit.
 * NOTE(review): the closing free_page_2k(tbl) call and braces are
 * elided from this excerpt — verify in the full source.
 */
53 void free_pgd_slow(pgd_t *pgd)
55 unsigned long tbl = (unsigned long)pgd;
57 tbl -= MEMC_TABLE_SIZE;
62 * FIXME: the following over-allocates by 1600%
/*
 * alloc_pte_table() - allocate storage for a pte table.
 * Always hands back a full 2K page regardless of the requested size,
 * which is what the FIXME above refers to (over-allocation).
 * The printk fires on a bad size request; the size-validation branch
 * that guards it is elided from this listing.
 * NOTE(review): the surrounding if/braces are not visible here —
 * confirm control flow against the full source.
 */
64 static inline void *alloc_pte_table(int size, int prio)
67 printk("invalid table size\n");
68 return (void *)get_page_2k(prio);
/*
 * free_pte_slow() - release a pte table allocated by alloc_pte_table().
 * Converts the pointer to an address for the 2K-page allocator.
 * NOTE(review): the free_page_2k(tbl) call is elided from this
 * excerpt — verify in the full source.
 */
71 void free_pte_slow(pte_t *pte)
73 unsigned long tbl = (unsigned long)pte;
/*
 * get_pgd_slow() - build a fresh page directory for a new mm.
 * Zeroes the user half, copies the kernel half from init_mm's pgd,
 * pre-populates the pmd/pte for virtual address 0 (required on these
 * ARM parts — see comment below), copies the first pte entry from the
 * init tables, then pushes the result into the MEMC hardware tables.
 * NOTE(review): this listing is elided — the error-unwind paths after
 * the failed pmd_alloc()/pte_alloc() calls, the declarations of
 * new_pmd and the surrounding braces, and the final return are not
 * visible here; confirm against the full source before relying on
 * this description of the failure behaviour.
 */
77 pgd_t *get_pgd_slow(void)
79 pgd_t *pgd = (pgd_t *)alloc_pgd_table(GFP_KERNEL);
83 pgd_t *init = pgd_offset(&init_mm, 0);
/* Clear user mappings, then share the kernel mappings with init_mm. */
85 memzero(pgd, USER_PTRS_PER_PGD * sizeof(pgd_t));
86 memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
87 (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
90 * On ARM, first page must always be allocated
92 if (!pmd_alloc(pgd, 0))
95 pmd_t *old_pmd = pmd_offset(init, 0);
96 new_pmd = pmd_offset(pgd, 0);
98 if (!pte_alloc(new_pmd, 0))
101 pte_t *new_pte = pte_offset(new_pmd, 0);
102 pte_t *old_pte = pte_offset(old_pmd, 0);
/* Copy the single pte entry for page 0 from the init tables. */
104 set_pte (new_pte, *old_pte);
107 /* update MEMC tables */
108 cpu_memc_update_all(pgd);
/*
 * get_pte_slow() - populate a missing pte table for @pmd and return
 * the pte entry at @offset within it.
 * On successful allocation the new table is zeroed and installed as a
 * user pmd; if allocation fails the pmd is pointed at the shared "bad"
 * pte table instead.  __handle_bad_pmd() is the recovery path for a
 * pmd found in an unexpected state.
 * NOTE(review): the branch structure between these lines (the else
 * arms, the pmd_bad() check and braces) is elided from this listing —
 * confirm the exact control flow against the full source.
 */
119 pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
123 pte = (pte_t *)alloc_pte_table(PTRS_PER_PTE * sizeof(pte_t), GFP_KERNEL);
124 if (pmd_none(*pmd)) {
126 memzero(pte, PTRS_PER_PTE * sizeof(pte_t));
127 set_pmd(pmd, mk_user_pmd(pte));
130 set_pmd(pmd, mk_user_pmd(get_bad_pte_table()));
135 __handle_bad_pmd(pmd);
138 return (pte_t *) pmd_page(*pmd) + offset;
142 * No special code is required here.
/*
 * setup_mm_for_reboot() - prepare the memory map for reboot.
 * Per the comment above, no special work is required on this
 * architecture; the body (elided from this listing) is presumably
 * empty — TODO confirm against the full source.
 */
144 void setup_mm_for_reboot(char mode)
149 * This contains the code to setup the memory map on an ARM2/ARM250/ARM3
150 * machine. This is both processor & architecture specific, and requires
151 * some more work to get it to fit into our separate processor and
152 * architecture structure.
/*
 * memtable_init() - boot-time construction of the kernel memory map
 * for ARM2/ARM250/ARM3.  Records the highest usable page frame,
 * allocates the initial pte table from bootmem, maps the first entry
 * read-only at a fixed physical offset (491520 bytes = 480K above
 * PAGE_OFFSET — presumably a platform-specific page; confirm), hooks
 * it into swapper_pg_dir, and clears every remaining pgd slot.
 * NOTE(review): the declarations of page_nr, pte and i, plus the
 * braces, are elided from this listing.
 */
154 void __init memtable_init(struct meminfo *mi)
159 page_nr = max_low_pfn;
161 pte = alloc_bootmem_low_pages(PTRS_PER_PTE * sizeof(pte_t));
162 pte[0] = mk_pte_phys(PAGE_OFFSET + 491520, PAGE_READONLY);
163 set_pmd(pmd_offset(swapper_pg_dir, 0), mk_kernel_pmd(pte));
/* Invalidate every pgd entry after the first. */
165 for (i = 1; i < PTRS_PER_PGD; i++)
166 pgd_val(swapper_pg_dir[i]) = 0;
/*
 * iotable_init() - establish I/O mappings described by @io_desc.
 * The body is elided from this listing; on this architecture it is
 * presumably empty or trivial — TODO confirm against the full source.
 */
169 void __init iotable_init(struct map_desc *io_desc)
175 * We never have holes in the memmap
177 void __init create_memmap_holes(struct meminfo *mi)