/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Derived from include/asm-i386/pgtable.h
 * Licensed under the GPL
 */

#ifndef __UM_PGTABLE_H
#define __UM_PGTABLE_H
#include "linux/sched.h"
#include "asm/processor.h"
#include "asm/page.h"
#include "asm/fixmap.h"
extern pgd_t swapper_pg_dir[1024];
extern void *um_virt_to_phys(struct task_struct *task, unsigned long virt,
                             pte_t *pte_out);
/* zero page used for uninitialized stuff */
extern unsigned long *empty_zero_page;

#define pgtable_cache_init() do ; while (0)
/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT 22
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT 22
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
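/*
 * Worked example (illustrative, not from the original source): with
 * PMD_SHIFT == PGDIR_SHIFT == 22, each top-level entry maps
 * 1UL << 22 == 0x00400000 (4MB) and PGDIR_MASK == 0xffc00000, so the
 * 1024 pgd entries defined below cover the full 4GB 32-bit address space.
 */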
/*
 * entries per page directory level: the i386 is two-level, so
 * we don't really have any PMD directory physically.
 */
#define PTRS_PER_PTE 1024
#define PTRS_PER_PMD 1
#define PTRS_PER_PGD 1024
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_PGD_NR 0
#define pte_ERROR(e) \
        printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
        printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
 * pgd entries used up by user/kernel:
 */

#define USER_PGD_PTRS (TASK_SIZE >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
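/*
 * For example (assuming a hypothetical TASK_SIZE of 0xc0000000, 3GB):
 * USER_PGD_PTRS == 0xc0000000 >> 22 == 768 entries for userspace,
 * leaving KERNEL_PGD_PTRS == 1024 - 768 == 256 entries for the kernel.
 */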
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
extern unsigned long high_physmem;

#define VMALLOC_OFFSET (__va_space)
#define VMALLOC_START (((unsigned long) high_physmem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
#endif
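/*
 * Example of the VMALLOC_START arithmetic (illustrative values, and
 * assuming __va_space is a power of two): if __va_space is 0x00800000
 * (8MB) and high_physmem is 0x30000000, then
 * (0x30000000 + 0x00800000) & ~0x007fffff == 0x30800000, i.e. vmalloc
 * space starts 8MB above physical memory, on an 8MB boundary.
 */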
#define _PAGE_PRESENT 0x001
#define _PAGE_NEWPAGE 0x002
#define _PAGE_NEWPROT 0x004
#define _PAGE_FILE 0x008 /* set:pagecache unset:swap */
#define _PAGE_PROTNONE 0x010 /* If not present */
#define _PAGE_RW 0x020
#define _PAGE_USER 0x040
#define _PAGE_ACCESSED 0x080
#define _PAGE_DIRTY 0x100
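/*
 * Resulting pte flag layout (low nine bits, as defined above):
 *
 *      bit 0  _PAGE_PRESENT
 *      bit 1  _PAGE_NEWPAGE  (UML-specific; see set_pte() below)
 *      bit 2  _PAGE_NEWPROT  (UML-specific; see set_pte() below)
 *      bit 3  _PAGE_FILE
 *      bit 4  _PAGE_PROTNONE
 *      bit 5  _PAGE_RW
 *      bit 6  _PAGE_USER
 *      bit 7  _PAGE_ACCESSED
 *      bit 8  _PAGE_DIRTY
 */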
#define REGION_MASK 0xf0000000
#define REGION_SHIFT 28

#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED)
/*
 * The i386 can't do page protection for execute, and considers that
 * the same as a read. Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_READONLY
#define __P101 PAGE_READONLY
#define __P110 PAGE_COPY
#define __P111 PAGE_COPY
#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
#define __S100 PAGE_READONLY
#define __S101 PAGE_READONLY
#define __S110 PAGE_SHARED
#define __S111 PAGE_SHARED
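/*
 * The __P and __S tables are indexed by the exec/write/read ("xwr")
 * permission bits of a private or shared mapping respectively. For
 * example, a PROT_READ|PROT_WRITE private mapping gets __P011 ==
 * PAGE_COPY (write-protected so it can be copied on write), while the
 * same shared mapping gets __S011 == PAGE_SHARED (genuinely writable).
 */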
/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without a 'verify_area(VERIFY_WRITE,..)'
 */
#undef TEST_VERIFY_AREA
/* page table for 0-4MB for everybody */
extern unsigned long pg0[1024];
/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern pte_t __bad_page(void);
extern pte_t *__bad_pagetable(void);

#define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page()
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR (8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK (~(sizeof(void*)-1))

/* sizeof(void*) == 1 << SIZEOF_PTR_LOG2 */
/* 64-bit machines, beware! SRB. */
#define SIZEOF_PTR_LOG2 2
/* to find an entry in a page-table */
#define PAGE_PTR(address) \
        ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
#define pte_none(x) (!(pte_val(x) & ~_PAGE_NEWPAGE))
#define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))

#define pte_clear(xp) do { pte_val(*(xp)) = _PAGE_NEWPAGE; } while (0)
#define phys_region_index(x) (((x) & REGION_MASK) >> REGION_SHIFT)
#define pte_region_index(x) phys_region_index(pte_val(x))
#define pmd_none(x) (!(pmd_val(x) & ~_PAGE_NEWPAGE))
#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp) do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)

#define pmd_newpage(x) (pmd_val(x) & _PAGE_NEWPAGE)
#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)
180 * The "pgd_xxx()" functions here are trivial for a folded two-level
181 * setup: the pgd is never bad, and a pmd always exists (as it's folded
182 * into the pgd entry)
184 static inline int pgd_none(pgd_t pgd) { return 0; }
185 static inline int pgd_bad(pgd_t pgd) { return 0; }
186 static inline int pgd_present(pgd_t pgd) { return 1; }
187 static inline void pgd_clear(pgd_t * pgdp) { }
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
extern struct page *pte_mem_map(pte_t pte);
extern struct page *phys_mem_map(unsigned long phys);
extern unsigned long phys_to_pfn(unsigned long p);
extern unsigned long pfn_to_phys(unsigned long pfn);
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pte_address(x) (__va(pte_val(x) & PAGE_MASK))
#define mk_phys(a, r) ((a) + ((r) << REGION_SHIFT))
#define phys_addr(p) ((p) & ~REGION_MASK)
#define phys_page(p) (phys_mem_map(p) + ((phys_addr(p)) >> PAGE_SHIFT))
#define pte_pfn(x) phys_to_pfn(pte_val(x))
#define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot))
#define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot))
/*
 * Bits 0 through 3 are taken
 */
#define PTE_FILE_MAX_BITS 28

#define pte_to_pgoff(pte) ((pte).pte_low >> 4)

#define pgoff_to_pte(off) \
        ((pte_t) { ((off) << 4) + _PAGE_FILE })
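/*
 * Layout of a nonlinear file pte, as encoded above: _PAGE_FILE (bit 3)
 * set, _PAGE_PRESENT (bit 0) clear, and the file page offset stored in
 * bits 4-31, hence PTE_FILE_MAX_BITS == 28. For example, pgoff_to_pte(5)
 * produces a pte whose value is (5 << 4) | _PAGE_FILE == 0x58.
 */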
static inline pte_t pte_mknewprot(pte_t pte)
{
        pte_val(pte) |= _PAGE_NEWPROT;
        return(pte);
}

static inline pte_t pte_mknewpage(pte_t pte)
{
        pte_val(pte) |= _PAGE_NEWPAGE;
        return(pte);
}
static inline void set_pte(pte_t *pteptr, pte_t pteval)
{
        /* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
         * fix_range knows to unmap it. _PAGE_NEWPROT is specific to
         * mapped pages.
         */
        *pteptr = pte_mknewpage(pteval);
        if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
}
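/*
 * Illustrative caller's view (hypothetical usage): after
 *
 *      set_pte(ptep, mk_pte(page, PAGE_SHARED));
 *
 * the pte carries _PAGE_NEWPAGE and _PAGE_NEWPROT on top of the
 * protection bits, so fix_range() can see that the host mapping for
 * this virtual address must be (re)established; pte_mkuptodate()
 * clears both flags again once that has happened.
 */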
/*
 * (pmds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
#define set_pgd(pgdptr, pgdval) (*(pgdptr) = (pgdval))
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_user(pte_t pte)
{
        return((pte_val(pte) & _PAGE_USER) &&
               !(pte_val(pte) & _PAGE_PROTNONE));
}

static inline int pte_read(pte_t pte)
{
        return((pte_val(pte) & _PAGE_USER) &&
               !(pte_val(pte) & _PAGE_PROTNONE));
}

static inline int pte_exec(pte_t pte)
{
        return((pte_val(pte) & _PAGE_USER) &&
               !(pte_val(pte) & _PAGE_PROTNONE));
}

static inline int pte_write(pte_t pte)
{
        return((pte_val(pte) & _PAGE_RW) &&
               !(pte_val(pte) & _PAGE_PROTNONE));
}

/*
 * The following only works if pte_present() is not true.
 */
static inline int pte_file(pte_t pte)
{
        return (pte).pte_low & _PAGE_FILE;
}
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_newpage(pte_t pte) { return pte_val(pte) & _PAGE_NEWPAGE; }
static inline int pte_newprot(pte_t pte)
{
        return(pte_present(pte) && (pte_val(pte) & _PAGE_NEWPROT));
}

static inline pte_t pte_rdprotect(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_USER;
        return(pte_mknewprot(pte));
}

static inline pte_t pte_exprotect(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_USER;
        return(pte_mknewprot(pte));
}

static inline pte_t pte_mkclean(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_DIRTY;
        return(pte);
}

static inline pte_t pte_mkold(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_ACCESSED;
        return(pte);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_RW;
        return(pte_mknewprot(pte));
}

static inline pte_t pte_mkread(pte_t pte)
{
        pte_val(pte) |= _PAGE_USER;
        return(pte_mknewprot(pte));
}

static inline pte_t pte_mkexec(pte_t pte)
{
        pte_val(pte) |= _PAGE_USER;
        return(pte_mknewprot(pte));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        pte_val(pte) |= _PAGE_DIRTY;
        return(pte);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        pte_val(pte) |= _PAGE_ACCESSED;
        return(pte);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        pte_val(pte) |= _PAGE_RW;
        return(pte_mknewprot(pte));
}

static inline pte_t pte_mkuptodate(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_NEWPAGE;
        if(pte_present(pte)) pte_val(pte) &= ~_PAGE_NEWPROT;
        return(pte);
}
extern unsigned long page_to_phys(struct page *page);
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot) \
({ \
        pte_t __pte; \
 \
        pte_val(__pte) = page_to_phys(page) + pgprot_val(pgprot); \
        if(pte_present(__pte)) \
                __pte = pte_mknewprot(pte_mknewpage(__pte)); \
        __pte; \
})
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
        if(pte_present(pte)) pte = pte_mknewpage(pte_mknewprot(pte));
        return pte;
}
#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd) (phys_mem_map(pmd_val(pmd) & PAGE_MASK) + \
                       ((phys_addr(pmd_val(pmd)) >> PAGE_SHIFT)))
/*
 * The pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * This macro returns the index of the entry in the pgd page which would
 * control the given virtual address.
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
/*
 * pgd_offset() returns a (pgd_t *).
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's.
 */
#define pgd_offset(mm, address) \
        ((mm)->pgd + ((address) >> PGDIR_SHIFT))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
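/*
 * Worked example (illustrative address): for address == 0xc0100000,
 * pgd_index(address) == (0xc0100000 >> 22) & 1023 == 768, so
 * pgd_offset(mm, 0xc0100000) points at entry 768 of mm->pgd.
 */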
#define pmd_index(address) \
        (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
{
        return (pmd_t *) dir;
}
/*
 * The pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * This macro returns the index of the entry in the pte page which would
 * control the given virtual address.
 */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
        ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address) \
        ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(address))
#define pte_offset_map_nested(dir, address) \
        ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(address))
#define pte_unmap(pte) kunmap_atomic((pte), KM_PTE0)
#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
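/*
 * A minimal (hypothetical) lookup sequence using the helpers above,
 * with the pte page mapped and unmapped around the access as
 * CONFIG_HIGHPTE requires:
 *
 *      pgd_t *pgd = pgd_offset(mm, address);
 *      pmd_t *pmd = pmd_offset(pgd, address);
 *      pte_t *pte = pte_offset_map(pmd, address);
 *      pte_t entry = *pte;
 *      pte_unmap(pte);
 */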
#if defined(CONFIG_HIGHPTE) && defined(CONFIG_HIGHMEM4G)
typedef u32 pte_addr_t;
#endif

#if defined(CONFIG_HIGHPTE) && defined(CONFIG_HIGHMEM64G)
typedef u64 pte_addr_t;
#endif

#if !defined(CONFIG_HIGHPTE)
typedef pte_t *pte_addr_t;
#endif
#define update_mmu_cache(vma,address,pte) do ; while (0)
/* Encode and de-code a swap entry */
#define __swp_type(x) (((x).val >> 4) & 0x3f)
#define __swp_offset(x) ((x).val >> 11)

#define __swp_entry(type, offset) \
        ((swp_entry_t) { ((type) << 4) | ((offset) << 11) })
#define __pte_to_swp_entry(pte) \
        ((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
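/*
 * Resulting swap pte layout, as implied by the shifts above:
 *
 *      bits  0-3   flag bits (_PAGE_PRESENT and _PAGE_FILE both clear)
 *      bits  4-9   swap type (up to 64 swap areas)
 *      bit   10    unused
 *      bits 11-31  page offset within the swap area
 */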
#define kern_addr_valid(addr) (1)

#include <asm-generic/pgtable.h>
#endif

/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only. This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-file-style: "linux"
 * End:
 */