/*
 * (C) Copyright 1994 Linus Torvalds
 *
 * Modified from Linus' source to allow removing active mappings from
 * any task. This is required for implementing the virtual graphics
 * interface for direct rendering on the SGI - miguel.
 *
 * Added a routine to map a vmalloc()ed area into user space; this one
 * is required by the /dev/shmiq driver - miguel.
 */
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>

#include <asm/system.h>
#include <asm/pgalloc.h>
static inline void
remove_mapping_pte_range (pmd_t *pmd, unsigned long address, unsigned long size)
{
	pte_t *pte;
	unsigned long end;

	if (pmd_none (*pmd))
		return;
	if (pmd_bad (*pmd)) {
		printk ("remove_mapping_pte_range: bad pmd (%08lx)\n", pmd_val (*pmd));
		pmd_clear (pmd);
		return;
	}
	pte = pte_offset (pmd, address);
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t entry = *pte;

		/* Make any present pte in the range inaccessible, so
		   the next access faults. */
		if (pte_present (entry))
			set_pte (pte, pte_modify (entry, PAGE_NONE));
		address += PAGE_SIZE;
		pte++;
	} while (address < end);
}
static inline void
remove_mapping_pmd_range (pgd_t *pgd, unsigned long address, unsigned long size)
{
	pmd_t *pmd;
	unsigned long end;

	if (pgd_none (*pgd))
		return;
	if (pgd_bad (*pgd)) {
		printk ("remove_mapping_pmd_range: bad pgd (%08lx)\n", pgd_val (*pgd));
		pgd_clear (pgd);
		return;
	}
	pmd = pmd_offset (pgd, address);
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		remove_mapping_pte_range (pmd, address, end - address);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
}
/*
 * This routine is called from the page fault handler to remove a
 * range of active mappings from the given task.
 */
void
remove_mapping (struct task_struct *task, unsigned long start, unsigned long end)
{
	unsigned long beg = start;
	pgd_t *dir;

	down (&task->mm->mmap_sem);
	dir = pgd_offset (task->mm, start);
	flush_cache_range (task->mm, beg, end);
	while (start < end) {
		remove_mapping_pmd_range (dir, start, end - start);
		start = (start + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	}
	flush_tlb_range (task->mm, beg, end);
	up (&task->mm->mmap_sem);
}
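
/*
 * Usage sketch (hypothetical, not part of this file): a direct-rendering
 * driver that takes the graphics window away from its current owner
 * could revoke the owner's mappings like this; the owner's next access
 * then traps into the page fault handler for arbitration.
 */
#if 0
static void example_gfx_revoke (struct task_struct *owner,
				unsigned long win_start, unsigned long win_size)
{
	remove_mapping (owner, win_start, win_start + win_size);
}
#endif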
void *vmalloc_uncached (unsigned long size)
{
	return __vmalloc (size, GFP_KERNEL | __GFP_HIGHMEM,
			  PAGE_KERNEL_UNCACHED);
}
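
/*
 * Usage sketch (hypothetical): allocate an uncached buffer for a device
 * that is not cache-coherent with the CPU. The name, size and error
 * handling are illustrative only.
 */
#if 0
static void *example_alloc_ring (void)
{
	void *ring = vmalloc_uncached (64 * 1024);	/* 64 KB, uncached */

	if (!ring)
		printk ("example_alloc_ring: out of memory\n");
	return ring;
}
#endif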
static inline void free_pte(pte_t page)
{
	if (pte_present(page)) {
		struct page *ptpage = pte_page(page);
		if ((!VALID_PAGE(ptpage)) || PageReserved(ptpage))
			return;
		free_page_and_swap_cache(ptpage);
		if (current->mm->rss <= 0)
			return;
		current->mm->rss--;
		return;
	}
	/* Not present: the pte holds a swap entry; release it. */
	swap_free(pte_to_swp_entry(page));
}
static inline void forget_pte(pte_t page)
{
	if (!pte_none(page)) {
		printk("forget_pte: old mapping existed!\n");
		free_pte(page);
	}
}
/*
 * Maps a range of vmalloc()ed memory into the requested user pages;
 * the old mappings are removed.
 */
static inline void
vmap_pte_range (pte_t *pte, unsigned long address, unsigned long size, unsigned long vaddr)
{
	unsigned long end;
	pgd_t *vdir;
	pmd_t *vpmd;
	pte_t *vpte;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t oldpage = *pte;
		struct page *page;

		pte_clear (pte);

		/* Walk the kernel page tables to find the page that
		   backs this piece of the vmalloc() area. */
		vdir = pgd_offset_k (vaddr);
		vpmd = pmd_offset (vdir, vaddr);
		vpte = pte_offset (vpmd, vaddr);
		page = pte_page (*vpte);

		set_pte (pte, mk_pte (page, PAGE_USERIO));
		forget_pte (oldpage);
		address += PAGE_SIZE;
		vaddr += PAGE_SIZE;
		pte++;
	} while (address < end);
}
static inline int
vmap_pmd_range (pmd_t *pmd, unsigned long address, unsigned long size, unsigned long vaddr)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	vaddr -= address;
	do {
		pte_t *pte = pte_alloc (pmd, address);
		if (!pte)
			return -ENOMEM;
		vmap_pte_range (pte, address, end - address, address + vaddr);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
	return 0;
}
int
vmap_page_range (unsigned long from, unsigned long size, unsigned long vaddr)
{
	int error = 0;
	pgd_t *dir;
	unsigned long beg = from;
	unsigned long end = from + size;

	vaddr -= from;
	dir = pgd_offset (current->mm, from);
	flush_cache_range (current->mm, beg, end);
	while (from < end) {
		pmd_t *pmd = pmd_alloc (dir, from);
		error = -ENOMEM;
		if (!pmd)
			break;
		error = vmap_pmd_range (pmd, from, end - from, vaddr + from);
		if (error)
			break;
		from = (from + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	}
	flush_tlb_range (current->mm, beg, end);
	return error;
}
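
/*
 * Usage sketch (hypothetical, in the style of the /dev/shmiq driver
 * mentioned above): an mmap() file operation could back a user vma
 * with an uncached vmalloc() area by pairing the two helpers.
 */
#if 0
static int example_mmap (struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	void *kva = vmalloc_uncached (size);

	if (!kva)
		return -ENOMEM;
	/* Rewrite the user ptes to point at the vmalloc()ed pages. */
	return vmap_page_range (vma->vm_start, size, (unsigned long) kva);
}
#endif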