/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@redhat.com>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/rmap.h>
#include <linux/security.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
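/*
 * Note: on highmem configurations the page tables themselves may come
 * from highmem, so a pte page must be atomically kmapped before its
 * entries can be touched.  The pte_offset_map_nested/pte_unmap_nested
 * variants used below take a second atomic kmap slot, so that the
 * source and destination pte pages can be mapped at the same time.
 */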
static pte_t *get_one_pte_map_nested(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, addr);
        if (pgd_none(*pgd))
                goto end;
        if (pgd_bad(*pgd)) {
                pgd_ERROR(*pgd);
                pgd_clear(pgd);
                goto end;
        }

        pmd = pmd_offset(pgd, addr);
        if (pmd_none(*pmd))
                goto end;
        if (pmd_bad(*pmd)) {
                pmd_ERROR(*pmd);
                pmd_clear(pmd);
                goto end;
        }

        pte = pte_offset_map_nested(pmd, addr);
        if (pte_none(*pte)) {
                pte_unmap_nested(pte);
                pte = NULL;
        }
end:
        return pte;
}
#ifdef CONFIG_HIGHMEM	/* Save a few cycles on the sane machines */
static inline int page_table_present(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pmd_t *pmd;

        pgd = pgd_offset(mm, addr);
        if (pgd_none(*pgd))
                return 0;
        pmd = pmd_offset(pgd, addr);
        return pmd_present(*pmd);
}
#else
#define page_table_present(mm, addr)	(1)
#endif
static inline pte_t *alloc_one_pte_map(struct mm_struct *mm, unsigned long addr)
{
        pmd_t *pmd;
        pte_t *pte = NULL;

        pmd = pmd_alloc(mm, pgd_offset(mm, addr), addr);
        if (pmd)
                pte = pte_alloc_map(mm, pmd, addr);
        return pte;
}
static int
can_move_one_pte(pte_t *src, unsigned long new_addr)
{
        int move = 1;

        if (pte_present(*src)) {
                unsigned long pfn = pte_pfn(*src);
                if (pfn_valid(pfn)) {
                        struct page *page = pfn_to_page(pfn);
                        if (PageAnon(page))
                                move = mremap_move_anon_rmap(page, new_addr);
                }
        }
        return move;
}
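/*
 * mremap_move_anon_rmap() is provided by the anonmm objrmap code; it
 * is assumed here to update the rmap cookie of an anon page to point
 * at new_addr when the page is exclusive to this mm, and to return 0
 * (refuse the move) when the page is still shared, in which case the
 * caller breaks COW first - see move_page_tables() below.
 */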
static int
move_one_page(struct vm_area_struct *vma, unsigned long old_addr,
                unsigned long new_addr)
{
        struct mm_struct *mm = vma->vm_mm;
        int error = 0;
        pte_t *src, *dst;

        spin_lock(&mm->page_table_lock);
        src = get_one_pte_map_nested(mm, old_addr);
        if (src) {
                /*
                 * Look to see whether alloc_one_pte_map needs to perform a
                 * memory allocation.  If it does then we need to drop the
                 * atomic kmap
                 */
                if (!page_table_present(mm, new_addr)) {
                        pte_unmap_nested(src);
                        src = NULL;
                }
                dst = alloc_one_pte_map(mm, new_addr);
                if (src == NULL)
                        src = get_one_pte_map_nested(mm, old_addr);
                /*
                 * Since alloc_one_pte_map can drop and re-acquire
                 * page_table_lock, we should re-check the src entry...
                 */
                if (!dst || !src)
                        error = -ENOMEM;
                else if (!can_move_one_pte(src, new_addr))
                        error = -EAGAIN;
                else {
                        pte_t pte;
                        pte = ptep_clear_flush(vma, old_addr, src);
                        set_pte(dst, pte);
                }
                if (src)
                        pte_unmap_nested(src);
                if (dst)
                        pte_unmap(dst);
        }
        spin_unlock(&mm->page_table_lock);
        return error;
}
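/*
 * Note on the dance above: pte_alloc_map() may need to allocate, so
 * alloc_one_pte_map() can drop and re-take page_table_lock.  While the
 * lock is dropped, the pte at old_addr can change underneath us (the
 * page may be unmapped for swap, for instance), which is why src is
 * re-fetched and only then re-checked with can_move_one_pte().
 */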
static unsigned long move_page_tables(struct vm_area_struct *vma,
                unsigned long new_addr, unsigned long old_addr,
                unsigned long len, int *cows)
{
        unsigned long offset;

        flush_cache_range(vma, old_addr, old_addr + len);

        /*
         * This is not the clever way to do this, but we're taking the
         * easy way out on the assumption that most remappings will be
         * only a few pages.. This also makes error recovery easier.
         */
        for (offset = 0; offset < len; offset += PAGE_SIZE) {
                int ret = move_one_page(vma, old_addr+offset, new_addr+offset);
                /*
                 * The anonmm objrmap can only track anon page movements
                 * if the page is exclusive to one mm.  In the rare case
                 * when mremap move is applied to a shared page, break
                 * COW (take a copy of the page) to make it exclusive.
                 * If shared while on swap, page will be copied when
                 * brought back in (if it's still shared by then).
                 */
                if (ret == -EAGAIN) {
                        ret = make_page_exclusive(vma, old_addr+offset);
                        if (!ret) {
                                ret = move_one_page(vma, old_addr+offset,
                                                        new_addr+offset);
                                (*cows)++;
                        }
                }
                if (ret)
                        break;
        }
        return offset;
}
static unsigned long move_vma(struct vm_area_struct *vma,
                unsigned long old_addr, unsigned long old_len,
                unsigned long new_len, unsigned long new_addr)
{
        struct mm_struct *mm = vma->vm_mm;
        struct vm_area_struct *new_vma;
        unsigned long vm_flags = vma->vm_flags;
        unsigned long new_pgoff;
        unsigned long moved_len;
        unsigned long excess = 0;
        int split = 0;
        int cows = 0;

        /*
         * We'd prefer to avoid failure later on in do_munmap:
         * which may split one vma into three before unmapping.
         */
        if (mm->map_count >= sysctl_max_map_count - 3)
                return -ENOMEM;

        new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
        new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff);
        if (!new_vma)
                return -ENOMEM;

        moved_len = move_page_tables(vma, new_addr, old_addr, old_len, &cows);
        if (moved_len < old_len) {
                /*
                 * On error, move entries back from new area to old,
                 * which will succeed since page tables still there,
                 * and then proceed to unmap new area instead of old.
                 */
                move_page_tables(new_vma, old_addr, new_addr, moved_len, &cows);
                vma = new_vma;
                old_len = new_len;
                old_addr = new_addr;
                new_addr = -ENOMEM;
        }
        if (cows)	/* Downgrade or remove this message later */
                printk(KERN_WARNING "%s: mremap moved %d cows\n",
                                                current->comm, cows);

        /* Conceal VM_ACCOUNT so old reservation is not undone */
        if (vm_flags & VM_ACCOUNT) {
                vma->vm_flags &= ~VM_ACCOUNT;
                excess = vma->vm_end - vma->vm_start - old_len;
                if (old_addr > vma->vm_start &&
                    old_addr + old_len < vma->vm_end)
                        split = 1;
        }

        if (do_munmap(mm, old_addr, old_len) < 0) {
                /* OOM: unable to split vma, just get accounts right */
                vm_unacct_memory(excess >> PAGE_SHIFT);
                excess = 0;
        }

        /* Restore VM_ACCOUNT if one or two pieces of vma left */
        if (excess) {
                vma->vm_flags |= VM_ACCOUNT;
                if (split)
                        vma->vm_next->vm_flags |= VM_ACCOUNT;
        }
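        /*
         * Worked example of the accounting above, assuming 4K pages:
         * if the accounted vma spans [0x8000000, 0x8008000) and the
         * range [0x8002000, 0x8004000) is moved away, then excess =
         * 0x8000 - 0x2000 = 0x6000 and split = 1; do_munmap leaves a
         * piece on each side of the hole, and both pieces get their
         * VM_ACCOUNT flag restored.
         */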
        mm->total_vm += new_len >> PAGE_SHIFT;
        if (vm_flags & VM_LOCKED) {
                mm->locked_vm += new_len >> PAGE_SHIFT;
                if (new_len > old_len)
                        make_pages_present(new_addr + old_len,
                                           new_addr + new_len);
        }

        return new_addr;
}
/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
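/*
 * For reference, a minimal userspace sketch of the two modes handled
 * here (illustrative only, error handling omitted; needs _GNU_SOURCE
 * and <sys/mman.h>):
 *
 *	char *p = mmap(NULL, 4096, PROT_READ|PROT_WRITE,
 *			MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
 *	char *q = mremap(p, 4096, 8192, MREMAP_MAYMOVE);
 *	char *r = mremap(q, 8192, 8192,
 *			MREMAP_MAYMOVE|MREMAP_FIXED, (void *)0x30000000);
 *
 * On success each call returns the mapping's (possibly new) address,
 * which may differ from the old one.
 */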
unsigned long do_mremap(unsigned long addr,
        unsigned long old_len, unsigned long new_len,
        unsigned long flags, unsigned long new_addr)
{
        struct vm_area_struct *vma;
        unsigned long ret = -EINVAL;
        unsigned long charged = 0;

        if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
                goto out;

        if (addr & ~PAGE_MASK)
                goto out;

        old_len = PAGE_ALIGN(old_len);
        new_len = PAGE_ALIGN(new_len);
        /*
         * We allow a zero old-len as a special case
         * for DOS-emu "duplicate shm area" thing. But
         * a zero new-len is nonsensical.
         */
        if (!new_len)
                goto out;
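        /*
         * Example of the zero old-len case: for a MAP_SHARED mapping at
         * addr, mremap(addr, 0, len, MREMAP_MAYMOVE) leaves the original
         * mapping intact and creates a second mapping of the same pages,
         * which is how DOSEMU duplicates its shm area.
         */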
        /* new_addr is only valid if MREMAP_FIXED is specified */
        if (flags & MREMAP_FIXED) {
                if (new_addr & ~PAGE_MASK)
                        goto out;
                if (!(flags & MREMAP_MAYMOVE))
                        goto out;

                if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
                        goto out;

                /* Check if the location we're moving into overlaps the
                 * old location at all, and fail if it does.
                 */
                if ((new_addr <= addr) && (new_addr+new_len) > addr)
                        goto out;

                if ((addr <= new_addr) && (addr+old_len) > new_addr)
                        goto out;
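                /*
                 * Worked example: addr = 0x1000 with old_len = 0x2000
                 * covers [0x1000, 0x3000); new_addr = 0x2000 with
                 * new_len = 0x2000 covers [0x2000, 0x4000).  Then
                 * addr <= new_addr and addr + old_len = 0x3000 > new_addr,
                 * so the second check above rejects the overlap.
                 */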
                ret = do_munmap(current->mm, new_addr, new_len);
                if (ret)
                        goto out;
        }
        /*
         * Always allow a shrinking remap: that just unmaps
         * the unnecessary pages..
         * do_munmap does all the needed commit accounting
         */
        if (old_len >= new_len) {
                ret = do_munmap(current->mm, addr+new_len, old_len - new_len);
                if (ret && old_len != new_len)
                        goto out;
                ret = addr;
                if (!(flags & MREMAP_FIXED) || (new_addr == addr))
                        goto out;
                old_len = new_len;
        }
        /*
         * Ok, we need to grow..  or relocate.
         */
        ret = -EFAULT;
        vma = find_vma(current->mm, addr);
        if (!vma || vma->vm_start > addr)
                goto out;
        if (is_vm_hugetlb_page(vma)) {
                ret = -EINVAL;
                goto out;
        }
        /* We can't remap across vm area boundaries */
        if (old_len > vma->vm_end - addr)
                goto out;
        if (vma->vm_flags & VM_DONTEXPAND) {
                if (new_len > old_len)
                        goto out;
        }
        if (vma->vm_flags & VM_LOCKED) {
                unsigned long locked = current->mm->locked_vm << PAGE_SHIFT;
                locked += new_len - old_len;
                ret = -EAGAIN;
                if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
                        goto out;
        }
        ret = -ENOMEM;
        if ((current->mm->total_vm << PAGE_SHIFT) + (new_len - old_len)
            > current->rlim[RLIMIT_AS].rlim_cur)
                goto out;

        if (vma->vm_flags & VM_ACCOUNT) {
                charged = (new_len - old_len) >> PAGE_SHIFT;
                if (security_vm_enough_memory(charged))
                        goto out_nc;
        }
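        /*
         * Example: growing an accounted 16K mapping to 64K with 4K pages
         * charges (0x10000 - 0x4000) >> PAGE_SHIFT = 12 pages against the
         * commit limit; the charge is dropped at out: below if the remap
         * fails.
         */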
        /* old_len exactly to the end of the area..
         * And we're not relocating the area.
         */
        if (old_len == vma->vm_end - addr &&
            !((flags & MREMAP_FIXED) && (addr != new_addr)) &&
            (old_len != new_len || !(flags & MREMAP_MAYMOVE))) {
                unsigned long max_addr = TASK_SIZE;
                if (vma->vm_next)
                        max_addr = vma->vm_next->vm_start;
                /* can we just expand the current mapping? */
                if (max_addr - addr >= new_len) {
                        int pages = (new_len - old_len) >> PAGE_SHIFT;
                        spin_lock(&vma->vm_mm->page_table_lock);
                        vma->vm_end = addr + new_len;
                        spin_unlock(&vma->vm_mm->page_table_lock);
                        current->mm->total_vm += pages;
                        if (vma->vm_flags & VM_LOCKED) {
                                current->mm->locked_vm += pages;
                                make_pages_present(addr + old_len,
                                                   addr + new_len);
                        }
                        ret = addr;
                        goto out;
                }
        }
        /*
         * We weren't able to just expand or shrink the area,
         * we need to create a new one and move it..
         */
        ret = -ENOMEM;
        if (flags & MREMAP_MAYMOVE) {
                if (!(flags & MREMAP_FIXED)) {
                        unsigned long map_flags = 0;
                        if (vma->vm_flags & VM_MAYSHARE)
                                map_flags |= MAP_SHARED;

                        new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
                                                vma->vm_pgoff, map_flags);
                        ret = new_addr;
                        if (new_addr & ~PAGE_MASK)
                                goto out;
                }
                ret = move_vma(vma, addr, old_len, new_len, new_addr);
        }
out:
        if (ret & ~PAGE_MASK)	/* failure: ret holds -errno, not an address */
                vm_unacct_memory(charged);
out_nc:
        return ret;
}
asmlinkage unsigned long sys_mremap(unsigned long addr,
        unsigned long old_len, unsigned long new_len,
        unsigned long flags, unsigned long new_addr)
{
        unsigned long ret;

        down_write(&current->mm->mmap_sem);
        ret = do_mremap(addr, old_len, new_len, flags, new_addr);
        up_write(&current->mm->mmap_sem);
        return ret;
}