Update to 3.4-final.

Squashed catch-up diff for mm/mremap.c. It spans the i_mmap_lock to
i_mmap_mutex conversion, transparent-hugepage-aware page table moving,
batched TLB flushing, KSM unmerging before a move, and the factoring of
do_mremap() into vma_to_resize(), mremap_to() and vma_expandable().
diff --git a/mm/mremap.c b/mm/mremap.c
index a39b7b9..db8d983 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -9,8 +9,8 @@
 
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
-#include <linux/slab.h>
 #include <linux/shm.h>
+#include <linux/ksm.h>
 #include <linux/mman.h>
 #include <linux/swap.h>
 #include <linux/capability.h>
@@ -41,13 +41,14 @@ static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
                return NULL;
 
        pmd = pmd_offset(pud, addr);
-       if (pmd_none_or_clear_bad(pmd))
+       if (pmd_none(*pmd))
                return NULL;
 
        return pmd;
 }
 
-static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
+static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
+                           unsigned long addr)
 {
        pgd_t *pgd;
        pud_t *pud;
@@ -62,8 +63,7 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
        if (!pmd)
                return NULL;
 
-       if (!pmd_present(*pmd) && __pte_alloc(mm, pmd, addr))
-               return NULL;
+       VM_BUG_ON(pmd_trans_huge(*pmd));
 
        return pmd;
 }
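
Both pmd helpers change for transparent hugepages. get_old_pmd() can no
longer use pmd_none_or_clear_bad(): on most architectures a huge pmd fails
pmd_bad(), so the helper would wipe (and warn about) a perfectly valid huge
mapping. alloc_new_pmd() stops pre-allocating a pte page, since the
destination pmd may be populated as a whole by move_huge_pmd(); the
__pte_alloc() call reappears in move_page_tables() below, after any huge
pmd has been dealt with. For reference, the avoided generic helper looks
roughly like this (a sketch of include/asm-generic/pgtable.h of this era):

    static inline int pmd_none_or_clear_bad(pmd_t *pmd)
    {
            if (pmd_none(*pmd))
                    return 1;
            if (unlikely(pmd_bad(*pmd))) {
                    pmd_clear_bad(pmd);  /* clears the entry: fatal for a huge pmd */
                    return 1;
            }
            return 0;
    }
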
@@ -77,23 +77,16 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
        struct mm_struct *mm = vma->vm_mm;
        pte_t *old_pte, *new_pte, pte;
        spinlock_t *old_ptl, *new_ptl;
-       unsigned long old_start;
 
-       old_start = old_addr;
-       mmu_notifier_invalidate_range_start(vma->vm_mm,
-                                           old_start, old_end);
        if (vma->vm_file) {
                /*
                 * Subtle point from Rajesh Venkatasubramanian: before
-                * moving file-based ptes, we must lock vmtruncate out,
-                * since it might clean the dst vma before the src vma,
+                * moving file-based ptes, we must lock truncate_pagecache
+                * out, since it might clean the dst vma before the src vma,
                 * and we propagate stale pages into the dst afterward.
                 */
                mapping = vma->vm_file->f_mapping;
-               spin_lock(&mapping->i_mmap_lock);
-               if (new_vma->vm_truncate_count &&
-                   new_vma->vm_truncate_count != vma->vm_truncate_count)
-                       new_vma->vm_truncate_count = 0;
+               mutex_lock(&mapping->i_mmap_mutex);
        }
 
        /*
@@ -101,7 +94,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
         * pte locks because exclusive mmap_sem prevents deadlock.
         */
        old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
-       new_pte = pte_offset_map_nested(new_pmd, new_addr);
+       new_pte = pte_offset_map(new_pmd, new_addr);
        new_ptl = pte_lockptr(mm, new_pmd);
        if (new_ptl != old_ptl)
                spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
@@ -111,7 +104,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
                                   new_pte++, new_addr += PAGE_SIZE) {
                if (pte_none(*old_pte))
                        continue;
-               pte = ptep_clear_flush(vma, old_addr, old_pte);
+               pte = ptep_get_and_clear(mm, old_addr, old_pte);
                pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
                set_pte_at(mm, new_addr, new_pte, pte);
        }
@@ -119,11 +112,10 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
        arch_leave_lazy_mmu_mode();
        if (new_ptl != old_ptl)
                spin_unlock(new_ptl);
-       pte_unmap_nested(new_pte - 1);
+       pte_unmap(new_pte - 1);
        pte_unmap_unlock(old_pte - 1, old_ptl);
        if (mapping)
-               spin_unlock(&mapping->i_mmap_lock);
-       mmu_notifier_invalidate_range_end(vma->vm_mm, old_start, old_end);
+               mutex_unlock(&mapping->i_mmap_mutex);
 }
 
 #define LATENCY_LIMIT  (64 * PAGE_SIZE)
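
move_ptes() absorbs three independent upstream changes: the i_mmap lock
became a sleeping mutex, and the vm_truncate_count bookkeeping (which
roughly existed so truncation could drop and retake the old spinlock
midway) goes away with it; pte_offset_map_nested() disappeared together
with the KM_PTE* atomic-kmap slots, leaving plain pte_offset_map(); and
ptep_clear_flush() becomes ptep_get_and_clear(), so no per-pte TLB flush
is issued here and the caller flushes the whole range once. The mmu
notifier calls likewise move out to move_page_tables(), bracketing the
whole operation instead of each pmd step. The resulting shape, as a
non-compilable sketch:

    /*
     * for each pte in [old_addr, old_end):
     *         pte = ptep_get_and_clear(mm, old_addr, old_pte);   no flush here
     *         pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
     *         set_pte_at(mm, new_addr, new_pte, pte);
     *
     * ...back in move_page_tables():
     *         flush_tlb_range(vma, start, end);                  one flush for all
     */
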
@@ -134,22 +126,43 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 {
        unsigned long extent, next, old_end;
        pmd_t *old_pmd, *new_pmd;
+       bool need_flush = false;
 
        old_end = old_addr + len;
        flush_cache_range(vma, old_addr, old_end);
 
+       mmu_notifier_invalidate_range_start(vma->vm_mm, old_addr, old_end);
+
        for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
                cond_resched();
                next = (old_addr + PMD_SIZE) & PMD_MASK;
-               if (next - 1 > old_end)
-                       next = old_end;
+               /* even if next overflowed, extent below will be ok */
                extent = next - old_addr;
+               if (extent > old_end - old_addr)
+                       extent = old_end - old_addr;
                old_pmd = get_old_pmd(vma->vm_mm, old_addr);
                if (!old_pmd)
                        continue;
-               new_pmd = alloc_new_pmd(vma->vm_mm, new_addr);
+               new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
                if (!new_pmd)
                        break;
+               if (pmd_trans_huge(*old_pmd)) {
+                       int err = 0;
+                       if (extent == HPAGE_PMD_SIZE)
+                               err = move_huge_pmd(vma, new_vma, old_addr,
+                                                   new_addr, old_end,
+                                                   old_pmd, new_pmd);
+                       if (err > 0) {
+                               need_flush = true;
+                               continue;
+                       } else if (!err) {
+                               split_huge_page_pmd(vma->vm_mm, old_pmd);
+                       }
+                       VM_BUG_ON(pmd_trans_huge(*old_pmd));
+               }
+               if (pmd_none(*new_pmd) && __pte_alloc(new_vma->vm_mm, new_vma,
+                                                     new_pmd, new_addr))
+                       break;
                next = (new_addr + PMD_SIZE) & PMD_MASK;
                if (extent > next - new_addr)
                        extent = next - new_addr;
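
extent is now computed before the pmd level is inspected, because the
hugepage branch needs it: move_huge_pmd() is only attempted when a full
HPAGE_PMD_SIZE step is being taken; otherwise the huge pmd is split and
the normal pte loop proceeds. The deferred __pte_alloc() also lands here,
after the huge-pmd handling. As the new comment says, the arithmetic
tolerates old_addr + PMD_SIZE wrapping past the top of the address space;
a standalone demonstration (PMD_SIZE is an assumed, typical value):

    #include <stdio.h>

    #define PMD_SIZE 0x200000UL             /* 2 MiB, assumed */
    #define PMD_MASK (~(PMD_SIZE - 1))

    int main(void)
    {
            /* a small range inside the very last pmd of the address space */
            unsigned long old_addr = 0xffffffffffe01000UL;
            unsigned long old_end  = old_addr + 0x2000;
            unsigned long next, extent;

            next = (old_addr + PMD_SIZE) & PMD_MASK;  /* wraps to 0 */
            extent = next - old_addr;     /* 0x1ff000: distance to the boundary */
            if (extent > old_end - old_addr)
                    extent = old_end - old_addr;      /* clamped to 0x2000 */

            printf("next=%#lx extent=%#lx\n", next, extent);
            return 0;
    }
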
@@ -157,7 +170,12 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
                        extent = LATENCY_LIMIT;
                move_ptes(vma, old_pmd, old_addr, old_addr + extent,
                                new_vma, new_pmd, new_addr);
+               need_flush = true;
        }
+       if (likely(need_flush))
+               flush_tlb_range(vma, old_end-len, old_addr);
+
+       mmu_notifier_invalidate_range_end(vma->vm_mm, old_end-len, old_end);
 
        return len + old_addr - old_end;        /* how much done */
 }
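
need_flush turns many per-pte flushes (on SMP, each ptep_clear_flush()
could mean a cross-CPU IPI) into at most one flush_tlb_range() per call.
Because old_addr is the loop cursor, the flush range and the return value
fall out of the same arithmetic and stay correct even when the loop stops
early; worked through with assumed numbers:

    /*
     * say len = 0x6000 and the loop bailed out after moving 0x4000:
     *
     *   old_end      = start + 0x6000
     *   old_addr     = start + 0x4000               (where the loop stopped)
     *
     *   flush range  = [old_end - len, old_addr)    = [start, start + 0x4000)
     *   return value = len + old_addr - old_end     = 0x4000 bytes done
     */
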
@@ -174,6 +192,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
        unsigned long excess = 0;
        unsigned long hiwater_vm;
        int split = 0;
+       int err;
 
        /*
         * We'd prefer to avoid failure later on in do_munmap:
@@ -182,6 +201,18 @@ static unsigned long move_vma(struct vm_area_struct *vma,
        if (mm->map_count >= sysctl_max_map_count - 3)
                return -ENOMEM;
 
+       /*
+        * Advise KSM to break any KSM pages in the area to be moved:
+        * it would be confusing if they were to turn up at the new
+        * location, where they happen to coincide with different KSM
+        * pages recently unmapped.  But leave vma->vm_flags as it was,
+        * so KSM can come around to merge on vma and new_vma afterwards.
+        */
+       err = ksm_madvise(vma, old_addr, old_addr + old_len,
+                                               MADV_UNMERGEABLE, &vm_flags);
+       if (err)
+               return err;
+
        new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
        new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff);
        if (!new_vma)
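
The new ksm_madvise() call is the in-kernel counterpart of madvise(2) with
MADV_UNMERGEABLE: merged KSM pages in the old range are broken back out
into private copies before the move. Passing &vm_flags (move_vma()'s local
copy) means only the copy loses VM_MERGEABLE; vma->vm_flags is untouched,
so KSM may re-merge at both locations later, exactly as the comment says.
The userspace equivalent, as a minimal sketch (sizes made up; assumes a
libc that exposes MADV_MERGEABLE/MADV_UNMERGEABLE):

    #include <sys/mman.h>
    #include <stdio.h>

    int main(void)
    {
            size_t len = 2 * 1024 * 1024;
            char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED)
                    return 1;
            madvise(p, len, MADV_MERGEABLE);        /* offer the range to ksmd */
            /* ... ksmd may merge identical pages over time ... */
            if (madvise(p, len, MADV_UNMERGEABLE))  /* undo any merging */
                    perror("madvise(MADV_UNMERGEABLE)");
            return 0;
    }
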
@@ -190,6 +221,15 @@ static unsigned long move_vma(struct vm_area_struct *vma,
        moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len);
        if (moved_len < old_len) {
                /*
+                * Before moving the page tables from the new vma to
+                * the old vma, we need to be sure the old vma is
+                * queued after new vma in the same_anon_vma list to
+                * prevent SMP races with rmap_walk (that could lead
+                * rmap_walk to miss some page table).
+                */
+               anon_vma_moveto_tail(vma);
+
+               /*
                 * On error, move entries back from new area to old,
                 * which will succeed since page tables still there,
                 * and then proceed to unmap new area instead of old.
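
anon_vma_moveto_tail() was introduced for precisely this failure window:
rmap walkers traverse the same_anon_vma list in order while
move_page_tables() is moving ptes back from new_vma to vma, and the
ordering requirement keeps the pte ahead of any concurrent walker. An
illustrative interleaving (my reconstruction of the race the comment
alludes to):

    /*
     * bad order:   head -> vma (old) -> ... -> new_vma
     *   1. rmap_walk checks vma:     pte not moved back yet -> nothing found
     *   2. move_ptes() moves the pte new_vma -> vma
     *   3. rmap_walk checks new_vma: pte already gone       -> page missed
     *
     * safe order:  head -> new_vma -> ... -> vma (old)
     *   the pte only ever moves from new_vma to vma, i.e. forward in walk
     *   order, so the walker cannot pass both locations before the pte
     *   settles in one of them.
     */
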
@@ -247,6 +287,144 @@ static unsigned long move_vma(struct vm_area_struct *vma,
        return new_addr;
 }
 
+static struct vm_area_struct *vma_to_resize(unsigned long addr,
+       unsigned long old_len, unsigned long new_len, unsigned long *p)
+{
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma = find_vma(mm, addr);
+
+       if (!vma || vma->vm_start > addr)
+               goto Efault;
+
+       if (is_vm_hugetlb_page(vma))
+               goto Einval;
+
+       /* We can't remap across vm area boundaries */
+       if (old_len > vma->vm_end - addr)
+               goto Efault;
+
+       /* Need to be careful about a growing mapping */
+       if (new_len > old_len) {
+               unsigned long pgoff;
+
+               if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
+                       goto Efault;
+               pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
+               pgoff += vma->vm_pgoff;
+               if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
+                       goto Einval;
+       }
+
+       if (vma->vm_flags & VM_LOCKED) {
+               unsigned long locked, lock_limit;
+               locked = mm->locked_vm << PAGE_SHIFT;
+               lock_limit = rlimit(RLIMIT_MEMLOCK);
+               locked += new_len - old_len;
+               if (locked > lock_limit && !capable(CAP_IPC_LOCK))
+                       goto Eagain;
+       }
+
+       if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT))
+               goto Enomem;
+
+       if (vma->vm_flags & VM_ACCOUNT) {
+               unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
+               if (security_vm_enough_memory_mm(mm, charged))
+                       goto Efault;
+               *p = charged;
+       }
+
+       return vma;
+
+Efault:        /* very odd choice for most of the cases, but... */
+       return ERR_PTR(-EFAULT);
+Einval:
+       return ERR_PTR(-EINVAL);
+Enomem:
+       return ERR_PTR(-ENOMEM);
+Eagain:
+       return ERR_PTR(-EAGAIN);
+}
+
+static unsigned long mremap_to(unsigned long addr,
+       unsigned long old_len, unsigned long new_addr,
+       unsigned long new_len)
+{
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       unsigned long ret = -EINVAL;
+       unsigned long charged = 0;
+       unsigned long map_flags;
+
+       if (new_addr & ~PAGE_MASK)
+               goto out;
+
+       if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
+               goto out;
+
+       /* Check if the location we're moving into overlaps the
+        * old location at all, and fail if it does.
+        */
+       if ((new_addr <= addr) && (new_addr+new_len) > addr)
+               goto out;
+
+       if ((addr <= new_addr) && (addr+old_len) > new_addr)
+               goto out;
+
+       ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
+       if (ret)
+               goto out;
+
+       ret = do_munmap(mm, new_addr, new_len);
+       if (ret)
+               goto out;
+
+       if (old_len >= new_len) {
+               ret = do_munmap(mm, addr+new_len, old_len - new_len);
+               if (ret && old_len != new_len)
+                       goto out;
+               old_len = new_len;
+       }
+
+       vma = vma_to_resize(addr, old_len, new_len, &charged);
+       if (IS_ERR(vma)) {
+               ret = PTR_ERR(vma);
+               goto out;
+       }
+
+       map_flags = MAP_FIXED;
+       if (vma->vm_flags & VM_MAYSHARE)
+               map_flags |= MAP_SHARED;
+
+       ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
+                               ((addr - vma->vm_start) >> PAGE_SHIFT),
+                               map_flags);
+       if (ret & ~PAGE_MASK)
+               goto out1;
+
+       ret = move_vma(vma, addr, old_len, new_len, new_addr);
+       if (!(ret & ~PAGE_MASK))
+               goto out;
+out1:
+       vm_unacct_memory(charged);
+
+out:
+       return ret;
+}
+
+static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
+{
+       unsigned long end = vma->vm_end + delta;
+       if (end < vma->vm_end) /* overflow */
+               return 0;
+       if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
+               return 0;
+       if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
+                             0, MAP_FIXED) & ~PAGE_MASK)
+               return 0;
+       return 1;
+}
+
 /*
  * Expand (or shrink) an existing mapping, potentially moving it at the
  * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
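
vma_to_resize() gathers the checks do_mremap() used to inline (note the
rlimit(RLIMIT_MEMLOCK) helper replacing the open-coded signal->rlim access)
and reports failure with the kernel's ERR_PTR convention: errno values
occupy the top 4 KiB of the unsigned long range, so one return value can
carry either a pointer or a small negative error. A self-contained
userspace imitation of the <linux/err.h> trio:

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095          /* errors live in the top 4 KiB */

    static void *ERR_PTR(long error) { return (void *)error; }
    static long PTR_ERR(const void *ptr) { return (long)ptr; }
    static int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    int main(void)
    {
            void *vma = ERR_PTR(-EAGAIN);   /* e.g. the locked-pages case */

            if (IS_ERR(vma))
                    printf("error: %ld\n", PTR_ERR(vma));  /* typically -11 */
            return 0;
    }
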
@@ -280,32 +458,10 @@ unsigned long do_mremap(unsigned long addr,
        if (!new_len)
                goto out;
 
-       /* new_addr is only valid if MREMAP_FIXED is specified */
        if (flags & MREMAP_FIXED) {
-               if (new_addr & ~PAGE_MASK)
-                       goto out;
-               if (!(flags & MREMAP_MAYMOVE))
-                       goto out;
-
-               if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
-                       goto out;
-
-               /* Check if the location we're moving into overlaps the
-                * old location at all, and fail if it does.
-                */
-               if ((new_addr <= addr) && (new_addr+new_len) > addr)
-                       goto out;
-
-               if ((addr <= new_addr) && (addr+old_len) > new_addr)
-                       goto out;
-
-               ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
-               if (ret)
-                       goto out;
-
-               ret = do_munmap(mm, new_addr, new_len);
-               if (ret)
-                       goto out;
+               if (flags & MREMAP_MAYMOVE)
+                       ret = mremap_to(addr, old_len, new_addr, new_len);
+               goto out;
        }
 
        /*
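
The MREMAP_FIXED branch of do_mremap() shrinks to a dispatch: its checks
moved into mremap_to() above, and a fixed request without MREMAP_MAYMOVE
now falls through to out with ret still at its initial -EINVAL, which is
the documented mremap(2) contract. From userspace (illustrative; the
target address is made up and assumed free):

    #define _GNU_SOURCE
    #include <sys/mman.h>
    #include <stdio.h>

    int main(void)
    {
            size_t len = 4096;
            void *old = mmap(NULL, len, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            void *target = (void *)0x700000000000UL;    /* assumed free */

            /* FIXED without MAYMOVE: rejected with EINVAL */
            if (mremap(old, len, len, MREMAP_FIXED, target) == MAP_FAILED)
                    perror("mremap(MREMAP_FIXED)");

            /* FIXED with MAYMOVE: move the mapping exactly to target */
            if (mremap(old, len, len, MREMAP_MAYMOVE | MREMAP_FIXED,
                       target) == MAP_FAILED)
                    perror("mremap(MREMAP_MAYMOVE|MREMAP_FIXED)");
            return 0;
    }
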
@@ -318,64 +474,30 @@ unsigned long do_mremap(unsigned long addr,
                if (ret && old_len != new_len)
                        goto out;
                ret = addr;
-               if (!(flags & MREMAP_FIXED) || (new_addr == addr))
-                       goto out;
-               old_len = new_len;
+               goto out;
        }
 
        /*
-        * Ok, we need to grow..  or relocate.
+        * Ok, we need to grow..
         */
-       ret = -EFAULT;
-       vma = find_vma(mm, addr);
-       if (!vma || vma->vm_start > addr)
-               goto out;
-       if (is_vm_hugetlb_page(vma)) {
-               ret = -EINVAL;
-               goto out;
-       }
-       /* We can't remap across vm area boundaries */
-       if (old_len > vma->vm_end - addr)
-               goto out;
-       if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) {
-               if (new_len > old_len)
-                       goto out;
-       }
-       if (vma->vm_flags & VM_LOCKED) {
-               unsigned long locked, lock_limit;
-               locked = mm->locked_vm << PAGE_SHIFT;
-               lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
-               locked += new_len - old_len;
-               ret = -EAGAIN;
-               if (locked > lock_limit && !capable(CAP_IPC_LOCK))
-                       goto out;
-       }
-       if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT)) {
-               ret = -ENOMEM;
+       vma = vma_to_resize(addr, old_len, new_len, &charged);
+       if (IS_ERR(vma)) {
+               ret = PTR_ERR(vma);
                goto out;
        }
 
-       if (vma->vm_flags & VM_ACCOUNT) {
-               charged = (new_len - old_len) >> PAGE_SHIFT;
-               if (security_vm_enough_memory(charged))
-                       goto out_nc;
-       }
-
        /* old_len exactly to the end of the area..
-        * And we're not relocating the area.
         */
-       if (old_len == vma->vm_end - addr &&
-           !((flags & MREMAP_FIXED) && (addr != new_addr)) &&
-           (old_len != new_len || !(flags & MREMAP_MAYMOVE))) {
-               unsigned long max_addr = TASK_SIZE;
-               if (vma->vm_next)
-                       max_addr = vma->vm_next->vm_start;
+       if (old_len == vma->vm_end - addr) {
                /* can we just expand the current mapping? */
-               if (max_addr - addr >= new_len) {
+               if (vma_expandable(vma, new_len - old_len)) {
                        int pages = (new_len - old_len) >> PAGE_SHIFT;
 
-                       vma_adjust(vma, vma->vm_start,
-                               addr + new_len, vma->vm_pgoff, NULL);
+                       if (vma_adjust(vma, vma->vm_start, addr + new_len,
+                                      vma->vm_pgoff, NULL)) {
+                               ret = -ENOMEM;
+                               goto out;
+                       }
 
                        mm->total_vm += pages;
                        vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
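
The expand-in-place path loses its open-coded checks (vma_to_resize() has
already done them) and gains two things: vma_expandable(), which verifies
that delta bytes actually fit between this vma and the next, with an
overflow check and a MAP_FIXED get_unmapped_area() probe for
architecture-specific restrictions; and a check on vma_adjust(), which can
now fail (the diff maps any failure to -ENOMEM). The behaviour being
preserved, seen from userspace: without MREMAP_MAYMOVE a mapping may only
grow where it sits. A minimal sketch:

    #define _GNU_SOURCE
    #include <sys/mman.h>
    #include <stdio.h>

    int main(void)
    {
            void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            /* flags == 0: grow in place, or fail if a neighbour blocks it */
            if (mremap(p, 4096, 8192, 0) == MAP_FAILED)
                    perror("in-place grow");
            return 0;
    }
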
@@ -395,28 +517,27 @@ unsigned long do_mremap(unsigned long addr,
         */
        ret = -ENOMEM;
        if (flags & MREMAP_MAYMOVE) {
-               if (!(flags & MREMAP_FIXED)) {
-                       unsigned long map_flags = 0;
-                       if (vma->vm_flags & VM_MAYSHARE)
-                               map_flags |= MAP_SHARED;
-
-                       new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
-                                               vma->vm_pgoff, map_flags);
-                       if (new_addr & ~PAGE_MASK) {
-                               ret = new_addr;
-                               goto out;
-                       }
-
-                       ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
-                       if (ret)
-                               goto out;
+               unsigned long map_flags = 0;
+               if (vma->vm_flags & VM_MAYSHARE)
+                       map_flags |= MAP_SHARED;
+
+               new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
+                                       vma->vm_pgoff +
+                                       ((addr - vma->vm_start) >> PAGE_SHIFT),
+                                       map_flags);
+               if (new_addr & ~PAGE_MASK) {
+                       ret = new_addr;
+                       goto out;
                }
+
+               ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
+               if (ret)
+                       goto out;
                ret = move_vma(vma, addr, old_len, new_len, new_addr);
        }
 out:
        if (ret & ~PAGE_MASK)
                vm_unacct_memory(charged);
-out_nc:
        return ret;
 }
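
With charging moved into vma_to_resize(), the out_nc label is gone and a
single test at out decides whether to unaccount: ret & ~PAGE_MASK is
nonzero both for an unaligned value and for any -errno stored in an
unsigned long, since error codes sit at the very top of the range. Worked
through with assumed 64-bit values and 4 KiB pages:

    /*
     * PAGE_MASK = 0xfffffffffffff000, so ~PAGE_MASK = 0xfff
     *
     *   success: ret = 0x00007f1a2b400000              page-aligned address
     *            ret & 0xfff == 0                  ->  charge kept
     *   failure: ret = (unsigned long)-ENOMEM
     *                = 0xfffffffffffffff4
     *            ret & 0xfff == 0xff4              ->  vm_unacct_memory()
     */
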