mmap randomization for executable mappings on 32-bit
author	Roland McGrath <roland@redhat.com>
Wed, 14 Jul 2010 07:55:23 +0000 (00:55 -0700)
committer	Leann Ogasawara <leann.ogasawara@canonical.com>
Mon, 28 Mar 2011 13:50:18 +0000 (06:50 -0700)
This code is originally from Ingo, with some later rebasing and
fixes to respect all the randomization-disabling knobs.

It provides a new address randomization algorithm that's preferable
for executable mappings in 32-bit processes.

Signed-off-by: Roland McGrath <roland@redhat.com>
Signed-off-by: Kees Cook <kees.cook@canonical.com>
Signed-off-by: Andy Whitcroft <apw@canonical.com>

arch/x86/mm/mmap.c
arch/x86/vdso/vdso32-setup.c
include/linux/mm.h
include/linux/mm_types.h
include/linux/sched.h
mm/mmap.c
mm/mremap.c

index 1dab519..619fff6 100644 (file)
@@ -131,6 +131,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
        } else {
                mm->mmap_base = mmap_base();
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+               if (!(current->personality & READ_IMPLIES_EXEC)
+                   && mmap_is_ia32())
+                       mm->get_unmapped_exec_area = arch_get_unmapped_exec_area;
                mm->unmap_area = arch_unmap_area_topdown;
        }
 }
index 36df991..04ba02a 100644 (file)
@@ -331,7 +331,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
        if (compat)
                addr = VDSO_HIGH_BASE;
        else {
-               addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+               addr = get_unmapped_area_prot(NULL, 0, PAGE_SIZE, 0, 0, 1);
                if (IS_ERR_VALUE(addr)) {
                        ret = addr;
                        goto up_fail;
index c67adb4..4e43460 100644 (file)
@@ -1415,7 +1415,13 @@ extern int install_special_mapping(struct mm_struct *mm,
                                   unsigned long addr, unsigned long len,
                                   unsigned long flags, struct page **pages);
 
-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+extern unsigned long get_unmapped_area_prot(struct file *, unsigned long, unsigned long, unsigned long, unsigned long, int);
+
+static inline unsigned long get_unmapped_area(struct file *file, unsigned long addr,
+               unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+       return get_unmapped_area_prot(file, addr, len, pgoff, flags, 0);
+}
 
 extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
index 26bc4e2..4f1707d 100644 (file)
@@ -227,6 +227,9 @@ struct mm_struct {
        unsigned long (*get_unmapped_area) (struct file *filp,
                                unsigned long addr, unsigned long len,
                                unsigned long pgoff, unsigned long flags);
+       unsigned long (*get_unmapped_exec_area) (struct file *filp,
+                               unsigned long addr, unsigned long len,
+                               unsigned long pgoff, unsigned long flags);
        void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
 #endif
        unsigned long mmap_base;                /* base of mmap area */
index 255325c..dafb4ef 100644 (file)
@@ -391,6 +391,10 @@ extern void arch_pick_mmap_layout(struct mm_struct *mm);
 extern unsigned long
 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
                       unsigned long, unsigned long);
+
+extern unsigned long
+arch_get_unmapped_exec_area(struct file *, unsigned long, unsigned long,
+                      unsigned long, unsigned long);
 extern unsigned long
 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
                          unsigned long len, unsigned long pgoff,
index 93b8a93..d908a25 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -30,6 +30,7 @@
 #include <linux/perf_event.h>
 #include <linux/audit.h>
 #include <linux/khugepaged.h>
+#include <linux/random.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -1037,7 +1038,8 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
        /* Obtain the address to map to. we verify (or select) it and ensure
         * that it represents a valid section of the address space.
         */
-       addr = get_unmapped_area(file, addr, len, pgoff, flags);
+       addr = get_unmapped_area_prot(file, addr, len, pgoff, flags,
+               prot & PROT_EXEC);
        if (addr & ~PAGE_MASK)
                return addr;
 
@@ -1590,8 +1592,8 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
 }
 
 unsigned long
-get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
-               unsigned long pgoff, unsigned long flags)
+get_unmapped_area_prot(struct file *file, unsigned long addr, unsigned long len,
+               unsigned long pgoff, unsigned long flags, int exec)
 {
        unsigned long (*get_area)(struct file *, unsigned long,
                                  unsigned long, unsigned long, unsigned long);
@@ -1604,7 +1606,11 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
        if (len > TASK_SIZE)
                return -ENOMEM;
 
-       get_area = current->mm->get_unmapped_area;
+       if (exec && current->mm->get_unmapped_exec_area)
+               get_area = current->mm->get_unmapped_exec_area;
+       else
+               get_area = current->mm->get_unmapped_area;
+
        if (file && file->f_op && file->f_op->get_unmapped_area)
                get_area = file->f_op->get_unmapped_area;
        addr = get_area(file, addr, len, pgoff, flags);
@@ -1618,8 +1624,83 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 
        return arch_rebalance_pgtables(addr, len);
 }
+EXPORT_SYMBOL(get_unmapped_area_prot);
+
+static bool should_randomize(void)
+{
+       return (current->flags & PF_RANDOMIZE) &&
+               !(current->personality & ADDR_NO_RANDOMIZE);
+}
+
+#define SHLIB_BASE     0x00110000
+
+unsigned long
+arch_get_unmapped_exec_area(struct file *filp, unsigned long addr0,
+               unsigned long len0, unsigned long pgoff, unsigned long flags)
+{
+       unsigned long addr = addr0, len = len0;
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       unsigned long tmp;
+
+       if (len > TASK_SIZE)
+               return -ENOMEM;
+
+       if (flags & MAP_FIXED)
+               return addr;
+
+       if (!addr)
+               addr = !should_randomize() ? SHLIB_BASE :
+                       randomize_range(SHLIB_BASE, 0x01000000, len);
+
+       if (addr) {
+               addr = PAGE_ALIGN(addr);
+               vma = find_vma(mm, addr);
+               if (TASK_SIZE - len >= addr &&
+                   (!vma || addr + len <= vma->vm_start))
+                       return addr;
+       }
+
+       addr = SHLIB_BASE;
+       for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+               /* At this point:  (!vma || addr < vma->vm_end). */
+               if (TASK_SIZE - len < addr)
+                       return -ENOMEM;
+
+               if (!vma || addr + len <= vma->vm_start) {
+                       /*
+                        * Must not let a PROT_EXEC mapping get into the
+                        * brk area:
+                        */
+                       if (addr + len > mm->brk)
+                               goto failed;
+
+                       /*
+                        * Up until the brk area we randomize addresses
+                        * as much as possible:
+                        */
+                       if (addr >= 0x01000000 && should_randomize()) {
+                               tmp = randomize_range(0x01000000,
+                                       PAGE_ALIGN(max(mm->start_brk,
+                                       (unsigned long)0x08000000)), len);
+                               vma = find_vma(mm, tmp);
+                               if (TASK_SIZE - len >= tmp &&
+                                   (!vma || tmp + len <= vma->vm_start))
+                                       return tmp;
+                       }
+                       /*
+                        * Ok, randomization didn't work out - return
+                        * the result of the linear search:
+                        */
+                       return addr;
+               }
+               addr = vma->vm_end;
+       }
+
+failed:
+       return current->mm->get_unmapped_area(filp, addr0, len0, pgoff, flags);
+}
 
-EXPORT_SYMBOL(get_unmapped_area);
 
 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
index 1de98d4..cb6a8cb 100644 (file)
@@ -488,10 +488,10 @@ unsigned long do_mremap(unsigned long addr,
                if (vma->vm_flags & VM_MAYSHARE)
                        map_flags |= MAP_SHARED;
 
-               new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
+               new_addr = get_unmapped_area_prot(vma->vm_file, 0, new_len,
                                        vma->vm_pgoff +
                                        ((addr - vma->vm_start) >> PAGE_SHIFT),
-                                       map_flags);
+                                       map_flags, vma->vm_flags & VM_EXEC);
                if (new_addr & ~PAGE_MASK) {
                        ret = new_addr;
                        goto out;