UBUNTU: ubuntu: nx-emu - i386: mmap randomization for executable mappings
authorIngo Molnar <mingo@elte.hu>
Wed, 14 Jul 2010 07:55:23 +0000 (00:55 -0700)
committerLeann Ogasawara <leann.ogasawara@canonical.com>
Mon, 2 Apr 2012 20:09:52 +0000 (13:09 -0700)
This code is originally from Ingo Molnar, with some later rebasing and
fixes to respect all the randomization-disabling knobs. It provides
an address randomization algorithm when NX emulation is in use in 32-bit
processes. Kees Cook pushed the brk area further away in the case of PIE
binaries landing their brk inside the CS limit.

Signed-off-by: Kees Cook <kees.cook@canonical.com>
Signed-off-by: Leann Ogasawara <leann.ogasawara@canonical.com>

arch/x86/kernel/process.c
arch/x86/mm/mmap.c
arch/x86/vdso/vdso32-setup.c
include/linux/mm.h
include/linux/mm_types.h
include/linux/sched.h
mm/mmap.c
mm/mremap.c

index ee5d4fb..b67e037 100644 (file)
@@ -663,6 +663,16 @@ unsigned long arch_align_stack(unsigned long sp)
 unsigned long arch_randomize_brk(struct mm_struct *mm)
 {
        unsigned long range_end = mm->brk + 0x02000000;
-       return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+       unsigned long bump = 0;
+#ifdef CONFIG_X86_32
+       /* when using ASLR in arch_get_unmapped_exec_area, we must shove
+          the brk segment way out of the way of the exec area, since it
+          can collide with future allocations if not. */
+       if ( (mm->get_unmapped_exec_area == arch_get_unmapped_exec_area) &&
+            (mm->brk < 0x08000000) ) {
+               bump = (TASK_SIZE/6);
+       }
+#endif
+       return bump + (randomize_range(mm->brk, range_end, 0) ? : mm->brk);
 }
 
index 845df68..d437466 100644 (file)
@@ -119,6 +119,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
        } else {
                mm->mmap_base = mmap_base();
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+#ifdef CONFIG_X86_32
+               if (!(current->personality & READ_IMPLIES_EXEC)
+                   && !(__supported_pte_mask & _PAGE_NX)
+                   && mmap_is_ia32())
+                       mm->get_unmapped_exec_area = arch_get_unmapped_exec_area;
+#endif
                mm->unmap_area = arch_unmap_area_topdown;
        }
 }
index 468d591..df0ac00 100644 (file)
@@ -331,7 +331,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
        if (compat)
                addr = VDSO_HIGH_BASE;
        else {
-               addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+               addr = get_unmapped_area_prot(NULL, 0, PAGE_SIZE, 0, 0, 1);
                if (IS_ERR_VALUE(addr)) {
                        ret = addr;
                        goto up_fail;
index 4baadd1..3366b0a 100644 (file)
@@ -1396,7 +1396,13 @@ extern int install_special_mapping(struct mm_struct *mm,
                                   unsigned long addr, unsigned long len,
                                   unsigned long flags, struct page **pages);
 
-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+extern unsigned long get_unmapped_area_prot(struct file *, unsigned long, unsigned long, unsigned long, unsigned long, int);
+
+static inline unsigned long get_unmapped_area(struct file *file, unsigned long addr,
+               unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+       return get_unmapped_area_prot(file, addr, len, pgoff, flags, 0);
+}
 
 extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
index 5b42f1b..8bbc573 100644 (file)
@@ -294,6 +294,9 @@ struct mm_struct {
        unsigned long (*get_unmapped_area) (struct file *filp,
                                unsigned long addr, unsigned long len,
                                unsigned long pgoff, unsigned long flags);
+       unsigned long (*get_unmapped_exec_area) (struct file *filp,
+                               unsigned long addr, unsigned long len,
+                               unsigned long pgoff, unsigned long flags);
        void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
 #endif
        unsigned long mmap_base;                /* base of mmap area */
index 52011a7..bd0aeff 100644 (file)
@@ -391,6 +391,10 @@ extern void arch_pick_mmap_layout(struct mm_struct *mm);
 extern unsigned long
 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
                       unsigned long, unsigned long);
+
+extern unsigned long
+arch_get_unmapped_exec_area(struct file *, unsigned long, unsigned long,
+                      unsigned long, unsigned long);
 extern unsigned long
 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
                          unsigned long len, unsigned long pgoff,
index 7f48016..21685ca 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -30,6 +30,7 @@
 #include <linux/perf_event.h>
 #include <linux/audit.h>
 #include <linux/khugepaged.h>
+#include <linux/random.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -997,7 +998,8 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
        /* Obtain the address to map to. we verify (or select) it and ensure
         * that it represents a valid section of the address space.
         */
-       addr = get_unmapped_area(file, addr, len, pgoff, flags);
+       addr = get_unmapped_area_prot(file, addr, len, pgoff, flags,
+               prot & PROT_EXEC);
        if (addr & ~PAGE_MASK)
                return addr;
 
@@ -1550,8 +1552,8 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
 }
 
 unsigned long
-get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
-               unsigned long pgoff, unsigned long flags)
+get_unmapped_area_prot(struct file *file, unsigned long addr, unsigned long len,
+               unsigned long pgoff, unsigned long flags, int exec)
 {
        unsigned long (*get_area)(struct file *, unsigned long,
                                  unsigned long, unsigned long, unsigned long);
@@ -1564,7 +1566,11 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
        if (len > TASK_SIZE)
                return -ENOMEM;
 
-       get_area = current->mm->get_unmapped_area;
+       if (exec && current->mm->get_unmapped_exec_area)
+               get_area = current->mm->get_unmapped_exec_area;
+       else
+               get_area = current->mm->get_unmapped_area;
+
        if (file && file->f_op && file->f_op->get_unmapped_area)
                get_area = file->f_op->get_unmapped_area;
        addr = get_area(file, addr, len, pgoff, flags);
@@ -1578,8 +1584,83 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 
        return arch_rebalance_pgtables(addr, len);
 }
+EXPORT_SYMBOL(get_unmapped_area_prot);
+
+static bool should_randomize(void)
+{
+       return (current->flags & PF_RANDOMIZE) &&
+               !(current->personality & ADDR_NO_RANDOMIZE);
+}
+
+#define SHLIB_BASE     0x00110000
+
+unsigned long
+arch_get_unmapped_exec_area(struct file *filp, unsigned long addr0,
+               unsigned long len0, unsigned long pgoff, unsigned long flags)
+{
+       unsigned long addr = addr0, len = len0;
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       unsigned long tmp;
+
+       if (len > TASK_SIZE)
+               return -ENOMEM;
+
+       if (flags & MAP_FIXED)
+               return addr;
+
+       if (!addr)
+               addr = !should_randomize() ? SHLIB_BASE :
+                       randomize_range(SHLIB_BASE, 0x01000000, len);
+
+       if (addr) {
+               addr = PAGE_ALIGN(addr);
+               vma = find_vma(mm, addr);
+               if (TASK_SIZE - len >= addr &&
+                   (!vma || addr + len <= vma->vm_start))
+                       return addr;
+       }
+
+       addr = SHLIB_BASE;
+       for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+               /* At this point:  (!vma || addr < vma->vm_end). */
+               if (TASK_SIZE - len < addr)
+                       return -ENOMEM;
+
+               if (!vma || addr + len <= vma->vm_start) {
+                       /*
+                        * Must not let a PROT_EXEC mapping get into the
+                        * brk area:
+                        */
+                       if (addr + len > mm->brk)
+                               goto failed;
+
+                       /*
+                        * Up until the brk area we randomize addresses
+                        * as much as possible:
+                        */
+                       if (addr >= 0x01000000 && should_randomize()) {
+                               tmp = randomize_range(0x01000000,
+                                       PAGE_ALIGN(max(mm->start_brk,
+                                       (unsigned long)0x08000000)), len);
+                               vma = find_vma(mm, tmp);
+                               if (TASK_SIZE - len >= tmp &&
+                                   (!vma || tmp + len <= vma->vm_start))
+                                       return tmp;
+                       }
+                       /*
+                        * Ok, randomization didn't work out - return
+                        * the result of the linear search:
+                        */
+                       return addr;
+               }
+               addr = vma->vm_end;
+       }
+
+failed:
+       return current->mm->get_unmapped_area(filp, addr0, len0, pgoff, flags);
+}
 
-EXPORT_SYMBOL(get_unmapped_area);
 
 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
index d6959cb..b028b64 100644 (file)
@@ -512,10 +512,10 @@ unsigned long do_mremap(unsigned long addr,
                if (vma->vm_flags & VM_MAYSHARE)
                        map_flags |= MAP_SHARED;
 
-               new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
+               new_addr = get_unmapped_area_prot(vma->vm_file, 0, new_len,
                                        vma->vm_pgoff +
                                        ((addr - vma->vm_start) >> PAGE_SHIFT),
-                                       map_flags);
+                                       map_flags, vma->vm_flags & VM_EXEC);
                if (new_addr & ~PAGE_MASK) {
                        ret = new_addr;
                        goto out;