Merge branch 'x86-x32-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
[linux-flexiantxendom0-3.2.10.git] / arch / x86 / vdso / vma.c
index 4b5d26f..00aaf04 100644 (file)
 #include <asm/vgtod.h>
 #include <asm/proto.h>
 #include <asm/vdso.h>
-
-#include "vextern.h"           /* Just for VMAGIC.  */
-#undef VEXTERN
+#include <asm/page.h>
 
 unsigned int __read_mostly vdso_enabled = 1;
 
 extern char vdso_start[], vdso_end[];
 extern unsigned short vdso_sync_cpuid;
 
-static struct page **vdso_pages;
+extern struct page *vdso_pages[];
 static unsigned vdso_size;
 
#ifdef CONFIG_X86_X32_ABI
extern char vdsox32_start[], vdsox32_end[];
extern struct page *vdsox32_pages[];
static unsigned vdsox32_size;

/*
 * patch_vdsox32 - apply alternative-instruction patching to the x32 vDSO
 * @vdso: kernel virtual address of the embedded 32-bit ELF vDSO image
 * @len:  size of the image in bytes
 *
 * Walks the image's section headers, finds ".altinstructions" and hands
 * it to apply_alternatives() so the vDSO code matches the capabilities
 * of the CPU we booted on.  Runs once at init time, before the image is
 * mapped into any process.
 */
static void __init patch_vdsox32(void *vdso, size_t len)
{
	Elf32_Ehdr *hdr = vdso;
	Elf32_Shdr *sechdrs, *alt_sec = NULL;	/* fix: pointer was initialized with 0 */
	char *secstrings;
	void *alt_data;
	int i;

	/* The image is built into the kernel; a malformed one is a build bug. */
	BUG_ON(len < sizeof(Elf32_Ehdr));
	BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);

	sechdrs = (void *)hdr + hdr->e_shoff;
	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	/* Section 0 is the reserved SHT_NULL entry, so start at 1. */
	for (i = 1; i < hdr->e_shnum; i++) {
		Elf32_Shdr *shdr = &sechdrs[i];
		if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
			alt_sec = shdr;
			break;
		}
	}

	if (!alt_sec) {
		/* If we get here, it's probably a bug. */
		pr_warning("patch_vdsox32: .altinstructions not found\n");
		return;  /* nothing to patch */
	}

	alt_data = (void *)hdr + alt_sec->sh_offset;
	apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
}
#endif
 
-static int __init init_vdso_vars(void)
+static void __init patch_vdso64(void *vdso, size_t len)
 {
-       int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
+       Elf64_Ehdr *hdr = vdso;
+       Elf64_Shdr *sechdrs, *alt_sec = 0;
+       char *secstrings;
+       void *alt_data;
        int i;
-       char *vbase;
 
-       vdso_size = npages << PAGE_SHIFT;
-       vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
-       if (!vdso_pages)
-               goto oom;
-       for (i = 0; i < npages; i++) {
-               struct page *p;
-               p = alloc_page(GFP_KERNEL);
-               if (!p)
-                       goto oom;
-               vdso_pages[i] = p;
-               copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
-       }
+       BUG_ON(len < sizeof(Elf64_Ehdr));
+       BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);
 
-       vbase = vmap(vdso_pages, npages, 0, PAGE_KERNEL);
-       if (!vbase)
-               goto oom;
+       sechdrs = (void *)hdr + hdr->e_shoff;
+       secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
 
-       if (memcmp(vbase, "\177ELF", 4)) {
-               printk("VDSO: I'm broken; not ELF\n");
-               vdso_enabled = 0;
+       for (i = 1; i < hdr->e_shnum; i++) {
+               Elf64_Shdr *shdr = &sechdrs[i];
+               if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
+                       alt_sec = shdr;
+                       goto found;
+               }
        }
 
-#define VEXTERN(x) \
-       *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
-#include "vextern.h"
-#undef VEXTERN
-       vunmap(vbase);
-       return 0;
+       /* If we get here, it's probably a bug. */
+       pr_warning("patch_vdso64: .altinstructions not found\n");
+       return;  /* nothing to patch */
 
- oom:
-       printk("Cannot allocate vdso\n");
-       vdso_enabled = 0;
-       return -ENOMEM;
+found:
+       alt_data = (void *)hdr + alt_sec->sh_offset;
+       apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
 }
-subsys_initcall(init_vdso_vars);
+
+static int __init init_vdso(void)
+{
+       int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
+       int i;
+
+       patch_vdso64(vdso_start, vdso_end - vdso_start);
+
+       vdso_size = npages << PAGE_SHIFT;
+       for (i = 0; i < npages; i++)
+               vdso_pages[i] = virt_to_page(vdso_start + i*PAGE_SIZE);
+
+#ifdef CONFIG_X86_X32_ABI
+       patch_vdsox32(vdsox32_start, vdsox32_end - vdsox32_start);
+       npages = (vdsox32_end - vdsox32_start + PAGE_SIZE - 1) / PAGE_SIZE;
+       vdsox32_size = npages << PAGE_SHIFT;
+       for (i = 0; i < npages; i++)
+               vdsox32_pages[i] = virt_to_page(vdsox32_start + i*PAGE_SIZE);
+#endif
+
+       return 0;
+}
+subsys_initcall(init_vdso);
 
 struct linux_binprm;
 
@@ -96,12 +134,24 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
        addr = start + (offset << PAGE_SHIFT);
        if (addr >= end)
                addr = end;
+
+       /*
+        * page-align it here so that get_unmapped_area doesn't
+        * align it wrongfully again to the next page. addr can come in 4K
+        * unaligned here as a result of stack start randomization.
+        */
+       addr = PAGE_ALIGN(addr);
+       addr = align_addr(addr, NULL, ALIGN_VDSO);
+
        return addr;
 }
 
 /* Setup a VMA at program startup for the vsyscall page.
    Not called for compat tasks */
-int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+static int setup_additional_pages(struct linux_binprm *bprm,
+                                 int uses_interp,
+                                 struct page **pages,
+                                 unsigned size)
 {
        struct mm_struct *mm = current->mm;
        unsigned long addr;
@@ -111,8 +161,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
                return 0;
 
        down_write(&mm->mmap_sem);
-       addr = vdso_addr(mm->start_stack, vdso_size);
-       addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
+       addr = vdso_addr(mm->start_stack, size);
+       addr = get_unmapped_area(NULL, addr, size, 0, 0);
        if (IS_ERR_VALUE(addr)) {
                ret = addr;
                goto up_fail;
@@ -120,11 +170,10 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 
        current->mm->context.vdso = (void *)addr;
 
-       ret = install_special_mapping(mm, addr, vdso_size,
+       ret = install_special_mapping(mm, addr, size,
                                      VM_READ|VM_EXEC|
-                                     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
-                                     VM_ALWAYSDUMP,
-                                     vdso_pages);
+                                     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+                                     pages);
        if (ret) {
                current->mm->context.vdso = NULL;
                goto up_fail;
@@ -135,6 +184,20 @@ up_fail:
        return ret;
 }
 
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+       return setup_additional_pages(bprm, uses_interp, vdso_pages,
+                                     vdso_size);
+}
+
#ifdef CONFIG_X86_X32_ABI
/*
 * Map the x32 vDSO into a newly exec'd x32 process's address space.
 * Thin wrapper that forwards the x32 image's pages and size to the
 * shared setup_additional_pages() helper.
 */
int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return setup_additional_pages(bprm, uses_interp,
				      vdsox32_pages, vdsox32_size);
}
#endif
+
 static __init int vdso_setup(char *s)
 {
        vdso_enabled = simple_strtoul(s, NULL, 0);