#include <asm/msr.h>
#include <asm/pgtable.h>
#include <asm/unistd.h>
+#include <asm/elf.h>
+#include <asm/tlbflush.h>
-#ifdef CONFIG_XEN
-#include <xen/interface/callback.h>
-extern const unsigned long vdso_rel_int80_start[], vdso_rel_int80_end[];
-extern const unsigned long vdso_rel_sysenter_start[], vdso_rel_sysenter_end[];
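+/*
+ * vdso_enabled values: 0 disables the vDSO entirely, 1 maps it at a
+ * normal mmap address on exec, and 2 ("compat") maps it at the fixed
+ * legacy address VDSO_HIGH_BASE assumed by older userspace.
+ */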
+enum {
+	VDSO_DISABLED = 0,
+	VDSO_ENABLED = 1,
+	VDSO_COMPAT = 2,
+};
+
+#ifdef CONFIG_COMPAT_VDSO
+#define VDSO_DEFAULT VDSO_COMPAT
+#else
+#define VDSO_DEFAULT VDSO_ENABLED
#endif
/*
* Should the kernel map a VDSO page into processes and pass its
* address down to glibc upon exec()?
*/
-#ifdef CONFIG_PARAVIRT
-unsigned int __read_mostly vdso_enabled = 0;
-#else
-unsigned int __read_mostly vdso_enabled = 1;
-#endif
+unsigned int __read_mostly vdso_enabled = VDSO_DEFAULT;
EXPORT_SYMBOL_GPL(vdso_enabled);
extern asmlinkage void sysenter_entry(void);
-void enable_sep_cpu(void)
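+/*
+ * Shift every defined symbol in a vDSO symbol table up by VDSO_HIGH_BASE
+ * so its value matches the fixmap address the image is mapped at;
+ * undefined and absolute symbols are left alone.
+ */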
+static __init void reloc_symtab(Elf32_Ehdr *ehdr,
+				unsigned offset, unsigned size)
{
-#ifndef CONFIG_X86_NO_TSS
- int cpu = get_cpu();
- struct tss_struct *tss = &per_cpu(init_tss, cpu);
+	Elf32_Sym *sym = (void *)ehdr + offset;
+	unsigned nsym = size / sizeof(*sym);
+	unsigned i;
+
+	for(i = 0; i < nsym; i++, sym++) {
+		if (sym->st_shndx == SHN_UNDEF ||
+		    sym->st_shndx == SHN_ABS)
+			continue; /* skip */
+
+		if (sym->st_shndx > SHN_LORESERVE) {
+			printk(KERN_INFO "VDSO: unexpected st_shndx %x\n",
+			       sym->st_shndx);
+			continue;
+		}
- if (!boot_cpu_has(X86_FEATURE_SEP)) {
- put_cpu();
- return;
+		switch(ELF_ST_TYPE(sym->st_info)) {
+		case STT_OBJECT:
+		case STT_FUNC:
+		case STT_SECTION:
+		case STT_FILE:
+			sym->st_value += VDSO_HIGH_BASE;
+		}
}
-
- tss->ss1 = __KERNEL_CS;
- tss->esp1 = sizeof(struct tss_struct) + (unsigned long) tss;
- wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
- wrmsr(MSR_IA32_SYSENTER_ESP, tss->esp1, 0);
- wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) sysenter_entry, 0);
- put_cpu();
-#endif
}
-#if defined(CONFIG_XEN) && defined(CONFIG_COMPAT_VDSO)
-static void __init relocate_vdso(Elf32_Ehdr *ehdr, unsigned long old_base, unsigned long new_base,
- const unsigned long *reloc_start, const unsigned long *reloc_end)
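+/*
+ * Rebase the address-valued entries of the vDSO's dynamic section by
+ * VDSO_HIGH_BASE; tags that carry plain values (counts, sizes, flags)
+ * are left untouched.
+ */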
+static __init void reloc_dyn(Elf32_Ehdr *ehdr, unsigned offset)
{
-#if 1
- const unsigned long *reloc;
-
- for (reloc = reloc_start; reloc < reloc_end; ++reloc) {
- unsigned long *ptr = (void *)((unsigned long)ehdr + *reloc);
-
- *ptr += new_base - old_base;
- }
-#else
- unsigned i, ndynsym = 0, szdynsym = 0;
- unsigned long dynsym = 0;
-
- BUG_ON(ehdr->e_ident[EI_MAG0] != ELFMAG0);
- BUG_ON(ehdr->e_ident[EI_MAG1] != ELFMAG1);
- BUG_ON(ehdr->e_ident[EI_MAG2] != ELFMAG2);
- BUG_ON(ehdr->e_ident[EI_MAG3] != ELFMAG3);
- BUG_ON(ehdr->e_ident[EI_CLASS] != ELFCLASS32);
- BUG_ON(ehdr->e_ident[EI_DATA] != ELFDATA2LSB);
- BUG_ON(ehdr->e_ehsize < sizeof(*ehdr));
- ehdr->e_entry += new_base - old_base;
- BUG_ON(ehdr->e_phentsize < sizeof(Elf32_Phdr));
- for (i = 0; i < ehdr->e_phnum; ++i) {
- Elf32_Phdr *phdr = (void *)((unsigned long)ehdr + ehdr->e_phoff + i * ehdr->e_phentsize);
-
- phdr->p_vaddr += new_base - old_base;
- switch(phdr->p_type) {
- case PT_LOAD:
- case PT_NOTE:
+	Elf32_Dyn *dyn = (void *)ehdr + offset;
+
+	for(; dyn->d_tag != DT_NULL; dyn++)
+		switch(dyn->d_tag) {
+		case DT_PLTGOT:
+		case DT_HASH:
+		case DT_STRTAB:
+		case DT_SYMTAB:
+		case DT_RELA:
+		case DT_INIT:
+		case DT_FINI:
+		case DT_REL:
+		case DT_DEBUG:
+		case DT_JMPREL:
+		case DT_VERSYM:
+		case DT_VERDEF:
+		case DT_VERNEED:
+		case DT_ADDRRNGLO ... DT_ADDRRNGHI:
+			/* definitely pointers needing relocation */
+			dyn->d_un.d_ptr += VDSO_HIGH_BASE;
break;
- case PT_DYNAMIC: {
- Elf32_Dyn *dyn = (void *)(phdr->p_vaddr - new_base + (unsigned long)ehdr);
- unsigned j;
-
- for(j = 0; dyn[j].d_tag != DT_NULL; ++j) {
- switch(dyn[j].d_tag) {
- case DT_HASH:
- case DT_STRTAB:
- case DT_SYMTAB:
- case 0x6ffffff0: /* DT_VERSYM */
- case 0x6ffffffc: /* DT_VERDEF */
- break;
- case DT_SONAME:
- case DT_STRSZ:
- case 0x6ffffffd: /* DT_VERDEFNUM */
- continue;
- case DT_SYMENT:
- szdynsym = dyn[j].d_un.d_val;
- continue;
- default:
- if (dyn[j].d_tag >= 0x60000000 /* OLD_DT_LOOS */
- || dyn[j].d_tag < 31 /* DT_ENCODING */
- || !(dyn[j].d_tag & 1)) {
- printk(KERN_WARNING "vDSO dynamic info %u has unsupported tag %08X\n", j, dyn[j].d_tag);
- WARN_ON(1);
- continue;
- }
- break;
- }
- dyn[j].d_un.d_ptr += new_base - old_base;
- switch(dyn[j].d_tag) {
- case DT_HASH:
- ndynsym = ((Elf32_Word *)dyn[j].d_un.d_ptr)[1];
- break;
- case DT_SYMTAB:
- dynsym = dyn[j].d_un.d_ptr;
- break;
- }
- }
- }
+
+		case DT_ENCODING ... OLD_DT_LOOS-1:
+		case DT_LOOS ... DT_HIOS-1:
+			/* Tags above DT_ENCODING are pointers if
+			   they're even */
+			if (dyn->d_tag >= DT_ENCODING &&
+			    (dyn->d_tag & 1) == 0)
+				dyn->d_un.d_ptr += VDSO_HIGH_BASE;
break;
- case PT_GNU_EH_FRAME:
- /* XXX */
+
+		case DT_VERDEFNUM:
+		case DT_VERNEEDNUM:
+		case DT_FLAGS_1:
+		case DT_RELACOUNT:
+		case DT_RELCOUNT:
+		case DT_VALRNGLO ... DT_VALRNGHI:
+			/* definitely not pointers */
break;
+
+		case OLD_DT_LOOS ... DT_LOOS-1:
+		case DT_HIOS ... DT_VALRNGLO-1:
default:
- printk(KERN_WARNING "vDSO program header %u has unsupported type %08X\n", i, phdr->p_type);
- WARN_ON(1);
+			if (dyn->d_tag > DT_ENCODING)
+				printk(KERN_INFO "VDSO: unexpected DT_tag %x\n",
+				       dyn->d_tag);
break;
}
+}
+
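+/*
+ * Relocate the vDSO image in place: the entry point, program headers,
+ * section headers, dynamic section and symbol tables are all rebased to
+ * the VDSO_HIGH_BASE fixmap address used for the compat mapping.
+ */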
+static __init void relocate_vdso(Elf32_Ehdr *ehdr)
+{
+	Elf32_Phdr *phdr;
+	Elf32_Shdr *shdr;
+	int i;
+
+	BUG_ON(memcmp(ehdr->e_ident, ELFMAG, 4) != 0 ||
+	       !elf_check_arch(ehdr) ||
+	       ehdr->e_type != ET_DYN);
+
+	ehdr->e_entry += VDSO_HIGH_BASE;
+
+	/* rebase phdrs */
+	phdr = (void *)ehdr + ehdr->e_phoff;
+	for (i = 0; i < ehdr->e_phnum; i++) {
+		phdr[i].p_vaddr += VDSO_HIGH_BASE;
+
+		/* relocate dynamic stuff */
+		if (phdr[i].p_type == PT_DYNAMIC)
+			reloc_dyn(ehdr, phdr[i].p_offset);
}
- BUG_ON(ehdr->e_shentsize < sizeof(Elf32_Shdr));
- BUG_ON(ehdr->e_shnum >= SHN_LORESERVE);
- for (i = 1; i < ehdr->e_shnum; ++i) {
- Elf32_Shdr *shdr = (void *)((unsigned long)ehdr + ehdr->e_shoff + i * ehdr->e_shentsize);
- if (!(shdr->sh_flags & SHF_ALLOC))
+	/* rebase sections */
+	shdr = (void *)ehdr + ehdr->e_shoff;
+	for(i = 0; i < ehdr->e_shnum; i++) {
+		if (!(shdr[i].sh_flags & SHF_ALLOC))
continue;
- shdr->sh_addr += new_base - old_base;
- switch(shdr->sh_type) {
- case SHT_DYNAMIC:
- case SHT_HASH:
- case SHT_NOBITS:
- case SHT_NOTE:
- case SHT_PROGBITS:
- case SHT_STRTAB:
- case 0x6ffffffd: /* SHT_GNU_verdef */
- case 0x6fffffff: /* SHT_GNU_versym */
- break;
- case SHT_DYNSYM:
- BUG_ON(shdr->sh_entsize < sizeof(Elf32_Sym));
- if (!szdynsym)
- szdynsym = shdr->sh_entsize;
- else
- WARN_ON(szdynsym != shdr->sh_entsize);
- if (!ndynsym)
- ndynsym = shdr->sh_size / szdynsym;
- else
- WARN_ON(ndynsym != shdr->sh_size / szdynsym);
- if (!dynsym)
- dynsym = shdr->sh_addr;
- else
- WARN_ON(dynsym != shdr->sh_addr);
- break;
- default:
- printk(KERN_WARNING "vDSO section %u has unsupported type %08X\n", i, shdr->sh_type);
- WARN_ON(shdr->sh_size);
- break;
- }
+
+		shdr[i].sh_addr += VDSO_HIGH_BASE;
+
+		if (shdr[i].sh_type == SHT_SYMTAB ||
+		    shdr[i].sh_type == SHT_DYNSYM)
+			reloc_symtab(ehdr, shdr[i].sh_offset,
+				     shdr[i].sh_size);
}
- dynsym += (unsigned long)ehdr - new_base;
- for(i = 1; i < ndynsym; ++i) {
- Elf32_Sym *sym = (void *)(dynsym + i * szdynsym);
+}
- if (sym->st_shndx == SHN_ABS)
- continue;
- sym->st_value += new_base - old_base;
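+/* Program the sysenter MSRs so this CPU can take fast system calls. */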
+void enable_sep_cpu(void)
+{
+	int cpu = get_cpu();
+	struct tss_struct *tss = &per_cpu(init_tss, cpu);
+
+	if (!boot_cpu_has(X86_FEATURE_SEP)) {
+		put_cpu();
+		return;
}
-#endif
+
+	tss->x86_tss.ss1 = __KERNEL_CS;
+	tss->x86_tss.esp1 = sizeof(struct tss_struct) + (unsigned long) tss;
+	wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
+	wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.esp1, 0);
+	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) sysenter_entry, 0);
+	put_cpu();
+}
+
+static struct vm_area_struct gate_vma;
+
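+/*
+ * gate_vma describes the fixmap-based compat vDSO mapping; get_gate_vma()
+ * below hands it out for tasks whose vDSO lives at VDSO_HIGH_BASE, since
+ * that mapping has no real vma in the process address space.
+ */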
+static int __init gate_vma_init(void)
+{
+	gate_vma.vm_mm = NULL;
+	gate_vma.vm_start = FIXADDR_USER_START;
+	gate_vma.vm_end = FIXADDR_USER_END;
+	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+	gate_vma.vm_page_prot = __P101;
+	/*
+	 * Make sure the vDSO gets into every core dump.
+	 * Dumping its contents makes post-mortem fully interpretable later
+	 * without matching up the same kernel and hardware config to see
+	 * what PC values meant.
+	 */
+	gate_vma.vm_flags |= VM_ALWAYSDUMP;
+	return 0;
}
-#else
-#define relocate_vdso(ehdr, old, new, start, end) ((void)0)
-#endif
/*
* These symbols are defined by vsyscall.o to mark the bounds
extern const char vsyscall_sysenter_start, vsyscall_sysenter_end;
static struct page *syscall_pages[1];
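+/*
+ * Map or unmap the vDSO page at the compat fixmap address and flush the
+ * TLB so no stale translation from a previous setting survives.
+ */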
+static void map_compat_vdso(int map)
+{
+	static int vdso_mapped;
+
+	if (map == vdso_mapped)
+		return;
+
+	vdso_mapped = map;
+
+	__set_fixmap(FIX_VDSO, page_to_pfn(syscall_pages[0]) << PAGE_SHIFT,
+		     map ? PAGE_READONLY_EXEC : PAGE_NONE);
+
+	/* flush stray tlbs */
+	flush_tlb_all();
+}
+
int __init sysenter_setup(void)
{
void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
- syscall_pages[0] = virt_to_page(syscall_page);
+	const void *vsyscall;
+	size_t vsyscall_len;
-#ifdef CONFIG_XEN
- if (boot_cpu_has(X86_FEATURE_SEP)) {
- static struct callback_register __initdata sysenter = {
- .type = CALLBACKTYPE_sysenter,
- .address = { __KERNEL_CS, (unsigned long)sysenter_entry },
- };
+	syscall_pages[0] = virt_to_page(syscall_page);
- if (HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter) < 0)
- clear_bit(X86_FEATURE_SEP, boot_cpu_data.x86_capability);
- }
-#endif
+	gate_vma_init();
-#ifdef CONFIG_COMPAT_VDSO
- __set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_READONLY_EXEC);
printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO));
-#endif
if (!boot_cpu_has(X86_FEATURE_SEP)) {
- memcpy(syscall_page,
- &vsyscall_int80_start,
- &vsyscall_int80_end - &vsyscall_int80_start);
- relocate_vdso(syscall_page, VDSO_PRELINK, __fix_to_virt(FIX_VDSO),
- vdso_rel_int80_start, vdso_rel_int80_end);
- return 0;
+		vsyscall = &vsyscall_int80_start;
+		vsyscall_len = &vsyscall_int80_end - &vsyscall_int80_start;
+	} else {
+		vsyscall = &vsyscall_sysenter_start;
+		vsyscall_len = &vsyscall_sysenter_end - &vsyscall_sysenter_start;
}
- memcpy(syscall_page,
- &vsyscall_sysenter_start,
- &vsyscall_sysenter_end - &vsyscall_sysenter_start);
- relocate_vdso(syscall_page, VDSO_PRELINK, __fix_to_virt(FIX_VDSO),
- vdso_rel_sysenter_start, vdso_rel_sysenter_end);
+	memcpy(syscall_page, vsyscall, vsyscall_len);
+	relocate_vdso(syscall_page);
return 0;
}
-#ifndef CONFIG_COMPAT_VDSO
/* Defined in vsyscall-sysenter.S */
extern void SYSENTER_RETURN;
{
struct mm_struct *mm = current->mm;
unsigned long addr;
- int ret;
+	int ret = 0;
+	bool compat;
down_write(&mm->mmap_sem);
- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
- if (IS_ERR_VALUE(addr)) {
- ret = addr;
- goto up_fail;
- }
- /*
- * MAYWRITE to allow gdb to COW and set breakpoints
- *
- * Make sure the vDSO gets into every core dump.
- * Dumping its contents makes post-mortem fully interpretable later
- * without matching up the same kernel and hardware config to see
- * what PC values meant.
- */
- ret = install_special_mapping(mm, addr, PAGE_SIZE,
- VM_READ|VM_EXEC|
- VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
- VM_ALWAYSDUMP,
- syscall_pages);
- if (ret)
- goto up_fail;
+	/* Test compat mode once here, in case someone
+	   changes it via sysctl */
+	compat = (vdso_enabled == VDSO_COMPAT);
+
+	map_compat_vdso(compat);
+
+	if (compat)
+		addr = VDSO_HIGH_BASE;
+	else {
+		addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+		if (IS_ERR_VALUE(addr)) {
+			ret = addr;
+			goto up_fail;
+		}
+
+		/*
+		 * MAYWRITE to allow gdb to COW and set breakpoints
+		 *
+		 * Make sure the vDSO gets into every core dump.
+		 * Dumping its contents makes post-mortem fully
+		 * interpretable later without matching up the same
+		 * kernel and hardware config to see what PC values
+		 * meant.
+		 */
+		ret = install_special_mapping(mm, addr, PAGE_SIZE,
+					      VM_READ|VM_EXEC|
+					      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
+					      VM_ALWAYSDUMP,
+					      syscall_pages);
+
+		if (ret)
+			goto up_fail;
+	}
current->mm->context.vdso = (void *)addr;
current_thread_info()->sysenter_return =
- (void *)VDSO_SYM(&SYSENTER_RETURN);
-up_fail:
+ (void *)VDSO_SYM(&SYSENTER_RETURN);
+
+ up_fail:
up_write(&mm->mmap_sem);
+
return ret;
}
struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
+	struct mm_struct *mm = tsk->mm;
+
+	/* Check to see if this task was created in compat vdso mode */
+	if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
+		return &gate_vma;
return NULL;
}
{
return 0;
}
-#endif