/*
 * (C) Copyright 2002 Linus Torvalds
 * Portions based on the vdso-randomization code from exec-shield:
 * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
 *
 * This file contains the needed initializations to support sysenter.
 */

#include <linux/init.h>
#include <linux/smp.h>
#include <linux/thread_info.h>
#include <linux/sched.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>

#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/pgtable.h>
#include <asm/unistd.h>
#include <asm/elf.h>
#include <asm/tlbflush.h>
#include <asm/vdso.h>
#include <asm/proto.h>

#include <xen/interface/callback.h>

enum {
	VDSO_DISABLED = 0,
	VDSO_ENABLED = 1,
	VDSO_COMPAT = 2,
};

#ifdef CONFIG_COMPAT_VDSO
#define VDSO_DEFAULT	VDSO_COMPAT
#else
#define VDSO_DEFAULT	VDSO_ENABLED
#endif

#ifdef CONFIG_X86_64
#define vdso_enabled			sysctl_vsyscall32
#define arch_setup_additional_pages	syscall32_setup_pages
#endif

/*
 * This is the difference between the prelinked addresses in the vDSO images
 * and the VDSO_HIGH_BASE address where CONFIG_COMPAT_VDSO places the vDSO
 * in the user address space.
 */
#define VDSO_ADDR_ADJUST	(VDSO_HIGH_BASE - (unsigned long)VDSO32_PRELINK)

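/*
 * Illustrative example (hypothetical values): relocate_vdso() below adds
 * this constant to every prelinked address in the image, so a symbol
 * prelinked at VDSO32_PRELINK + 0x400 ends up at
 *
 *	sym->st_value + VDSO_ADDR_ADJUST  ==  VDSO_HIGH_BASE + 0x400
 */
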
/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = VDSO_DEFAULT;

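/*
 * Illustrative note (assumes the usual i386 auxiliary-vector glue in
 * asm/elf.h): when the page is mapped, its address reaches user space via
 * the ELF auxiliary vector, so a 32-bit process can locate the vDSO with,
 * for example,
 *
 *	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);   (glibc, <sys/auxv.h>)
 */
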
static int __init vdso_setup(char *s)
{
	vdso_enabled = simple_strtoul(s, NULL, 0);

	return 1;
}

/*
 * For consistency, the argument vdso32=[012] affects the 32-bit vDSO
 * behavior on both 64-bit and 32-bit kernels.
 * On 32-bit kernels, vdso=[012] means the same thing.
 */
__setup("vdso32=", vdso_setup);

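/*
 * Usage sketch (illustrative): on the kernel command line,
 *
 *	vdso32=0	disable the 32-bit vDSO (VDSO_DISABLED)
 *	vdso32=1	map it at an address chosen by get_unmapped_area()
 *			(VDSO_ENABLED)
 *	vdso32=2	map it at the fixed VDSO_HIGH_BASE address (VDSO_COMPAT)
 */
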
#ifdef CONFIG_X86_32
__setup_param("vdso=", vdso32_setup, vdso_setup, 0);

EXPORT_SYMBOL_GPL(vdso_enabled);
#endif

static __init void reloc_symtab(Elf32_Ehdr *ehdr,
				unsigned offset, unsigned size)
{
	Elf32_Sym *sym = (void *)ehdr + offset;
	unsigned nsym = size / sizeof(*sym);
	unsigned i;

	for (i = 0; i < nsym; i++, sym++) {
		if (sym->st_shndx == SHN_UNDEF ||
		    sym->st_shndx == SHN_ABS)
			continue;

		if (sym->st_shndx > SHN_LORESERVE) {
			printk(KERN_INFO "VDSO: unexpected st_shndx %x\n",
			       sym->st_shndx);
			continue;
		}

		switch (ELF_ST_TYPE(sym->st_info)) {
		case STT_OBJECT:
		case STT_FUNC:
		case STT_SECTION:
		case STT_FILE:
			sym->st_value += VDSO_ADDR_ADJUST;
		}
	}
}

static __init void reloc_dyn(Elf32_Ehdr *ehdr, unsigned offset)
{
	Elf32_Dyn *dyn = (void *)ehdr + offset;

	for (; dyn->d_tag != DT_NULL; dyn++)
		switch (dyn->d_tag) {
		case DT_ADDRRNGLO ... DT_ADDRRNGHI:
			/* definitely pointers needing relocation */
			dyn->d_un.d_ptr += VDSO_ADDR_ADJUST;
			break;

		case DT_ENCODING ... OLD_DT_LOOS-1:
		case DT_LOOS ... DT_HIOS-1:
			/* Tags above DT_ENCODING are pointers if
			   they're even */
			if (dyn->d_tag >= DT_ENCODING &&
			    (dyn->d_tag & 1) == 0)
				dyn->d_un.d_ptr += VDSO_ADDR_ADJUST;
			break;

		case DT_VALRNGLO ... DT_VALRNGHI:
			/* definitely not pointers */
			break;

		case OLD_DT_LOOS ... DT_LOOS-1:
		case DT_HIOS ... DT_VALRNGLO-1:
		default:
			if (dyn->d_tag > DT_ENCODING)
				printk(KERN_INFO "VDSO: unexpected DT_tag %x\n",
				       dyn->d_tag);
			break;
		}
}

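/*
 * Worked example for the parity rule in reloc_dyn() (illustrative values):
 * DT_ENCODING is even, so an even tag in that range such as DT_ENCODING + 2
 * is interpreted as d_ptr and has VDSO_ADDR_ADJUST added, while the odd tag
 * DT_ENCODING + 1 is interpreted as d_val and is left untouched.
 */
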
static __init void relocate_vdso(Elf32_Ehdr *ehdr)
{
	Elf32_Phdr *phdr;
	Elf32_Shdr *shdr;
	int i;

	BUG_ON(memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0 ||
	       !elf_check_arch_ia32(ehdr) ||
	       ehdr->e_type != ET_DYN);

	ehdr->e_entry += VDSO_ADDR_ADJUST;

	/* rebase phdrs */
	phdr = (void *)ehdr + ehdr->e_phoff;
	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr[i].p_vaddr += VDSO_ADDR_ADJUST;

		/* relocate dynamic stuff */
		if (phdr[i].p_type == PT_DYNAMIC)
			reloc_dyn(ehdr, phdr[i].p_offset);
	}

	/* rebase sections */
	shdr = (void *)ehdr + ehdr->e_shoff;
	for (i = 0; i < ehdr->e_shnum; i++) {
		if (!(shdr[i].sh_flags & SHF_ALLOC))
			continue;

		shdr[i].sh_addr += VDSO_ADDR_ADJUST;

		if (shdr[i].sh_type == SHT_SYMTAB ||
		    shdr[i].sh_type == SHT_DYNSYM)
			reloc_symtab(ehdr, shdr[i].sh_offset,
				     shdr[i].sh_size);
	}
}

static struct page *vdso32_pages[1];

#ifdef CONFIG_X86_64

#define vdso32_sysenter()	(boot_cpu_has(X86_FEATURE_SYSENTER32))
#define vdso32_syscall()	(boot_cpu_has(X86_FEATURE_SYSCALL32))

void __cpuinit syscall32_cpu_init(void)
{
	static const struct callback_register __cpuinitconst cstar = {
		.type = CALLBACKTYPE_syscall32,
		.address = (unsigned long)ia32_cstar_target
	};
	static const struct callback_register __cpuinitconst sysenter = {
		.type = CALLBACKTYPE_sysenter,
		.address = (unsigned long)ia32_sysenter_target
	};

	if (HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter) < 0)
		setup_clear_cpu_cap(X86_FEATURE_SYSENTER32);
	if (HYPERVISOR_callback_op(CALLBACKOP_register, &cstar) < 0)
		setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
}

#define compat_uses_vma		1

static inline void map_compat_vdso(int map)
{
}

#else  /* CONFIG_X86_32 */

#define vdso32_sysenter()	(boot_cpu_has(X86_FEATURE_SEP))
#define vdso32_syscall()	(boot_cpu_has(X86_FEATURE_SYSCALL32))

extern asmlinkage void ia32pv_cstar_target(void);
static const struct callback_register __cpuinitconst cstar = {
	.type = CALLBACKTYPE_syscall32,
	.address = { __KERNEL_CS, (unsigned long)ia32pv_cstar_target },
};

void __cpuinit enable_sep_cpu(void)
{
	extern asmlinkage void ia32pv_sysenter_target(void);
	static struct callback_register __cpuinitdata sysenter = {
		.type = CALLBACKTYPE_sysenter,
		.address = { __KERNEL_CS, (unsigned long)ia32pv_sysenter_target },
	};

	if (vdso32_syscall()) {
		if (HYPERVISOR_callback_op(CALLBACKOP_register, &cstar) != 0)
			BUG();
		return;
	}

	if (!vdso32_sysenter())
		return;

	if (xen_feature(XENFEAT_supervisor_mode_kernel))
		sysenter.address.eip = (unsigned long)ia32_sysenter_target;

	switch (HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter)) {
	case 0:
		break;
#if CONFIG_XEN_COMPAT < 0x030200
	case -ENOSYS:
		sysenter.type = CALLBACKTYPE_sysenter_deprecated;
		if (HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter) == 0)
			break;
#endif
	default:
		setup_clear_cpu_cap(X86_FEATURE_SEP);
		break;
	}
}

static struct vm_area_struct gate_vma;

static int __init gate_vma_init(void)
{
	gate_vma.vm_mm = NULL;
	gate_vma.vm_start = FIXADDR_USER_START;
	gate_vma.vm_end = FIXADDR_USER_END;
	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
	gate_vma.vm_page_prot = __P101;
	/*
	 * Make sure the vDSO gets into every core dump.
	 * Dumping its contents makes post-mortem fully interpretable later
	 * without matching up the same kernel and hardware config to see
	 * what PC values meant.
	 */
	gate_vma.vm_flags |= VM_ALWAYSDUMP;

	return 0;
}

#define compat_uses_vma		0

static void map_compat_vdso(int map)
{
	static int vdso_mapped;

	if (map == vdso_mapped)
		return;

	vdso_mapped = map;

	__set_fixmap(FIX_VDSO, page_to_pfn(vdso32_pages[0]) << PAGE_SHIFT,
		     map ? PAGE_READONLY_EXEC : PAGE_NONE);

	/* flush stray tlbs */
	flush_tlb_all();
}

#endif	/* CONFIG_X86_64 */

int __init sysenter_setup(void)
{
	void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
	const void *vsyscall;
	size_t vsyscall_len;

	vdso32_pages[0] = virt_to_page(syscall_page);

#ifdef CONFIG_X86_32
	gate_vma_init();

	if (boot_cpu_has(X86_FEATURE_SYSCALL)) {
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD
		    && HYPERVISOR_callback_op(CALLBACKOP_register, &cstar) == 0)
			setup_force_cpu_cap(X86_FEATURE_SYSCALL32);
		else {
			setup_clear_cpu_cap(X86_FEATURE_SYSCALL);
			setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
		}
	}
#endif

	if (vdso32_syscall()) {
		vsyscall = &vdso32_syscall_start;
		vsyscall_len = &vdso32_syscall_end - &vdso32_syscall_start;
	} else if (vdso32_sysenter()) {
		vsyscall = &vdso32_sysenter_start;
		vsyscall_len = &vdso32_sysenter_end - &vdso32_sysenter_start;
	} else {
		vsyscall = &vdso32_int80_start;
		vsyscall_len = &vdso32_int80_end - &vdso32_int80_start;
	}

	memcpy(syscall_page, vsyscall, vsyscall_len);
	relocate_vdso(syscall_page);

	return 0;
}

/* Setup a VMA at program startup for the vsyscall page */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret = 0;
	bool compat;

	if (vdso_enabled == VDSO_DISABLED)
		return 0;

	down_write(&mm->mmap_sem);

	/* Test compat mode once here, in case someone
	   changes it via sysctl */
	compat = (vdso_enabled == VDSO_COMPAT);

	map_compat_vdso(compat);

	if (compat)
		addr = VDSO_HIGH_BASE;
	else {
		addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
		if (IS_ERR_VALUE(addr)) {
			ret = addr;
			goto up_fail;
		}
	}

	current->mm->context.vdso = (void *)addr;

	if (compat_uses_vma || !compat) {
		/*
		 * MAYWRITE to allow gdb to COW and set breakpoints
		 *
		 * Make sure the vDSO gets into every core dump.
		 * Dumping its contents makes post-mortem fully
		 * interpretable later without matching up the same
		 * kernel and hardware config to see what PC values
		 * meant.
		 */
		ret = install_special_mapping(mm, addr, PAGE_SIZE,
					      VM_READ|VM_EXEC|
					      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
					      VM_ALWAYSDUMP,
					      vdso32_pages);

		if (ret)
			goto up_fail;
	}

	current_thread_info()->sysenter_return =
		VDSO32_SYMBOL(addr, SYSENTER_RETURN);

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);

	return ret;
}

#ifdef CONFIG_X86_64

/*
 * This must be done early in case we have an initrd containing 32-bit
 * binaries (e.g., hotplug). This could be pushed upstream.
 */
core_initcall(sysenter_setup);

#ifdef CONFIG_SYSCTL
/* Register vsyscall32 into the ABI table */
#include <linux/sysctl.h>

static ctl_table abi_table2[] = {
	{
		.procname	= "vsyscall32",
		.data		= &sysctl_vsyscall32,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{}
};

static ctl_table abi_root_table2[] = {
	{
		.ctl_name = CTL_ABI,
		.procname = "abi",
		.mode = 0555,
		.child = abi_table2
	},
	{}
};

static __init int ia32_binfmt_init(void)
{
	register_sysctl_table(abi_root_table2);
	return 0;
}
__initcall(ia32_binfmt_init);
#endif

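/*
 * Usage sketch (illustrative): with the table above registered, the knob
 * appears as /proc/sys/abi/vsyscall32 and mirrors vdso_enabled, e.g.
 *
 *	cat /proc/sys/abi/vsyscall32
 *	echo 0 > /proc/sys/abi/vsyscall32	(disable the 32-bit vDSO)
 */
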
#else  /* CONFIG_X86_32 */

const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
		return "[vdso]";
	return NULL;
}

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
	struct mm_struct *mm = tsk->mm;

	/* Check to see if this task was created in compat vdso mode */
	if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
		return &gate_vma;
	return NULL;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	const struct vm_area_struct *vma = get_gate_vma(task);

	return vma && addr >= vma->vm_start && addr < vma->vm_end;
}

int in_gate_area_no_task(unsigned long addr)
{
	return 0;
}

#endif	/* CONFIG_X86_64 */