/*
 * arch/x86/vdso/vdso32-setup-xen.c
 *
 * (C) Copyright 2002 Linus Torvalds
 * Portions based on the vdso-randomization code from exec-shield:
 * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
 *
 * This file contains the initializations needed to support sysenter.
 */

#include <linux/init.h>
#include <linux/smp.h>
#include <linux/thread_info.h>
#include <linux/sched.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>

#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/pgtable.h>
#include <asm/unistd.h>
#include <asm/elf.h>
#include <asm/tlbflush.h>
#include <asm/vdso.h>
#include <asm/proto.h>

#include <xen/interface/callback.h>

enum {
        VDSO_DISABLED = 0,
        VDSO_ENABLED = 1,
        VDSO_COMPAT = 2,
};

#ifdef CONFIG_COMPAT_VDSO
#define VDSO_DEFAULT    VDSO_COMPAT
#else
#define VDSO_DEFAULT    VDSO_ENABLED
#endif

#ifdef CONFIG_X86_64
#define vdso_enabled                    sysctl_vsyscall32
#define arch_setup_additional_pages     syscall32_setup_pages
#endif
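
/*
 * On 64-bit kernels this file builds the 32-bit compat vDSO setup: the
 * enable knob aliases the abi.vsyscall32 sysctl (registered below), and
 * the mapping hook is provided under the name syscall32_setup_pages.
 */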

/*
 * This is the difference between the prelinked addresses in the vDSO images
 * and the VDSO_HIGH_BASE address where CONFIG_COMPAT_VDSO places the vDSO
 * in the user address space.
 */
#define VDSO_ADDR_ADJUST        (VDSO_HIGH_BASE - (unsigned long)VDSO32_PRELINK)
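
/*
 * Worked example (illustrative values only, not necessarily this tree's):
 * with VDSO32_PRELINK == 0 and VDSO_HIGH_BASE == 0xffffe000,
 * VDSO_ADDR_ADJUST is 0xffffe000, so a symbol prelinked at 0x400 is
 * rebased to 0xffffe400 by relocate_vdso() below.
 */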

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = VDSO_DEFAULT;

static int __init vdso_setup(char *s)
{
        vdso_enabled = simple_strtoul(s, NULL, 0);

        return 1;
}

/*
 * For consistency, the argument vdso32=[012] affects the 32-bit vDSO
 * behavior on both 64-bit and 32-bit kernels.
 * On 32-bit kernels, vdso=[012] means the same thing.
 */
__setup("vdso32=", vdso_setup);

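/*
 * Usage sketch: "vdso32=0" on the command line disables the 32-bit vDSO,
 * "vdso32=1" requests the normal mmap'ed (randomizable) mapping, and
 * "vdso32=2" requests the fixed-address compat mapping; on 32-bit
 * kernels "vdso=" accepts the same values.
 */
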
#ifdef CONFIG_X86_32
__setup_param("vdso=", vdso32_setup, vdso_setup, 0);

EXPORT_SYMBOL_GPL(vdso_enabled);
#endif

static __init void reloc_symtab(Elf32_Ehdr *ehdr,
                                unsigned offset, unsigned size)
{
        Elf32_Sym *sym = (void *)ehdr + offset;
        unsigned nsym = size / sizeof(*sym);
        unsigned i;

        for (i = 0; i < nsym; i++, sym++) {
                if (sym->st_shndx == SHN_UNDEF ||
                    sym->st_shndx == SHN_ABS)
                        continue;  /* skip */

                if (sym->st_shndx > SHN_LORESERVE) {
                        printk(KERN_INFO "VDSO: unexpected st_shndx %x\n",
                               sym->st_shndx);
                        continue;
                }

                switch (ELF_ST_TYPE(sym->st_info)) {
                case STT_OBJECT:
                case STT_FUNC:
                case STT_SECTION:
                case STT_FILE:
                        sym->st_value += VDSO_ADDR_ADJUST;
                }
        }
}

static __init void reloc_dyn(Elf32_Ehdr *ehdr, unsigned offset)
{
        Elf32_Dyn *dyn = (void *)ehdr + offset;

        for (; dyn->d_tag != DT_NULL; dyn++)
                switch (dyn->d_tag) {
                case DT_PLTGOT:
                case DT_HASH:
                case DT_STRTAB:
                case DT_SYMTAB:
                case DT_RELA:
                case DT_INIT:
                case DT_FINI:
                case DT_REL:
                case DT_DEBUG:
                case DT_JMPREL:
                case DT_VERSYM:
                case DT_VERDEF:
                case DT_VERNEED:
                case DT_ADDRRNGLO ... DT_ADDRRNGHI:
                        /* definitely pointers needing relocation */
                        dyn->d_un.d_ptr += VDSO_ADDR_ADJUST;
                        break;

                case DT_ENCODING ... OLD_DT_LOOS-1:
                case DT_LOOS ... DT_HIOS-1:
                        /* Tags above DT_ENCODING are pointers if
                           they're even */
                        if (dyn->d_tag >= DT_ENCODING &&
                            (dyn->d_tag & 1) == 0)
                                dyn->d_un.d_ptr += VDSO_ADDR_ADJUST;
                        break;

                case DT_VERDEFNUM:
                case DT_VERNEEDNUM:
                case DT_FLAGS_1:
                case DT_RELACOUNT:
                case DT_RELCOUNT:
                case DT_VALRNGLO ... DT_VALRNGHI:
                        /* definitely not pointers */
                        break;

                case OLD_DT_LOOS ... DT_LOOS-1:
                case DT_HIOS ... DT_VALRNGLO-1:
                default:
                        if (dyn->d_tag > DT_ENCODING)
                                printk(KERN_INFO "VDSO: unexpected DT_tag %x\n",
                                       dyn->d_tag);
                        break;
                }
}
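
/*
 * The even/odd test above is the ELF gABI rule: for d_tag values at or
 * above DT_ENCODING, even tags hold d_un.d_ptr (an address that needs
 * rebasing) and odd tags hold d_un.d_val (a plain number that must be
 * left alone). For example, DT_PREINIT_ARRAY (32) is even and is a
 * pointer, while DT_PREINIT_ARRAYSZ (33) is odd and is a size.
 */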

static __init void relocate_vdso(Elf32_Ehdr *ehdr)
{
        Elf32_Phdr *phdr;
        Elf32_Shdr *shdr;
        int i;

        BUG_ON(memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0 ||
               !elf_check_arch_ia32(ehdr) ||
               ehdr->e_type != ET_DYN);

        ehdr->e_entry += VDSO_ADDR_ADJUST;

        /* rebase phdrs */
        phdr = (void *)ehdr + ehdr->e_phoff;
        for (i = 0; i < ehdr->e_phnum; i++) {
                phdr[i].p_vaddr += VDSO_ADDR_ADJUST;

                /* relocate dynamic stuff */
                if (phdr[i].p_type == PT_DYNAMIC)
                        reloc_dyn(ehdr, phdr[i].p_offset);
        }

        /* rebase sections */
        shdr = (void *)ehdr + ehdr->e_shoff;
        for (i = 0; i < ehdr->e_shnum; i++) {
                if (!(shdr[i].sh_flags & SHF_ALLOC))
                        continue;

                shdr[i].sh_addr += VDSO_ADDR_ADJUST;

                if (shdr[i].sh_type == SHT_SYMTAB ||
                    shdr[i].sh_type == SHT_DYNSYM)
                        reloc_symtab(ehdr, shdr[i].sh_offset,
                                     shdr[i].sh_size);
        }
}
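
/*
 * Note: relocate_vdso() is called only once, from sysenter_setup()
 * below, on the kernel's private copy of the vDSO image; every user
 * mapping made afterwards shares that already-relocated page.
 */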

static struct page *vdso32_pages[1];

#ifdef CONFIG_X86_64

#define vdso32_sysenter()       (boot_cpu_has(X86_FEATURE_SYSENTER32))
#define vdso32_syscall()        (boot_cpu_has(X86_FEATURE_SYSCALL32))

void __cpuinit syscall32_cpu_init(void)
{
        static const struct callback_register __cpuinitconst cstar = {
                .type = CALLBACKTYPE_syscall32,
                .address = (unsigned long)ia32_cstar_target
        };
        static const struct callback_register __cpuinitconst sysenter = {
                .type = CALLBACKTYPE_sysenter,
                .address = (unsigned long)ia32_sysenter_target
        };

        if (HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter) < 0)
                setup_clear_cpu_cap(X86_FEATURE_SYSENTER32);
        if (HYPERVISOR_callback_op(CALLBACKOP_register, &cstar) < 0)
                setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
}
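
/*
 * If the hypervisor rejects a callback, the matching feature bit is
 * cleared, so sysenter_setup() below falls back to the next available
 * entry method (syscall, then sysenter, then int80).
 */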

#define compat_uses_vma         1

static inline void map_compat_vdso(int map)
{
}

#else  /* CONFIG_X86_32 */

#define vdso32_sysenter()       (boot_cpu_has(X86_FEATURE_SEP))
#define vdso32_syscall()        (boot_cpu_has(X86_FEATURE_SYSCALL32))

extern asmlinkage void ia32pv_cstar_target(void);
static const struct callback_register __cpuinitconst cstar = {
        .type = CALLBACKTYPE_syscall32,
        .address = { __KERNEL_CS, (unsigned long)ia32pv_cstar_target },
};

void __cpuinit enable_sep_cpu(void)
{
        extern asmlinkage void ia32pv_sysenter_target(void);
        static struct callback_register __cpuinitdata sysenter = {
                .type = CALLBACKTYPE_sysenter,
                .address = { __KERNEL_CS, (unsigned long)ia32pv_sysenter_target },
        };

        if (vdso32_syscall()) {
                if (HYPERVISOR_callback_op(CALLBACKOP_register, &cstar) != 0)
                        BUG();
                return;
        }

        if (!vdso32_sysenter())
                return;

        if (xen_feature(XENFEAT_supervisor_mode_kernel))
                sysenter.address.eip = (unsigned long)ia32_sysenter_target;

        switch (HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter)) {
        case 0:
                break;
#if CONFIG_XEN_COMPAT < 0x030200
        case -ENOSYS:
                sysenter.type = CALLBACKTYPE_sysenter_deprecated;
                if (HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter) == 0)
                        break;
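                /* retry failed: fall through and clear X86_FEATURE_SEP */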
#endif
        default:
                setup_clear_cpu_cap(X86_FEATURE_SEP);
                break;
        }
}

static struct vm_area_struct gate_vma;

static int __init gate_vma_init(void)
{
        gate_vma.vm_mm = NULL;
        gate_vma.vm_start = FIXADDR_USER_START;
        gate_vma.vm_end = FIXADDR_USER_END;
        gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
        gate_vma.vm_page_prot = __P101;
        /*
         * Make sure the vDSO gets into every core dump.
         * Dumping its contents makes post-mortem fully interpretable later
         * without matching up the same kernel and hardware config to see
         * what PC values meant.
         */
        gate_vma.vm_flags |= VM_ALWAYSDUMP;
        return 0;
}

#define compat_uses_vma         0

static void map_compat_vdso(int map)
{
        static int vdso_mapped;

        if (map == vdso_mapped)
                return;

        vdso_mapped = map;

        __set_fixmap(FIX_VDSO, page_to_pfn(vdso32_pages[0]) << PAGE_SHIFT,
                     map ? PAGE_READONLY_EXEC : PAGE_NONE);

        /* flush stray tlbs */
        flush_tlb_all();
}

#endif  /* CONFIG_X86_64 */

int __init sysenter_setup(void)
{
        void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
        const void *vsyscall;
        size_t vsyscall_len;

        vdso32_pages[0] = virt_to_page(syscall_page);

#ifdef CONFIG_X86_32
        gate_vma_init();

        if (boot_cpu_has(X86_FEATURE_SYSCALL)) {
                if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD
                    && HYPERVISOR_callback_op(CALLBACKOP_register, &cstar) == 0)
                        setup_force_cpu_cap(X86_FEATURE_SYSCALL32);
                else {
                        setup_clear_cpu_cap(X86_FEATURE_SYSCALL);
                        setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
                }
        }
#endif
        if (vdso32_syscall()) {
                vsyscall = &vdso32_syscall_start;
                vsyscall_len = &vdso32_syscall_end - &vdso32_syscall_start;
        } else if (vdso32_sysenter()) {
                vsyscall = &vdso32_sysenter_start;
                vsyscall_len = &vdso32_sysenter_end - &vdso32_sysenter_start;
        } else {
                vsyscall = &vdso32_int80_start;
                vsyscall_len = &vdso32_int80_end - &vdso32_int80_start;
        }

        memcpy(syscall_page, vsyscall, vsyscall_len);
        relocate_vdso(syscall_page);

        return 0;
}
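
/*
 * The vdso32_*_start/_end symbol pairs used above delimit the three
 * prebuilt vDSO images linked into the kernel; the if/else chain picks
 * the first one whose entry method the CPU and hypervisor support.
 */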

/* Set up a VMA at program startup for the vsyscall page */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr;
        int ret = 0;
        bool compat;

        if (vdso_enabled == VDSO_DISABLED)
                return 0;

        down_write(&mm->mmap_sem);

        /* Test compat mode once here, in case someone
           changes it via sysctl */
        compat = (vdso_enabled == VDSO_COMPAT);

        map_compat_vdso(compat);

        if (compat)
                addr = VDSO_HIGH_BASE;
        else {
                addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
                if (IS_ERR_VALUE(addr)) {
                        ret = addr;
                        goto up_fail;
                }
        }

        current->mm->context.vdso = (void *)addr;

        if (compat_uses_vma || !compat) {
                /*
                 * MAYWRITE to allow gdb to COW and set breakpoints
                 *
                 * Make sure the vDSO gets into every core dump.
                 * Dumping its contents makes post-mortem fully
                 * interpretable later without matching up the same
                 * kernel and hardware config to see what PC values
                 * meant.
                 */
                ret = install_special_mapping(mm, addr, PAGE_SIZE,
                                              VM_READ|VM_EXEC|
                                              VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
                                              VM_ALWAYSDUMP,
                                              vdso32_pages);

                if (ret)
                        goto up_fail;
        }

        current_thread_info()->sysenter_return =
                VDSO32_SYMBOL(addr, SYSENTER_RETURN);

  up_fail:
        if (ret)
                current->mm->context.vdso = NULL;

        up_write(&mm->mmap_sem);

        return ret;
}
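
/*
 * Userspace view (a minimal sketch, not part of this file): the vDSO's
 * syscall entry point is handed to the new process via the ELF aux
 * vector as AT_SYSINFO. With glibc's getauxval() (glibc >= 2.16):
 *
 *      #include <stdio.h>
 *      #include <sys/auxv.h>
 *
 *      int main(void)
 *      {
 *              // AT_SYSINFO -> __kernel_vsyscall inside the mapped vDSO
 *              printf("vdso entry: %#lx\n", getauxval(AT_SYSINFO));
 *              return 0;
 *      }
 */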

#ifdef CONFIG_X86_64

/*
 * This must be done early in case we have an initrd containing 32-bit
 * binaries (e.g., hotplug). This could be pushed upstream.
 */
core_initcall(sysenter_setup);

#ifdef CONFIG_SYSCTL
/* Register vsyscall32 into the ABI table */
#include <linux/sysctl.h>

static ctl_table abi_table2[] = {
        {
                .procname       = "vsyscall32",
                .data           = &sysctl_vsyscall32,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
        {}
};

static ctl_table abi_root_table2[] = {
        {
                .procname = "abi",
                .mode = 0555,
                .child = abi_table2
        },
        {}
};

static __init int ia32_binfmt_init(void)
{
        register_sysctl_table(abi_root_table2);
        return 0;
}
__initcall(ia32_binfmt_init);
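
/*
 * Runtime control (sketch): the table above shows up as
 * /proc/sys/abi/vsyscall32, so for example
 *
 *      echo 0 > /proc/sys/abi/vsyscall32     (disable the 32-bit vDSO)
 *      echo 2 > /proc/sys/abi/vsyscall32     (switch to the compat mapping)
 *
 * take effect for subsequently exec'ed 32-bit binaries.
 */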
#endif

#else  /* CONFIG_X86_32 */

const char *arch_vma_name(struct vm_area_struct *vma)
{
        if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
                return "[vdso]";
        return NULL;
}

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
        struct mm_struct *mm = tsk->mm;

        /* Check to see if this task was created in compat vdso mode */
        if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
                return &gate_vma;
        return NULL;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
        const struct vm_area_struct *vma = get_gate_vma(task);

        return vma && addr >= vma->vm_start && addr < vma->vm_end;
}

int in_gate_area_no_task(unsigned long addr)
{
        return 0;
}

#endif  /* CONFIG_X86_64 */