Update to 3.4-final.
[linux-flexiantxendom0-3.2.10.git] arch/x86/vdso/vdso32-setup-xen.c
/*
 * (C) Copyright 2002 Linus Torvalds
 * Portions based on the vdso-randomization code from exec-shield:
 * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
 *
 * This file contains the needed initializations to support sysenter.
 */

#include <linux/init.h>
#include <linux/smp.h>
#include <linux/thread_info.h>
#include <linux/sched.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>

#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/pgtable.h>
#include <asm/unistd.h>
#include <asm/elf.h>
#include <asm/tlbflush.h>
#include <asm/vdso.h>
#include <asm/proto.h>

#include <xen/interface/callback.h>

enum {
        VDSO_DISABLED = 0,
        VDSO_ENABLED = 1,
        VDSO_COMPAT = 2,
};

#ifdef CONFIG_COMPAT_VDSO
#define VDSO_DEFAULT    VDSO_COMPAT
#else
#define VDSO_DEFAULT    VDSO_ENABLED
#endif

#ifdef CONFIG_X86_64
#define vdso_enabled                    sysctl_vsyscall32
#define arch_setup_additional_pages     syscall32_setup_pages
#endif

/*
 * This is the difference between the prelinked addresses in the vDSO images
 * and the VDSO_HIGH_BASE address where CONFIG_COMPAT_VDSO places the vDSO
 * in the user address space.
 */
#define VDSO_ADDR_ADJUST        (VDSO_HIGH_BASE - (unsigned long)VDSO32_PRELINK)
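/*
 * Worked example (numbers illustrative, not the real constants): with
 * VDSO32_PRELINK == 0 and VDSO_HIGH_BASE == 0xffffe000, a symbol
 * prelinked at 0x420 must end up at 0xffffe420; every fixup below is
 * therefore just "address += VDSO_ADDR_ADJUST".
 */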

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = VDSO_DEFAULT;

static int __init vdso_setup(char *s)
{
        vdso_enabled = simple_strtoul(s, NULL, 0);

        return 1;
}

/*
 * For consistency, the argument vdso32=[012] affects the 32-bit vDSO
 * behavior on both 64-bit and 32-bit kernels.
 * On 32-bit kernels, vdso=[012] means the same thing.
 */
__setup("vdso32=", vdso_setup);

#ifdef CONFIG_X86_32
__setup_param("vdso=", vdso32_setup, vdso_setup, 0);

EXPORT_SYMBOL_GPL(vdso_enabled);
#endif
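/*
 * Usage: booting with "vdso32=0" selects VDSO_DISABLED, "vdso32=1"
 * VDSO_ENABLED and "vdso32=2" VDSO_COMPAT; on 32-bit kernels the
 * "vdso=" alias registered above takes the same values.
 */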
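/*
 * Rebase a symbol table: SHN_UNDEF symbols carry no address and SHN_ABS
 * values are absolute by definition, so only symbols bound to a real
 * section get VDSO_ADDR_ADJUST added to st_value.
 */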
static __init void reloc_symtab(Elf32_Ehdr *ehdr,
                                unsigned offset, unsigned size)
{
        Elf32_Sym *sym = (void *)ehdr + offset;
        unsigned nsym = size / sizeof(*sym);
        unsigned i;

        for (i = 0; i < nsym; i++, sym++) {
                if (sym->st_shndx == SHN_UNDEF ||
                    sym->st_shndx == SHN_ABS)
                        continue;  /* skip */

                if (sym->st_shndx > SHN_LORESERVE) {
                        printk(KERN_INFO "VDSO: unexpected st_shndx %x\n",
                               sym->st_shndx);
                        continue;
                }

                switch (ELF_ST_TYPE(sym->st_info)) {
                case STT_OBJECT:
                case STT_FUNC:
                case STT_SECTION:
                case STT_FILE:
                        sym->st_value += VDSO_ADDR_ADJUST;
                }
        }
}
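/*
 * Rebase the dynamic section.  ELF stores each tag's payload in a union:
 * d_un.d_ptr entries are addresses and must move with the image, while
 * d_un.d_val entries are plain numbers and must not.  For tags at or
 * above DT_ENCODING the gABI convention applies: even tag => pointer,
 * odd tag => value; the explicit case lists cover the classic tags that
 * predate that rule.
 */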
static __init void reloc_dyn(Elf32_Ehdr *ehdr, unsigned offset)
{
        Elf32_Dyn *dyn = (void *)ehdr + offset;

        for (; dyn->d_tag != DT_NULL; dyn++)
                switch (dyn->d_tag) {
                case DT_PLTGOT:
                case DT_HASH:
                case DT_STRTAB:
                case DT_SYMTAB:
                case DT_RELA:
                case DT_INIT:
                case DT_FINI:
                case DT_REL:
                case DT_DEBUG:
                case DT_JMPREL:
                case DT_VERSYM:
                case DT_VERDEF:
                case DT_VERNEED:
                case DT_ADDRRNGLO ... DT_ADDRRNGHI:
                        /* definitely pointers needing relocation */
                        dyn->d_un.d_ptr += VDSO_ADDR_ADJUST;
                        break;

                case DT_ENCODING ... OLD_DT_LOOS-1:
                case DT_LOOS ... DT_HIOS-1:
                        /* Tags above DT_ENCODING are pointers if
                           they're even */
                        if (dyn->d_tag >= DT_ENCODING &&
                            (dyn->d_tag & 1) == 0)
                                dyn->d_un.d_ptr += VDSO_ADDR_ADJUST;
                        break;

                case DT_VERDEFNUM:
                case DT_VERNEEDNUM:
                case DT_FLAGS_1:
                case DT_RELACOUNT:
                case DT_RELCOUNT:
                case DT_VALRNGLO ... DT_VALRNGHI:
                        /* definitely not pointers */
                        break;

                case OLD_DT_LOOS ... DT_LOOS-1:
                case DT_HIOS ... DT_VALRNGLO-1:
                default:
                        if (dyn->d_tag > DT_ENCODING)
                                printk(KERN_INFO "VDSO: unexpected DT_tag %x\n",
                                       dyn->d_tag);
                        break;
                }
}
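/*
 * Top-level fixup, run once on the built-in vDSO image: sanity-check the
 * ELF header, then shift the entry point, every program header (recursing
 * into PT_DYNAMIC) and every SHF_ALLOC section by VDSO_ADDR_ADJUST,
 * relocating symbol tables as they are encountered.
 */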
static __init void relocate_vdso(Elf32_Ehdr *ehdr)
{
        Elf32_Phdr *phdr;
        Elf32_Shdr *shdr;
        int i;

        BUG_ON(memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0 ||
               !elf_check_arch_ia32(ehdr) ||
               ehdr->e_type != ET_DYN);

        ehdr->e_entry += VDSO_ADDR_ADJUST;

        /* rebase phdrs */
        phdr = (void *)ehdr + ehdr->e_phoff;
        for (i = 0; i < ehdr->e_phnum; i++) {
                phdr[i].p_vaddr += VDSO_ADDR_ADJUST;

                /* relocate dynamic stuff */
                if (phdr[i].p_type == PT_DYNAMIC)
                        reloc_dyn(ehdr, phdr[i].p_offset);
        }

        /* rebase sections */
        shdr = (void *)ehdr + ehdr->e_shoff;
        for (i = 0; i < ehdr->e_shnum; i++) {
                if (!(shdr[i].sh_flags & SHF_ALLOC))
                        continue;

                shdr[i].sh_addr += VDSO_ADDR_ADJUST;

                if (shdr[i].sh_type == SHT_SYMTAB ||
                    shdr[i].sh_type == SHT_DYNSYM)
                        reloc_symtab(ehdr, shdr[i].sh_offset,
                                     shdr[i].sh_size);
        }
}

static struct page *vdso32_pages[1];

#ifdef CONFIG_X86_64

#define vdso32_sysenter()       (boot_cpu_has(X86_FEATURE_SYSENTER32))
#define vdso32_syscall()        (boot_cpu_has(X86_FEATURE_SYSCALL32))
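/*
 * Under Xen the 32-bit SYSENTER/SYSCALL entry points are registered as
 * hypervisor callbacks (on native kernels they would typically be
 * programmed through MSRs instead).  If registration fails, the feature
 * bit is cleared so sysenter_setup() skips the corresponding fast-path
 * vDSO image.
 */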
void __cpuinit syscall32_cpu_init(void)
{
        static const struct callback_register __cpuinitconst cstar = {
                .type = CALLBACKTYPE_syscall32,
                .address = (unsigned long)ia32_cstar_target
        };
        static const struct callback_register __cpuinitconst sysenter = {
                .type = CALLBACKTYPE_sysenter,
                .address = (unsigned long)ia32_sysenter_target
        };

        if (HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter) < 0)
                setup_clear_cpu_cap(X86_FEATURE_SYSENTER32);
        if (HYPERVISOR_callback_op(CALLBACKOP_register, &cstar) < 0)
                setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
}

#define compat_uses_vma         1

static inline void map_compat_vdso(int map)
{
}

#else  /* CONFIG_X86_32 */

#define vdso32_sysenter()       (boot_cpu_has(X86_FEATURE_SEP))
#ifndef TIF_CSTAR
#define vdso32_syscall()        0
#else
#define vdso32_syscall()        (boot_cpu_has(X86_FEATURE_SYSCALL32))

extern asmlinkage void ia32pv_cstar_target(void);
static const struct callback_register __cpuinitconst cstar = {
        .type = CALLBACKTYPE_syscall32,
        .address = { __KERNEL_CS, (unsigned long)ia32pv_cstar_target },
};
#endif
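/*
 * 32-bit PV counterpart (a sketch of the flow, per the callback
 * interface above): SYSENTER/SYSCALL from user space enters the
 * hypervisor, which bounces into the entry point registered here.  With
 * XENFEAT_supervisor_mode_kernel the kernel itself runs in ring 0, so
 * the native ia32_sysenter_target entry is used instead.
 */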
void __cpuinit enable_sep_cpu(void)
{
        extern asmlinkage void ia32pv_sysenter_target(void);
        static struct callback_register __cpuinitdata sysenter = {
                .type = CALLBACKTYPE_sysenter,
                .address = { __KERNEL_CS, (unsigned long)ia32pv_sysenter_target },
        };

#ifdef TIF_CSTAR
        if (vdso32_syscall()) {
                if (HYPERVISOR_callback_op(CALLBACKOP_register, &cstar) != 0)
                        BUG();
                return;
        }
#endif

        if (!vdso32_sysenter())
                return;

        if (xen_feature(XENFEAT_supervisor_mode_kernel))
                sysenter.address.eip = (unsigned long)ia32_sysenter_target;

        switch (HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter)) {
        case 0:
                break;
#if CONFIG_XEN_COMPAT < 0x030200
        case -ENOSYS:
                sysenter.type = CALLBACKTYPE_sysenter_deprecated;
                if (HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter) == 0)
                        break;
#endif
        default:
                setup_clear_cpu_cap(X86_FEATURE_SEP);
                break;
        }
}

static struct vm_area_struct gate_vma;

static int __init gate_vma_init(void)
{
        gate_vma.vm_mm = NULL;
        gate_vma.vm_start = FIXADDR_USER_START;
        gate_vma.vm_end = FIXADDR_USER_END;
        gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
        gate_vma.vm_page_prot = __P101;

        return 0;
}

#define compat_uses_vma         0
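/*
 * On 32-bit kernels the compat vDSO lives in a fixmap slot, so mapping
 * it is a system-wide toggle: flip the fixmap between PAGE_READONLY_EXEC
 * and PAGE_NONE and flush all TLBs so no CPU keeps a stale translation.
 */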
static void map_compat_vdso(int map)
{
        static int vdso_mapped;

        if (map == vdso_mapped)
                return;

        vdso_mapped = map;

        __set_fixmap(FIX_VDSO, page_to_pfn(vdso32_pages[0]) << PAGE_SHIFT,
                     map ? PAGE_READONLY_EXEC : PAGE_NONE);

        /* flush stray tlbs */
        flush_tlb_all();
}

#endif  /* CONFIG_X86_64 */
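/*
 * Build the vDSO image once at boot: pick the best entry method the CPU
 * and hypervisor support (syscall, then sysenter, then the int $0x80
 * fallback), copy that image into a fresh page and relocate it for the
 * compat address.
 */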
int __init sysenter_setup(void)
{
        void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
        const void *vsyscall;
        size_t vsyscall_len;

        vdso32_pages[0] = virt_to_page(syscall_page);

#ifdef CONFIG_X86_32
        gate_vma_init();

        if (boot_cpu_has(X86_FEATURE_SYSCALL)) {
# ifdef TIF_CSTAR
                if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD
                    && HYPERVISOR_callback_op(CALLBACKOP_register, &cstar) == 0)
                        setup_force_cpu_cap(X86_FEATURE_SYSCALL32);
                else
# endif
                {
                        setup_clear_cpu_cap(X86_FEATURE_SYSCALL);
                        setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
                }
        }
#endif
        if (vdso32_syscall()) {
                vsyscall = &vdso32_syscall_start;
                vsyscall_len = &vdso32_syscall_end - &vdso32_syscall_start;
        } else if (vdso32_sysenter()) {
                vsyscall = &vdso32_sysenter_start;
                vsyscall_len = &vdso32_sysenter_end - &vdso32_sysenter_start;
        } else {
                vsyscall = &vdso32_int80_start;
                vsyscall_len = &vdso32_int80_end - &vdso32_int80_start;
        }

        memcpy(syscall_page, vsyscall, vsyscall_len);
        relocate_vdso(syscall_page);

        return 0;
}
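/*
 * Mapping policy at exec() time: VDSO_COMPAT pins the page at the fixmap
 * address VDSO_HIGH_BASE, while VDSO_ENABLED lets get_unmapped_area()
 * pick a floating address and installs a real VMA (MAYWRITE so gdb can
 * COW the page to plant breakpoints).  The chosen base is handed down to
 * glibc via the ELF auxiliary vector, which is set up elsewhere.
 */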
/* Setup a VMA at program startup for the vsyscall page */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr;
        int ret = 0;
        bool compat;

#ifdef CONFIG_X86_X32_ABI
        if (test_thread_flag(TIF_X32))
                return x32_setup_additional_pages(bprm, uses_interp);
#endif

        if (vdso_enabled == VDSO_DISABLED)
                return 0;

        down_write(&mm->mmap_sem);

        /* Test compat mode once here, in case someone
           changes it via sysctl */
        compat = (vdso_enabled == VDSO_COMPAT);

        map_compat_vdso(compat);

        if (compat)
                addr = VDSO_HIGH_BASE;
        else {
                addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
                if (IS_ERR_VALUE(addr)) {
                        ret = addr;
                        goto up_fail;
                }
        }

        current->mm->context.vdso = (void *)addr;

        if (compat_uses_vma || !compat) {
                /*
                 * MAYWRITE to allow gdb to COW and set breakpoints
                 */
                ret = install_special_mapping(mm, addr, PAGE_SIZE,
                                              VM_READ|VM_EXEC|
                                              VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                                              vdso32_pages);

                if (ret)
                        goto up_fail;
        }

        current_thread_info()->sysenter_return =
                VDSO32_SYMBOL(addr, SYSENTER_RETURN);

  up_fail:
        if (ret)
                current->mm->context.vdso = NULL;

        up_write(&mm->mmap_sem);

        return ret;
}

#ifdef CONFIG_X86_64

subsys_initcall(sysenter_setup);

#ifdef CONFIG_SYSCTL
/* Register vsyscall32 into the ABI table */
#include <linux/sysctl.h>

static ctl_table abi_table2[] = {
        {
                .procname       = "vsyscall32",
                .data           = &sysctl_vsyscall32,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
        {}
};

static ctl_table abi_root_table2[] = {
        {
                .procname = "abi",
                .mode = 0555,
                .child = abi_table2
        },
        {}
};
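/*
 * The tables above surface the knob as /proc/sys/abi/vsyscall32; e.g.
 * "echo 0 > /proc/sys/abi/vsyscall32" disables the 32-bit vDSO at run
 * time (on 64-bit, vdso_enabled aliases sysctl_vsyscall32, see the
 * #define near the top of the file).
 */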

static __init int ia32_binfmt_init(void)
{
        register_sysctl_table(abi_root_table2);
        return 0;
}
__initcall(ia32_binfmt_init);
#endif

#else  /* CONFIG_X86_32 */

const char *arch_vma_name(struct vm_area_struct *vma)
{
        if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
                return "[vdso]";
        return NULL;
}

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
        /*
         * Check to see if the corresponding task was created in compat vdso
         * mode.
         */
        if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
                return &gate_vma;
        return NULL;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
        const struct vm_area_struct *vma = get_gate_vma(mm);

        return vma && addr >= vma->vm_start && addr < vma->vm_end;
}

int in_gate_area_no_mm(unsigned long addr)
{
        return 0;
}

#endif  /* CONFIG_X86_64 */