linux-flexiantxendom0-3.2.10.git: arch/x86/kernel/process_64-xen.c
/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *  X86-64 port
 *      Andi Kleen.
 *
 *      CPU hotplug support - ashok.raj@intel.com
 *
 *  Jun Nakajima <jun.nakajima@intel.com>
 *     Modified for Xen
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/stackprotector.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/tick.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>
#include <linux/cpuidle.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <xen/interface/physdev.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/hardirq.h>
#include <asm/ia32.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>

asmlinkage extern void ret_from_fork(void);

static DEFINE_PER_CPU(unsigned char, is_idle);

static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
        atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
        atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
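
/*
 * Example usage (illustrative only; my_idle_notify and my_idle_nb are
 * hypothetical names): a component interested in idle transitions can
 * register a callback on this chain and later remove it with
 * idle_notifier_unregister().
 *
 *      static int my_idle_notify(struct notifier_block *nb,
 *                                unsigned long action, void *unused)
 *      {
 *              if (action == IDLE_START)
 *                      pr_debug("cpu entering idle\n");
 *              else if (action == IDLE_END)
 *                      pr_debug("cpu leaving idle\n");
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block my_idle_nb = {
 *              .notifier_call = my_idle_notify,
 *      };
 *
 *      idle_notifier_register(&my_idle_nb);
 */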

void enter_idle(void)
{
        percpu_write(is_idle, 1);
        atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
        if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
                return;
        atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
        /* idle loop has pid 0 */
        if (current->pid)
                return;
        __exit_idle();
}

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
        BUG();
}
#endif

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
        current_thread_info()->status |= TS_POLLING;

        /*
         * If we're the non-boot CPU, nothing set the stack canary up
         * for us.  CPU0 already has it initialized but no harm in
         * doing it again.  This is a good place for updating it, as
         * we won't ever return from this function (so the invalid
         * canaries already on the stack won't ever trigger).
         */
        boot_init_stack_canary();

        /* endless idle loop with no priority at all */
        while (1) {
                tick_nohz_idle_enter();
                while (!need_resched()) {

                        rmb();

                        if (cpu_is_offline(smp_processor_id()))
                                play_dead();
                        /*
                         * Idle routines should keep interrupts disabled
                         * from here on, until they go to idle.
                         * Otherwise, idle callbacks can misfire.
                         */
                        local_touch_nmi();
                        local_irq_disable();
                        enter_idle();
                        /* Don't trace irqs off for idle */
                        stop_critical_timings();

                        /* enter_idle() needs rcu for notifiers */
                        rcu_idle_enter();
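                        /*
                         * Prefer a registered cpuidle governor; if none is
                         * available, cpuidle_idle_call() returns non-zero
                         * and we fall back to xen_idle(), this kernel's
                         * Xen-aware default idle routine.
                         */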
                        if (cpuidle_idle_call())
                                xen_idle();

                        rcu_idle_exit();
                        start_critical_timings();

                        /* In many cases the interrupt that ended idle
                           has already called exit_idle. But some idle
                           loops can be woken up without interrupt. */
                        __exit_idle();
                }

                tick_nohz_idle_exit();
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
        }
}

/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
        unsigned long d0, d1, d2, d3, d6, d7;
        unsigned int fsindex, gsindex;
        unsigned int ds, cs, es;

        show_regs_common();
        printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
        printk_address(regs->ip, 1);
        printk(KERN_DEFAULT "RSP: %04lx:%016lx  EFLAGS: %08lx\n", regs->ss,
                        regs->sp, regs->flags);
        printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
               regs->ax, regs->bx, regs->cx);
        printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
               regs->dx, regs->si, regs->di);
        printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
               regs->bp, regs->r8, regs->r9);
        printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
               regs->r10, regs->r11, regs->r12);
        printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
               regs->r13, regs->r14, regs->r15);

        asm("movl %%ds,%0" : "=r" (ds));
        asm("movl %%cs,%0" : "=r" (cs));
        asm("movl %%es,%0" : "=r" (es));
        asm("mov %%fs,%0" : "=r" (fsindex));
        asm("mov %%gs,%0" : "=r" (gsindex));

        rdmsrl(MSR_FS_BASE, fs);
        rdmsrl(MSR_GS_BASE, gs);
        rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

        if (!all)
                return;

        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = read_cr3();
        cr4 = read_cr4();

        printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
               fs, fsindex, gs, gsindex, shadowgs);
        printk(KERN_DEFAULT "CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
                        es, cr0);
        printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
                        cr4);

        get_debugreg(d0, 0);
        get_debugreg(d1, 1);
        get_debugreg(d2, 2);
        printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
        get_debugreg(d3, 3);
        get_debugreg(d6, 6);
        get_debugreg(d7, 7);
        printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}

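/*
 * Under Xen the user %gs selector is reloaded by the hypervisor
 * (SEGBASE_GS_USER_SEL); a paravirtualized guest runs outside ring 0 and
 * cannot use swapgs itself.
 */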
void xen_load_gs_index(unsigned gs)
{
        WARN_ON(HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, gs));
}
EXPORT_SYMBOL(xen_load_gs_index);

void release_thread(struct task_struct *dead_task)
{
        if (dead_task->mm) {
                if (dead_task->mm->context.size) {
                        printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
                                        dead_task->comm,
                                        dead_task->mm->context.ldt,
                                        dead_task->mm->context.size);
                        BUG();
                }
        }
}

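/*
 * Helpers for FS/GS bases that fit in 32 bits: such bases are kept in a GDT
 * TLS descriptor and addressed through a selector, which is cheaper to
 * reload on context switch than installing a full 64-bit segment base.
 */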
static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
        struct user_desc ud = {
                .base_addr = addr,
                .limit = 0xfffff,
                .seg_32bit = 1,
                .limit_in_pages = 1,
                .useable = 1,
        };
        struct desc_struct *desc = t->thread.tls_array;
        desc += tls;
        fill_ldt(desc, &ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
        return get_desc_base(&t->thread.tls_array[tls]);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
        unlazy_fpu(tsk);
}

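/*
 * Build the kernel stack and register state of a newly forked task: the
 * child gets a copy of the parent's pt_regs with %rax forced to 0 (so fork()
 * returns 0 in the child), its own segment snapshot, a private copy of the
 * I/O permission bitmap if the parent has one, and, for CLONE_SETTLS, a
 * freshly programmed TLS area.
 */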
int copy_thread(unsigned long clone_flags, unsigned long sp,
                unsigned long unused,
        struct task_struct *p, struct pt_regs *regs)
{
        int err;
        struct pt_regs *childregs;
        struct task_struct *me = current;

        childregs = ((struct pt_regs *)
                        (THREAD_SIZE + task_stack_page(p))) - 1;
        *childregs = *regs;

        childregs->ax = 0;
        if (user_mode(regs))
                childregs->sp = sp;
        else
                childregs->sp = (unsigned long)childregs;

        p->thread.sp = (unsigned long) childregs;
        p->thread.sp0 = (unsigned long) (childregs+1);

        set_tsk_thread_flag(p, TIF_FORK);

        p->fpu_counter = 0;
        p->thread.io_bitmap_ptr = NULL;

        savesegment(gs, p->thread.gsindex);
        p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
        savesegment(fs, p->thread.fsindex);
        p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
        savesegment(es, p->thread.es);
        savesegment(ds, p->thread.ds);

        err = -ENOMEM;
        memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

        if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
                p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
                                                  IO_BITMAP_BYTES, GFP_KERNEL);
                if (!p->thread.io_bitmap_ptr) {
                        p->thread.io_bitmap_max = 0;
                        return -ENOMEM;
                }
                set_tsk_thread_flag(p, TIF_IO_BITMAP);
        }

        /*
         * Set a new TLS for the child thread?
         */
        if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
                if (test_thread_flag(TIF_IA32))
                        err = do_set_thread_area(p, -1,
                                (struct user_desc __user *)childregs->si, 0);
                else
#endif
                        err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
                if (err)
                        goto out;
        }
        p->thread.iopl = current->thread.iopl;

        err = 0;
out:
        if (err && p->thread.io_bitmap_ptr) {
                kfree(p->thread.io_bitmap_ptr);
                p->thread.io_bitmap_max = 0;
        }

        return err;
}

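/*
 * Common register and segment setup for a freshly exec'ed image: flat data
 * segments, the requested code and stack segments, new instruction and stack
 * pointers, interrupts enabled, and any old extended FPU state released.
 */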
static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
                    unsigned long new_sp,
                    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
        loadsegment(fs, 0);
        loadsegment(es, _ds);
        loadsegment(ds, _ds);
        load_gs_index(0);
        regs->ip                = new_ip;
        regs->sp                = new_sp;
        regs->cs                = _cs;
        regs->ss                = _ss;
        regs->flags             = X86_EFLAGS_IF;
        /*
         * Free the old FP and other extended state
         */
        free_thread_xstate(current);
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
        start_thread_common(regs, new_ip, new_sp,
                            __USER_CS, __USER_DS, 0);
}

#ifdef CONFIG_IA32_EMULATION
void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
        start_thread_common(regs, new_ip, new_sp,
                            __USER32_CS, __USER32_DS, __USER32_DS);
}
#endif

/*
 *      switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * Function graph tracer not supported either.
 */
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
        struct thread_struct *prev = &prev_p->thread;
        struct thread_struct *next = &next_p->thread;
        int cpu = smp_processor_id();
#ifndef CONFIG_X86_NO_TSS
        struct tss_struct *tss = &per_cpu(init_tss, cpu);
#endif
        fpu_switch_t fpu;
#if CONFIG_XEN_COMPAT > 0x030002
        struct physdev_set_iopl iopl_op;
        struct physdev_set_iobitmap iobmp_op;
#else
        struct physdev_op _pdo[2], *pdo = _pdo;
#define iopl_op pdo->u.set_iopl
#define iobmp_op pdo->u.set_iobitmap
#endif
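        /*
         * The privileged parts of the context switch (kernel stack pointer,
         * TLS descriptors, IOPL and the I/O bitmap) are queued below as Xen
         * multicall entries and issued in a single hypercall.
         */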
        multicall_entry_t _mcl[8], *mcl = _mcl;

        fpu = xen_switch_fpu_prepare(prev_p, next_p, cpu, &mcl);

        /*
         * Reload sp0.
         * This is load_sp0(tss, next) with a multicall.
         */
        mcl->op      = __HYPERVISOR_stack_switch;
        mcl->args[0] = __KERNEL_DS;
        mcl->args[1] = next->sp0;
        mcl++;

        /*
         * Load the per-thread Thread-Local Storage descriptor.
         * This is load_TLS(next, cpu) with multicalls.
         */
#define C(i) do {                                                       \
        if (unlikely(next->tls_array[i].a != prev->tls_array[i].a ||    \
                     next->tls_array[i].b != prev->tls_array[i].b)) {   \
                mcl->op      = __HYPERVISOR_update_descriptor;          \
                mcl->args[0] = arbitrary_virt_to_machine(               \
                        &get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]);\
                mcl->args[1] = *(u64 *)&next->tls_array[i];             \
                mcl++;                                                  \
        }                                                               \
} while (0)
        C(0); C(1); C(2);
#undef C

        if (unlikely(prev->iopl != next->iopl)) {
                iopl_op.iopl = (next->iopl == 0) ? 1 : (next->iopl >> 12) & 3;
#if CONFIG_XEN_COMPAT > 0x030002
                mcl->op      = __HYPERVISOR_physdev_op;
                mcl->args[0] = PHYSDEVOP_set_iopl;
                mcl->args[1] = (unsigned long)&iopl_op;
#else
                mcl->op      = __HYPERVISOR_physdev_op_compat;
                pdo->cmd     = PHYSDEVOP_set_iopl;
                mcl->args[0] = (unsigned long)pdo++;
#endif
                mcl++;
        }

        if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
                set_xen_guest_handle(iobmp_op.bitmap,
                                     (char *)next->io_bitmap_ptr);
                iobmp_op.nr_ports = next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
#if CONFIG_XEN_COMPAT > 0x030002
                mcl->op      = __HYPERVISOR_physdev_op;
                mcl->args[0] = PHYSDEVOP_set_iobitmap;
                mcl->args[1] = (unsigned long)&iobmp_op;
#else
                mcl->op      = __HYPERVISOR_physdev_op_compat;
                pdo->cmd     = PHYSDEVOP_set_iobitmap;
                mcl->args[0] = (unsigned long)pdo++;
#endif
                mcl++;
        }

#if CONFIG_XEN_COMPAT <= 0x030002
        BUG_ON(pdo > _pdo + ARRAY_SIZE(_pdo));
#endif
        BUG_ON(mcl > _mcl + ARRAY_SIZE(_mcl));
        if (unlikely(HYPERVISOR_multicall_check(_mcl, mcl - _mcl, NULL)))
                BUG();

        /*
         * Switch DS and ES.
         * This won't pick up thread selector changes, but I guess that is ok.
         */
        if (unlikely(next->es))
                loadsegment(es, next->es);

        if (unlikely(next->ds))
                loadsegment(ds, next->ds);

        /*
         * Leave lazy mode, flushing any hypercalls made here.
         * This must be done before restoring TLS segments so
         * the GDT and LDT are properly updated, and must be
         * done before math_state_restore, so the TS bit is up
         * to date.
         */
        arch_end_context_switch(next_p);

        /*
         * Switch FS and GS.
         *
         * Segment register != 0 always requires a reload.  Also
         * reload when it has changed.  When prev process used 64bit
         * base always reload to avoid an information leak.
         */
        if (unlikely(next->fsindex))
                loadsegment(fs, next->fsindex);

        if (next->fs)
                WARN_ON(HYPERVISOR_set_segment_base(SEGBASE_FS, next->fs));

        if (unlikely(next->gsindex))
                load_gs_index(next->gsindex);

        if (next->gs)
                WARN_ON(HYPERVISOR_set_segment_base(SEGBASE_GS_USER, next->gs));

        switch_fpu_finish(next_p, fpu);

        /*
         * Switch the PDA context.
         */
        percpu_write(current_task, next_p);

        percpu_write(kernel_stack,
                  (unsigned long)task_stack_page(next_p) +
                  THREAD_SIZE - KERNEL_STACK_OFFSET);

        /*
         * Now maybe reload the debug registers
         */
        if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
                     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
                __switch_to_xtra(prev_p, next_p);

        return prev_p;
}

void set_personality_64bit(void)
{
        /* inherit personality from parent */

        /* Make sure to be in 64bit mode */
        clear_thread_flag(TIF_IA32);

        /* Ensure the corresponding mm is not marked. */
        if (current->mm)
                current->mm->context.ia32_compat = 0;

        /* TBD: overwrites user setup. Should have two bits.
           But 64bit processes have always behaved this way,
           so it's not too bad. The main problem is just that
           32bit children are affected again. */
        current->personality &= ~READ_IMPLIES_EXEC;
}

void set_personality_ia32(void)
{
        /* inherit personality from parent */

        /* Make sure to be in 32bit mode */
        set_thread_flag(TIF_IA32);
        current->personality |= force_personality32;

        /* Mark the associated mm as containing 32-bit tasks. */
        if (current->mm)
                current->mm->context.ia32_compat = 1;

        /* Prepare the first "return" to user space */
        current_thread_info()->status |= TS_COMPAT;
}

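/*
 * Report the "wait channel" of a sleeping task by walking its saved frame
 * pointers until a return address outside the scheduler is found.
 */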
unsigned long get_wchan(struct task_struct *p)
{
        unsigned long stack;
        u64 fp, ip;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;
        stack = (unsigned long)task_stack_page(p);
        if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
                return 0;
        fp = *(u64 *)(p->thread.sp);
        do {
                if (fp < (unsigned long)stack ||
                    fp >= (unsigned long)stack+THREAD_SIZE)
                        return 0;
                ip = *(u64 *)(fp+8);
                if (!in_sched_functions(ip))
                        return ip;
                fp = *(u64 *)fp;
        } while (count++ < 16);
        return 0;
}

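/*
 * Back end for the 64-bit FS/GS base controls exposed through arch_prctl().
 * Bases that fit in 32 bits are placed in a GDT TLS slot because reloading a
 * selector is cheaper than setting a full 64-bit base; wider bases go through
 * the HYPERVISOR_set_segment_base hypercall, the Xen counterpart of the
 * native MSR writes.
 */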
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
        int ret = 0;
        int doit = task == current;
        int cpu;

        switch (code) {
        case ARCH_SET_GS:
                if (addr >= TASK_SIZE_OF(task))
                        return -EPERM;
                cpu = get_cpu();
                /* handle small bases via the GDT because that's faster to
                   switch. */
                if (addr <= 0xffffffff) {
                        set_32bit_tls(task, GS_TLS, addr);
                        if (doit) {
                                load_TLS(&task->thread, cpu);
                                load_gs_index(GS_TLS_SEL);
                        }
                        task->thread.gsindex = GS_TLS_SEL;
                        task->thread.gs = 0;
                } else {
                        task->thread.gsindex = 0;
                        task->thread.gs = addr;
                        if (doit) {
                                load_gs_index(0);
                                ret = HYPERVISOR_set_segment_base(
                                        SEGBASE_GS_USER, addr);
                        }
                }
                put_cpu();
                break;
        case ARCH_SET_FS:
                /* Not strictly needed for fs, but do it for symmetry
                   with gs */
                if (addr >= TASK_SIZE_OF(task))
                        return -EPERM;
                cpu = get_cpu();
                /* handle small bases via the GDT because that's faster to
                   switch. */
                if (addr <= 0xffffffff) {
                        set_32bit_tls(task, FS_TLS, addr);
                        if (doit) {
                                load_TLS(&task->thread, cpu);
                                loadsegment(fs, FS_TLS_SEL);
                        }
                        task->thread.fsindex = FS_TLS_SEL;
                        task->thread.fs = 0;
                } else {
                        task->thread.fsindex = 0;
                        task->thread.fs = addr;
                        if (doit) {
                                /* set the selector to 0 to not confuse
                                   __switch_to */
                                loadsegment(fs, 0);
                                ret = HYPERVISOR_set_segment_base(SEGBASE_FS,
                                                                  addr);
                        }
                }
                put_cpu();
                break;
        case ARCH_GET_FS: {
                unsigned long base;
                if (task->thread.fsindex == FS_TLS_SEL)
                        base = read_32bit_tls(task, FS_TLS);
                else if (doit)
                        rdmsrl(MSR_FS_BASE, base);
                else
                        base = task->thread.fs;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }
        case ARCH_GET_GS: {
                unsigned long base;
                unsigned gsindex;
                if (task->thread.gsindex == GS_TLS_SEL)
                        base = read_32bit_tls(task, GS_TLS);
                else if (doit) {
                        savesegment(gs, gsindex);
                        if (gsindex)
                                rdmsrl(MSR_KERNEL_GS_BASE, base);
                        else
                                base = task->thread.gs;
                } else
                        base = task->thread.gs;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
        return do_arch_prctl(current, code, addr);
}

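/*
 * Example userspace usage (illustrative only; some_address is a placeholder
 * for an arbitrary user-space address): a 64-bit task can move and query its
 * %gs base through the arch_prctl(2) interface implemented above.
 *
 *      #include <asm/prctl.h>
 *      #include <sys/syscall.h>
 *      #include <unistd.h>
 *
 *      unsigned long base;
 *      syscall(SYS_arch_prctl, ARCH_SET_GS, some_address);
 *      syscall(SYS_arch_prctl, ARCH_GET_GS, &base);
 *
 * Bases at or below 4GB end up in a GDT TLS slot; larger bases take the
 * HYPERVISOR_set_segment_base path.
 */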