#ifndef _ASM_X86_SYSTEM_H
#define _ASM_X86_SYSTEM_H

#include <asm/asm.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <asm/cmpxchg.h>
#include <asm/nops.h>
#include <asm/hypervisor.h>

#include <linux/kernel.h>
#include <linux/irqflags.h>
/* entries in ARCH_DLINFO: */
#ifdef CONFIG_IA32_EMULATION
# define AT_VECTOR_SIZE_ARCH 2
#else
# define AT_VECTOR_SIZE_ARCH 1
#endif
struct task_struct; /* one of the stranger aspects of C forward declarations */
struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next);
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p);

#ifdef CONFIG_X86_32
#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary						\
	"movl %P[task_canary](%[next]), %%ebx\n\t"		\
	"movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
#define __switch_canary_oparam					\
	, [stack_canary] "=m" (per_cpu_var(stack_canary.canary))
#define __switch_canary_iparam					\
	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else	/* CC_STACKPROTECTOR */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif	/* CC_STACKPROTECTOR */
/*
 * Saving eflags is important. It switches not only IOPL between tasks,
 * it also protects other tasks from NT leaking through sysenter etc.
 */
#define switch_to(prev, next, last)					\
do {									\
	/*								\
	 * Context-switching clobbers all registers, so we clobber	\
	 * them explicitly, via unused output variables.		\
	 * (EAX and EBP are not listed because EBP is saved/restored	\
	 * explicitly for wchan access and EAX is the return value of	\
	 * __switch_to())						\
	 */								\
	unsigned long ebx, ecx, edx, esi, edi;				\
									\
	asm volatile("pushfl\n\t"		/* save    flags */	\
		     "pushl %%ebp\n\t"		/* save    EBP   */	\
		     "movl %%esp,%[prev_sp]\n\t"  /* save    ESP   */	\
		     "movl %[next_sp],%%esp\n\t"  /* restore ESP   */	\
		     "movl $1f,%[prev_ip]\n\t"	/* save    EIP   */	\
		     "pushl %[next_ip]\n\t"	/* restore EIP   */	\
		     __switch_canary					\
		     "jmp __switch_to\n"	/* regparm call  */	\
		     "1:\t"						\
		     "popl %%ebp\n\t"		/* restore EBP   */	\
		     "popfl\n"			/* restore flags */	\
									\
		     /* output parameters */				\
		     : [prev_sp] "=m" (prev->thread.sp),		\
		       [prev_ip] "=m" (prev->thread.ip),		\
		       "=a" (last),					\
									\
		       /* clobbered output registers: */		\
		       "=b" (ebx), "=c" (ecx), "=d" (edx),		\
		       "=S" (esi), "=D" (edi)				\
									\
		       __switch_canary_oparam				\
									\
		       /* input parameters: */				\
		     : [next_sp]  "m" (next->thread.sp),		\
		       [next_ip]  "m" (next->thread.ip),		\
									\
		       /* regparm parameters for __switch_to(): */	\
		       [prev]     "a" (prev),				\
		       [next]     "d" (next)				\
									\
		       __switch_canary_iparam				\
									\
		     : /* reloaded segment registers */			\
			"memory");					\
} while (0)
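
/*
 * Illustrative call site (a sketch, not part of this header): the
 * scheduler's context_switch() invokes this macro as
 *
 *	switch_to(prev, next, prev);
 *
 * "last" is written from %eax after the switch so that the resuming
 * task learns which task it actually switched away from, which may
 * differ from its stale "prev" if the CPU ran other tasks in between.
 */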
/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
#else
#define __SAVE(reg, offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg, offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
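
/*
 * For example (illustrative expansion only), __SAVE(r12, 3) produces the
 * string "movq %%r12,(14-3)*8(%%rsp)\n\t", i.e. it spills r12 into a
 * fixed slot 88 bytes above the stack pointer.
 */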
/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
#define __EXTRA_CLOBBER  \
	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
	  "r12", "r13", "r14", "r15"
#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary						\
	"movq %P[task_canary](%%rsi),%%r8\n\t"			\
	"movq %%r8,"__percpu_arg([gs_canary])"\n\t"
#define __switch_canary_oparam					\
	, [gs_canary] "=m" (per_cpu_var(irq_stack_union.stack_canary))
#define __switch_canary_iparam					\
	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else	/* CC_STACKPROTECTOR */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif	/* CC_STACKPROTECTOR */
/* Save and restore flags to handle a leaking NT flag across the switch */
#define switch_to(prev, next, last) \
	asm volatile(SAVE_CONTEXT					  \
	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
	     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	  \
	     "call __switch_to\n\t"					  \
	     ".globl thread_return\n"					  \
	     "thread_return:\n\t"					  \
	     "movq "__percpu_arg([current_task])",%%rsi\n\t"		  \
	     __switch_canary						  \
	     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
	     "movq %%rax,%%rdi\n\t"					  \
	     "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t"		  \
	     "jnz ret_from_fork\n\t"					  \
	     RESTORE_CONTEXT						  \
	     : "=a" (last)						  \
	       __switch_canary_oparam					  \
	     : [next] "S" (next), [prev] "D" (prev),			  \
	       [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
	       [ti_flags] "i" (offsetof(struct thread_info, flags)),	  \
	       [_tif_fork] "i" (_TIF_FORK),				  \
	       [thread_info] "i" (offsetof(struct task_struct, stack)),  \
	       [current_task] "m" (per_cpu_var(current_task))		  \
	       __switch_canary_iparam					  \
	     : "memory", "cc" __EXTRA_CLOBBER)
#endif /* CONFIG_X86_32 */

#ifdef __KERNEL__

extern void xen_load_gs_index(unsigned);
/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg, value)			\
	asm volatile("\n"			\
		     "1:\t"			\
		     "movl %k0,%%" #seg "\n"	\
		     "2:\n"			\
		     ".section .fixup,\"ax\"\n"	\
		     "3:\t"			\
		     "movl %k1, %%" #seg "\n\t"	\
		     "jmp 2b\n"			\
		     ".previous\n"		\
		     _ASM_EXTABLE(1b,3b)	\
		     : :"r" (value), "r" (0) : "memory")
/*
 * Save a segment register away
 */
#define savesegment(seg, value)				\
	asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
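
/*
 * Illustrative pairing (a sketch, not part of this header): save the
 * current %fs selector and restore it later; if the restore faults,
 * loadsegment()'s fixup above loads the null selector instead:
 *
 *	unsigned int fsindex;
 *
 *	savesegment(fs, fsindex);
 *	...
 *	loadsegment(fs, fsindex);
 */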
/*
 * x86_32 user gs accessors.
 */
#ifdef CONFIG_X86_32
#ifdef CONFIG_X86_32_LAZY_GS
#define get_user_gs(regs)	(u16)({unsigned long v; savesegment(gs, v); v;})
#define set_user_gs(regs, v)	loadsegment(gs, (unsigned long)(v))
#define task_user_gs(tsk)	((tsk)->thread.gs)
#define lazy_save_gs(v)		savesegment(gs, (v))
#define lazy_load_gs(v)		loadsegment(gs, (v))
#else	/* X86_32_LAZY_GS */
#define get_user_gs(regs)	(u16)((regs)->gs)
#define set_user_gs(regs, v)	do { (regs)->gs = (v); } while (0)
#define task_user_gs(tsk)	(task_pt_regs(tsk)->gs)
#define lazy_save_gs(v)		do { } while (0)
#define lazy_load_gs(v)		do { } while (0)
#endif	/* X86_32_LAZY_GS */
#endif	/* X86_32 */
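
/*
 * Illustrative use (a sketch; "child" is a hypothetical traced task):
 * ptrace-style code reads and writes a stopped task's user %gs through
 * these accessors rather than touching the register directly:
 *
 *	u16 gs = get_user_gs(task_pt_regs(child));
 *	set_user_gs(task_pt_regs(child), gs);
 */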
static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
	return __limit + 1;
}
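
/*
 * Illustrative use (a sketch, not part of this header): lsll yields the
 * highest valid offset, so get_limit() returns the segment size in
 * bytes ("regs" below is hypothetical):
 *
 *	unsigned long size = get_limit(regs->cs);
 */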
static inline void xen_clts(void)
{
	HYPERVISOR_fpu_taskswitch(0);
}

static inline void xen_stts(void)
{
	HYPERVISOR_fpu_taskswitch(1);
}
/*
 * Volatile isn't enough to prevent the compiler from reordering the
 * read/write functions for the control registers and messing everything up.
 * A memory clobber would solve the problem, but would prevent reordering of
 * all loads and stores around it, which can hurt performance. The solution is
 * to use a variable and mimic reads and writes to it to enforce serialization.
 */
static unsigned long __force_order;
static inline unsigned long xen_read_cr0(void)
{
	unsigned long val;
	asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline void xen_write_cr0(unsigned long val)
{
	asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
}
#define xen_read_cr2() vcpu_info_read(arch.cr2)
#define xen_write_cr2(val) vcpu_info_write(arch.cr2, val)
static inline unsigned long xen_read_cr3(void)
{
	unsigned long val;
	asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
#ifdef CONFIG_X86_32
	return mfn_to_pfn(xen_cr3_to_pfn(val)) << PAGE_SHIFT;
#else
	return machine_to_phys(val);
#endif
}

static inline void xen_write_cr3(unsigned long val)
{
#ifdef CONFIG_X86_32
	val = xen_pfn_to_cr3(pfn_to_mfn(val >> PAGE_SHIFT));
#else
	val = phys_to_machine(val);
#endif
	asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
}
static inline unsigned long xen_read_cr4(void)
{
	unsigned long val;
	asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

#define xen_read_cr4_safe() xen_read_cr4()

static inline void xen_write_cr4(unsigned long val)
{
	asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
}
#ifdef CONFIG_X86_64
static inline unsigned long xen_read_cr8(void)
{
	return 0;
}

static inline void xen_write_cr8(unsigned long val)
{
	BUG_ON(val);
}
#endif
static inline void xen_wbinvd(void)
{
	asm volatile("wbinvd": : :"memory");
}
#define read_cr0()	(xen_read_cr0())
#define write_cr0(x)	(xen_write_cr0(x))
#define read_cr2()	(xen_read_cr2())
#define write_cr2(x)	(xen_write_cr2(x))
#define read_cr3()	(xen_read_cr3())
#define write_cr3(x)	(xen_write_cr3(x))
#define read_cr4()	(xen_read_cr4())
#define read_cr4_safe()	(xen_read_cr4_safe())
#define write_cr4(x)	(xen_write_cr4(x))
#define wbinvd()	(xen_wbinvd())
#ifdef CONFIG_X86_64
#define read_cr8()	(xen_read_cr8())
#define write_cr8(x)	(xen_write_cr8(x))
#define load_gs_index	xen_load_gs_index
#endif
/* Clear the 'TS' bit */
#define clts()	(xen_clts())
#define stts()	(xen_stts())
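
/*
 * Illustrative use (a sketch, not part of this header): the classic
 * lazy-FPU pattern clears CR0.TS before touching FPU/SSE state so the
 * instructions don't fault, then sets it again afterwards:
 *
 *	clts();
 *	... use FPU/SSE registers ...
 *	stts();
 */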

#endif /* __KERNEL__ */
static inline void clflush(volatile void *__p)
{
	asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
}
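
/*
 * Illustrative use (a sketch; "buf" and "size" are hypothetical): flush
 * every cache line backing a buffer, fencing on both sides so the
 * flushes are ordered against surrounding loads and stores:
 *
 *	void *p;
 *
 *	mb();
 *	for (p = buf; p < buf + size; p += boot_cpu_data.x86_clflush_size)
 *		clflush(p);
 *	mb();
 */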
#define nop() asm volatile ("nop")
void disable_hlt(void);
void enable_hlt(void);

void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void stop_this_cpu(void *dummy);
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#ifdef CONFIG_X86_32
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif
/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier. All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads. This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies. See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends(). However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b". Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
 * in cases like this where there are no data dependencies.
 **/
#define read_barrier_depends()	do { } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb()	rmb()
#else
# define smp_rmb()	barrier()
#endif
#ifdef CONFIG_X86_OOSTORE
# define smp_wmb()	wmb()
#else
# define smp_wmb()	barrier()
#endif
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
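
/*
 * Illustrative use (a sketch; "data" and "ready" are hypothetical shared
 * variables): pair smp_wmb() on the producer with smp_rmb() on the
 * consumer so the consumer never sees "ready" without the matching "data":
 *
 *	CPU 0 (producer)		CPU 1 (consumer)
 *
 *	data = 42;			while (!ready)
 *	smp_wmb();				cpu_relax();
 *	ready = 1;			smp_rmb();
 *					BUG_ON(data != 42);
 */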
/*
 * Stop RDTSC speculation. This is needed when you need to use RDTSC
 * (or get_cycles or vread that possibly accesses the TSC) in a defined
 * code region.
 *
 * (Could use a three-way alternative() for this if one existed.)
 */
static inline void rdtsc_barrier(void)
{
	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
}
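
/*
 * Illustrative use (a sketch, not part of this header): bracket the TSC
 * reads so neither the measured region nor the reads themselves can be
 * speculated across the measurement boundaries:
 *
 *	cycles_t t0, t1;
 *
 *	rdtsc_barrier();
 *	t0 = get_cycles();
 *	... code under measurement ...
 *	rdtsc_barrier();
 *	t1 = get_cycles();
 */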

#endif /* _ASM_X86_SYSTEM_H */