/* linux-flexiantxendom0-3.2.10.git: include/asm-x86/mach-xen/asm/system_64.h */
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/cmpxchg.h>

#include <asm/synch_bitops.h>
#include <asm/hypervisor.h>
#include <xen/interface/arch-x86_64.h>

#ifdef __KERNEL__

/* entries in ARCH_DLINFO: */
#ifdef CONFIG_IA32_EMULATION
# define AT_VECTOR_SIZE_ARCH 2
#else
# define AT_VECTOR_SIZE_ARCH 1
#endif

#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\n\t"

#define __EXTRA_CLOBBER  \
        ,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"

/* Save and restore flags around the switch to avoid leaking the NT flag */
#define switch_to(prev,next,last) \
        asm volatile(SAVE_CONTEXT                                             \
                     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */   \
                     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */\
                     "call __switch_to\n\t"                                   \
                     ".globl thread_return\n"                                 \
                     "thread_return:\n\t"                                     \
                     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"                   \
                     "movq %P[thread_info](%%rsi),%%r8\n\t"                   \
                     LOCK_PREFIX "btr  %[tif_fork],%P[ti_flags](%%r8)\n\t"    \
                     "movq %%rax,%%rdi\n\t"                                   \
                     "jc   ret_from_fork\n\t"                                 \
                     RESTORE_CONTEXT                                          \
                     : "=a" (last)                                            \
                     : [next] "S" (next), [prev] "D" (prev),                  \
                       [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
                       [ti_flags] "i" (offsetof(struct thread_info, flags)),  \
                       [tif_fork] "i" (TIF_FORK),                             \
                       [thread_info] "i" (offsetof(struct task_struct, stack)), \
                       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
                     : "memory", "cc" __EXTRA_CLOBBER)
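/*
 * Usage sketch (not part of the original header): the scheduler's
 * context_switch() is expected to invoke switch_to() roughly as below;
 * execution resumes on 'next's stack and 'last' receives the task that was
 * actually running before us.  Kept under #if 0 as a non-compiled,
 * illustrative sketch; the function name is hypothetical.
 */
#if 0
static inline struct task_struct *
example_context_switch(struct task_struct *prev, struct task_struct *next)
{
        struct task_struct *last;

        switch_to(prev, next, last);    /* we return here running as 'next' */
        return last;                    /* the task we switched away from   */
}
#endif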

extern void load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong.
 */
#define loadsegment(seg,value)  \
        asm volatile("\n"                       \
                "1:\t"                          \
                "movl %k0,%%" #seg "\n"         \
                "2:\n"                          \
                ".section .fixup,\"ax\"\n"      \
                "3:\t"                          \
                "movl %1,%%" #seg "\n\t"        \
                "jmp 2b\n"                      \
                ".previous\n"                   \
                ".section __ex_table,\"a\"\n\t" \
                ".align 8\n\t"                  \
                ".quad 1b,3b\n"                 \
                ".previous"                     \
                : :"r" (value), "r" (0))

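/*
 * Usage sketch (not part of the original header): %ds/%es/%fs can be
 * reloaded with loadsegment(), which recovers by loading the null selector
 * if the given one faults; %gs goes through load_gs_index() instead so the
 * hidden GS base handling stays correct.  The function name and the use of
 * the null selector are illustrative only.
 */
#if 0
static inline void example_clear_user_segments(void)
{
        loadsegment(fs, 0);             /* faults fall back to selector 0  */
        load_gs_index(0);               /* %gs must use load_gs_index()    */
}
#endif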
/*
 * Clear ('clts') and set ('stts', defined below) the TS bit in CR0,
 * both via the hypervisor.
 */
#define clts() (HYPERVISOR_fpu_taskswitch(0))

static inline unsigned long read_cr0(void)
{
        unsigned long cr0;
        asm volatile("movq %%cr0,%0" : "=r" (cr0));
        return cr0;
}

static inline void write_cr0(unsigned long val)
{
        asm volatile("movq %0,%%cr0" :: "r" (val));
}

#define read_cr2() current_vcpu_info()->arch.cr2

#define write_cr2(val) ((void)(current_vcpu_info()->arch.cr2 = (val)))

#define read_cr3() ({ \
        unsigned long __dummy; \
        asm volatile("movq %%cr3,%0" : "=r" (__dummy)); \
        machine_to_phys(__dummy); \
})

static inline void write_cr3(unsigned long val)
{
        val = phys_to_machine(val);
        asm volatile("movq %0,%%cr3" :: "r" (val) : "memory");
}
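/*
 * Illustrative sketch (not part of the original header): rewriting CR3 with
 * its current value is the classic way to flush the non-global TLB entries.
 * Note that read_cr3() already converts the machine frame back to a
 * pseudo-physical address, so the pair below round-trips correctly through
 * phys_to_machine() in write_cr3().  The function name is hypothetical.
 */
#if 0
static inline void example_flush_tlb_by_cr3_reload(void)
{
        write_cr3(read_cr3());          /* reload CR3 -> flush the TLB */
}
#endif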

static inline unsigned long read_cr4(void)
{
        unsigned long cr4;
        asm volatile("movq %%cr4,%0" : "=r" (cr4));
        return cr4;
}

static inline void write_cr4(unsigned long val)
{
        asm volatile("movq %0,%%cr4" :: "r" (val) : "memory");
}
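/*
 * Illustrative sketch (not part of the original header): CR4 updates are
 * typically read-modify-write on the value returned by read_cr4().  The
 * helper below is hypothetical; the caller would pass a feature mask that
 * is defined elsewhere.
 */
#if 0
static inline void example_set_cr4_bits(unsigned long mask)
{
        write_cr4(read_cr4() | mask);   /* set the requested CR4 bits */
}
#endif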

static inline unsigned long read_cr8(void)
{
        return 0;
}

static inline void write_cr8(unsigned long val)
{
        BUG_ON(val);
}

#define stts() (HYPERVISOR_fpu_taskswitch(1))

#define wbinvd() \
        __asm__ __volatile__ ("wbinvd": : :"memory")

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 */
static inline void sched_cacheflush(void)
{
        wbinvd();
}

#endif  /* __KERNEL__ */

static inline void clflush(volatile void *__p)
{
        asm volatile("clflush %0" : "+m" (*(char __force *)__p));
}
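/*
 * Illustrative sketch (not part of the original header): clflush() acts on a
 * single cache line, so flushing a buffer means stepping through it at the
 * cache-line stride and fencing afterwards.  The 64-byte stride and the
 * function name are assumptions; real code would use the detected
 * cache-line size.
 */
#if 0
static inline void example_clflush_range(void *addr, unsigned long size)
{
        char *p;

        for (p = addr; p < (char *)addr + size; p += 64)
                clflush(p);
        mb();                           /* order the flushes before later accesses */
}
#endif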

#define nop() __asm__ __volatile__ ("nop")

#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define smp_read_barrier_depends()      do {} while(0)
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define smp_read_barrier_depends()      do {} while(0)
#endif

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#define mb()    asm volatile("mfence":::"memory")
#define rmb()   asm volatile("lfence":::"memory")
#define wmb()   asm volatile("sfence" ::: "memory")

#define read_barrier_depends()  do {} while(0)
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
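/*
 * Illustrative sketch (not part of the original header): the smp_*()
 * barriers pair up across CPUs.  The producer publishes its data before
 * setting a flag, the consumer checks the flag before reading the data;
 * both sides need their barrier for the ordering to hold.  The variables
 * and functions below are hypothetical.
 */
#if 0
static int example_data, example_ready;

static inline void example_producer(int v)
{
        example_data = v;
        smp_wmb();                      /* order the data store before the flag */
        example_ready = 1;
}

static inline int example_consumer(void)
{
        if (!example_ready)
                return -1;
        smp_rmb();                      /* order the flag load before the data load */
        return example_data;
}
#endif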

#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)

#include <linux/irqflags.h>

void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

#endif  /* __ASM_SYSTEM_H */