- Update Xen patches to 3.3-rc5 and c/s 1157.
[linux-flexiantxendom0-3.2.10.git] arch/x86/include/mach-xen/asm/irqflags.h
#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/smp-processor-id.h>

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <xen/interface/vcpu.h>
/*
 * The 'barrier' calls below reflect the use of these macros as local-lock
 * operations: reentrancy must be prevented (as with __cli()) /before/ the
 * following critical operations are executed, and all critical operations
 * must complete /before/ reentrancy is permitted again (as with __sti()).
 * The Alpha architecture includes equivalent barriers, for example.
 */

#define xen_save_fl(void) vcpu_info_read(evtchn_upcall_mask)

#define xen_restore_fl(f)                                       \
do {                                                            \
        vcpu_info_t *_vcpu;                                     \
        barrier();                                              \
        _vcpu = current_vcpu_info();                            \
        if ((_vcpu->evtchn_upcall_mask = (f)) == 0) {           \
                barrier(); /* unmask then check (avoid races) */\
                if (unlikely(_vcpu->evtchn_upcall_pending))     \
                        force_evtchn_callback();                \
        }                                                       \
} while (0)
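
/*
 * Illustrative usage sketch (not part of the original header): the saved
 * "flags" value is simply this vCPU's evtchn_upcall_mask byte, so a
 * non-zero value means event (interrupt) delivery was masked when the
 * flags were read.  A typical save/restore pairing looks like:
 *
 *      unsigned long flags = xen_save_fl();    read the current mask
 *      ...                                     possibly mask/unmask events
 *      xen_restore_fl(flags);                  restore the mask and force
 *                                              an upcall for anything that
 *                                              became pending meanwhile
 */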

#define xen_irq_disable()                                       \
do {                                                            \
        vcpu_info_write(evtchn_upcall_mask, 1);                 \
        barrier();                                              \
} while (0)

#define xen_irq_enable()                                        \
do {                                                            \
        vcpu_info_t *_vcpu;                                     \
        barrier();                                              \
        _vcpu = current_vcpu_info();                            \
        _vcpu->evtchn_upcall_mask = 0;                          \
        barrier(); /* unmask then check (avoid races) */        \
        if (unlikely(_vcpu->evtchn_upcall_pending))             \
                force_evtchn_callback();                        \
} while (0)
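
/*
 * Minimal pairing sketch (illustrative only): the barrier() calls keep the
 * compiler from moving critical-section accesses outside the masked
 * region, mirroring what native cli/sti provide implicitly.
 *
 *      xen_irq_disable();      mask event delivery on this vCPU
 *      ...                     critical section
 *      xen_irq_enable();       unmask, then force an event-channel upcall
 *                              if something arrived while we were masked
 */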

#define arch_local_save_flags() xen_save_fl()

#define arch_local_irq_restore(flags) xen_restore_fl(flags)

#define arch_local_irq_disable()        xen_irq_disable()

#define arch_local_irq_enable() xen_irq_enable()

/*
 * Used in the idle loop; instead of the native "sti; hlt" sequence this
 * blocks the vCPU in the hypervisor until an event is pending:
 */
#define arch_safe_halt HYPERVISOR_block

/*
 * Used when interrupts are already enabled or to
 * shut down the processor:
 */
#define halt() VOID(irqs_disabled()                                     \
                    ? HYPERVISOR_vcpu_op(VCPUOP_down,                   \
                                         smp_processor_id(), NULL)      \
                    : 0)
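
/*
 * Rough sketch of the intended callers (illustrative, not part of this
 * header): an idle loop in the classic default_idle() style blocks the
 * vCPU in the hypervisor instead of executing a real hlt, while halt()
 * with events masked takes the vCPU down via VCPUOP_down:
 *
 *      arch_local_irq_disable();
 *      if (!need_resched())
 *              arch_safe_halt();               HYPERVISOR_block() until an
 *      else                                    event is pending
 *              arch_local_irq_enable();
 */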

/*
 * For spinlocks, etc:
 */
#define arch_local_irq_save()                                           \
({                                                                      \
        unsigned long flags = arch_local_save_flags();                  \
                                                                        \
        arch_local_irq_disable();                                       \
                                                                        \
        flags;                                                          \
})
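
/*
 * Typical caller pattern (a sketch; the generic local_irq_save() and
 * local_irq_restore() wrappers in <linux/irqflags.h> expand to these
 * arch_* macros):
 *
 *      unsigned long flags;
 *
 *      flags = arch_local_irq_save();          save mask state, then mask
 *      ...                                     critical section
 *      arch_local_irq_restore(flags);          restore the previous state
 */
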
#else

/* Offsets into shared_info_t. */
#define evtchn_upcall_pending           /* 0 */
#define evtchn_upcall_mask              1

#ifdef CONFIG_X86_64
# define __REG_si %rsi
# define __CPU_num PER_CPU_VAR(cpu_number)
#else
# define __REG_si %esi
# define __CPU_num TI_cpu(%ebp)
#endif

#ifdef CONFIG_XEN_VCPU_INFO_PLACEMENT

#define GET_VCPU_INFO           PER_CPU(vcpu_info, __REG_si)
#define __DISABLE_INTERRUPTS    movb $1,PER_CPU_VAR(vcpu_info+evtchn_upcall_mask)
#define __ENABLE_INTERRUPTS     movb $0,PER_CPU_VAR(vcpu_info+evtchn_upcall_mask)
#define __TEST_PENDING          cmpb $0,PER_CPU_VAR(vcpu_info+evtchn_upcall_pending+0)
#define DISABLE_INTERRUPTS(clb) __DISABLE_INTERRUPTS
#define ENABLE_INTERRUPTS(clb)  __ENABLE_INTERRUPTS

#define __SIZEOF_DISABLE_INTERRUPTS 8
#define __SIZEOF_TEST_PENDING   8

#else /* CONFIG_XEN_VCPU_INFO_PLACEMENT */

#define sizeof_vcpu_shift       6

#ifdef CONFIG_SMP
#define GET_VCPU_INFO           movl __CPU_num,%esi                     ; \
                                shl $sizeof_vcpu_shift,%esi             ; \
                                add HYPERVISOR_shared_info,__REG_si
#else
#define GET_VCPU_INFO           mov HYPERVISOR_shared_info,__REG_si
#endif

#define __DISABLE_INTERRUPTS    movb $1,evtchn_upcall_mask(__REG_si)
#define __ENABLE_INTERRUPTS     movb $0,evtchn_upcall_mask(__REG_si)
#define __TEST_PENDING          testb $0xFF,evtchn_upcall_pending(__REG_si)
#define DISABLE_INTERRUPTS(clb) GET_VCPU_INFO                           ; \
                                __DISABLE_INTERRUPTS
#define ENABLE_INTERRUPTS(clb)  GET_VCPU_INFO                           ; \
                                __ENABLE_INTERRUPTS

#define __SIZEOF_DISABLE_INTERRUPTS 4
#define __SIZEOF_TEST_PENDING   3

#endif /* CONFIG_XEN_VCPU_INFO_PLACEMENT */
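
/*
 * Hedged sketch of how the entry code is expected to use these accessors
 * (the real call sites live in the entry assembly, not in this header):
 *
 *      DISABLE_INTERRUPTS(CLBR_ANY)    mask event delivery; without
 *                                      CONFIG_XEN_VCPU_INFO_PLACEMENT this
 *                                      clobbers __REG_si via GET_VCPU_INFO
 *      ...
 *      ENABLE_INTERRUPTS(CLBR_ANY)     unmask again; __TEST_PENDING is used
 *                                      where a pending event must be
 *                                      noticed immediately
 */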

#ifndef CONFIG_X86_64
#define INTERRUPT_RETURN                iret
#define ENABLE_INTERRUPTS_SYSEXIT                                         \
        movb $0,evtchn_upcall_mask(%esi) /* __ENABLE_INTERRUPTS */      ; \
sysexit_scrit:  /**** START OF SYSEXIT CRITICAL REGION ****/            ; \
        cmpb $0,evtchn_upcall_pending(%esi) /* __TEST_PENDING */        ; \
        jnz  14f        /* process more events if necessary... */       ; \
        movl PT_ESI(%esp), %esi                                         ; \
        sysexit                                                         ; \
14:     movb $1,evtchn_upcall_mask(%esi) /* __DISABLE_INTERRUPTS */     ; \
        TRACE_IRQS_OFF                                                  ; \
sysexit_ecrit:  /**** END OF SYSEXIT CRITICAL REGION ****/              ; \
        mov  $__KERNEL_PERCPU, %ecx                                     ; \
        push %esp                                                       ; \
        mov  %ecx, %fs                                                  ; \
        SET_KERNEL_GS %ecx                                              ; \
        call evtchn_do_upcall                                           ; \
        add  $4,%esp                                                    ; \
        jmp  ret_from_intr
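
/*
 * Note on the labels above (descriptive comment, added for clarity): between
 * sysexit_scrit and sysexit_ecrit the upcall mask is clear but we have not
 * yet left the kernel, so an event may be delivered mid-sequence. The
 * hypervisor-callback path in the 32-bit entry code is expected to detect an
 * interrupted EIP inside this window and replay or complete the sequence so
 * the unmask/check/sysexit steps behave atomically.
 */
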
#endif


#endif /* __ASSEMBLY__ */

#ifndef __ASSEMBLY__
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
        return (flags != 0);
}

#define arch_irqs_disabled()                                            \
({                                                                      \
        unsigned long flags = arch_local_save_flags();                  \
                                                                        \
        arch_irqs_disabled_flags(flags);                                \
})
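
/*
 * Illustrative check (a sketch, not part of the original header): since the
 * flags value is just the evtchn_upcall_mask byte, any non-zero value reads
 * as "interrupts disabled", e.g.
 *
 *      WARN_ON_ONCE(arch_irqs_disabled());     complain if events are masked
 *                                              where they must not be
 */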

#else

#ifdef CONFIG_X86_64
#define ARCH_LOCKDEP_SYS_EXIT           call lockdep_sys_exit_thunk
#define ARCH_LOCKDEP_SYS_EXIT_IRQ       \
        TRACE_IRQS_ON; \
        ENABLE_INTERRUPTS(CLBR_NONE); \
        SAVE_REST; \
        LOCKDEP_SYS_EXIT; \
        RESTORE_REST; \
        __DISABLE_INTERRUPTS; \
        TRACE_IRQS_OFF;

#else
#define ARCH_LOCKDEP_SYS_EXIT                   \
        pushl %eax;                             \
        pushl %ecx;                             \
        pushl %edx;                             \
        call lockdep_sys_exit;                  \
        popl %edx;                              \
        popl %ecx;                              \
        popl %eax;

#define ARCH_LOCKDEP_SYS_EXIT_IRQ
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
#  define TRACE_IRQS_ON         call trace_hardirqs_on_thunk;
#  define TRACE_IRQS_OFF        call trace_hardirqs_off_thunk;
#else
#  define TRACE_IRQS_ON
#  define TRACE_IRQS_OFF
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#  define LOCKDEP_SYS_EXIT      ARCH_LOCKDEP_SYS_EXIT
#  define LOCKDEP_SYS_EXIT_IRQ  ARCH_LOCKDEP_SYS_EXIT_IRQ
# else
#  define LOCKDEP_SYS_EXIT
#  define LOCKDEP_SYS_EXIT_IRQ
# endif

#endif /* __ASSEMBLY__ */
#endif /* _X86_IRQFLAGS_H_ */