1 #ifndef _X86_IRQFLAGS_H_
2 #define _X86_IRQFLAGS_H_
4 #include <asm/smp-processor-id.h>
7 #include <linux/types.h>
8 #include <xen/interface/vcpu.h>
10 * The use of 'barrier' in the following reflects their use as local-lock
11 * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
12 * critical operations are executed. All critical operations must complete
13 * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
14 * includes these barriers, for example.
/*
 * Read this vCPU's event-channel upcall mask (the Xen analogue of the
 * EFLAGS.IF state): nonzero means event delivery ("interrupts") is masked.
 */
17 #define xen_save_fl(void) vcpu_info_read(evtchn_upcall_mask)
19 #define xen_restore_fl(f) \
23 _vcpu = current_vcpu_info(); \
24 if ((_vcpu->evtchn_upcall_mask = (f)) == 0) { \
25 barrier(); /* unmask then check (avoid races) */\
26 if (unlikely(_vcpu->evtchn_upcall_pending)) \
27 force_evtchn_callback(); \
31 #define xen_irq_disable() \
33 vcpu_info_write(evtchn_upcall_mask, 1); \
37 #define xen_irq_enable() \
41 _vcpu = current_vcpu_info(); \
42 _vcpu->evtchn_upcall_mask = 0; \
43 barrier(); /* unmask then check (avoid races) */ \
44 if (unlikely(_vcpu->evtchn_upcall_pending)) \
45 force_evtchn_callback(); \
/*
 * Map the generic arch_local_* irqflags API onto the Xen event-mask
 * primitives defined above; "flags" here is the upcall mask, not EFLAGS.
 */
48 #define arch_local_save_flags() xen_save_fl()
50 #define arch_local_irq_restore(flags) xen_restore_fl(flags)
52 #define arch_local_irq_disable() xen_irq_disable()
54 #define arch_local_irq_enable() xen_irq_enable()
57 * Used in the idle loop; sti takes one instruction cycle
/*
 * Idle-loop "halt": instead of hlt, block this vCPU in the hypervisor
 * until an event is pending for it.
 */
60 #define arch_safe_halt HYPERVISOR_block
63 * Used when interrupts are already enabled or to
64 * shutdown the processor:
66 #define halt() VOID(irqs_disabled() \
67 ? HYPERVISOR_vcpu_op(VCPUOP_down, \
68 smp_processor_id(), NULL) \
74 #define arch_local_irq_save() \
76 unsigned long flags = arch_local_save_flags(); \
78 arch_local_irq_disable(); \
84 /* Offsets into shared_info_t. */
/*
 * evtchn_upcall_pending lives at byte offset 0, so the macro deliberately
 * expands to nothing: in asm, "evtchn_upcall_pending(%reg)" then becomes
 * plain "(%reg)", and "evtchn_upcall_pending+0" becomes "+0".
 */
85 #define evtchn_upcall_pending /* 0 */
86 #define evtchn_upcall_mask 1
/*
 * Scratch register holding the vcpu_info pointer, and how to fetch the
 * current CPU number, per bitness.
 * NOTE(review): the first pair appears to be the 64-bit branch and the
 * second the 32-bit branch of a CONFIG_X86_64 #ifdef not visible in this
 * view — confirm against the full file.
 */
89 # define __REG_si %rsi
90 # define __CPU_num PER_CPU_VAR(cpu_number)
92 # define __REG_si %esi
93 # define __CPU_num TI_cpu(%ebp)
96 #ifdef CONFIG_XEN_VCPU_INFO_PLACEMENT
/*
 * vcpu_info has been placed in per-CPU data, so the upcall fields can be
 * addressed directly through PER_CPU_VAR — no pointer load into __REG_si
 * is needed before masking/unmasking or testing for pending events.
 */
98 #define GET_VCPU_INFO PER_CPU(vcpu_info, __REG_si)
99 #define __DISABLE_INTERRUPTS movb $1,PER_CPU_VAR(vcpu_info+evtchn_upcall_mask)
100 #define __ENABLE_INTERRUPTS movb $0,PER_CPU_VAR(vcpu_info+evtchn_upcall_mask)
101 #define __TEST_PENDING cmpb $0,PER_CPU_VAR(vcpu_info+evtchn_upcall_pending+0)
/* Direct per-CPU addressing: no GET_VCPU_INFO prologue required here. */
102 #define DISABLE_INTERRUPTS(clb) __DISABLE_INTERRUPTS
103 #define ENABLE_INTERRUPTS(clb) __ENABLE_INTERRUPTS
/*
 * Encoded byte lengths of the movb/cmpb instructions above — presumably
 * consumed by code that patches or skips over them; verify at call sites.
 */
105 #define __SIZEOF_DISABLE_INTERRUPTS 8
106 #define __SIZEOF_TEST_PENDING 8
108 #else /* CONFIG_XEN_VCPU_INFO_PLACEMENT */
/*
 * log2 of the per-vCPU stride (64 bytes) used to scale a CPU number into
 * its vcpu_info slot inside shared_info; see GET_VCPU_INFO.
 */
110 #define sizeof_vcpu_shift 6
/*
 * Compute &HYPERVISOR_shared_info->vcpu_info[cpu] into __REG_si:
 * load the CPU number, scale it by 1 << sizeof_vcpu_shift, then add the
 * shared_info base.
 * NOTE(review): presumably the SMP branch of an #ifdef whose guard is not
 * visible in this view (a simpler definition follows) — confirm.
 */
113 #define GET_VCPU_INFO movl __CPU_num,%esi ; \
114 shl $sizeof_vcpu_shift,%esi ; \
115 add HYPERVISOR_shared_info,__REG_si
/*
 * Alternate GET_VCPU_INFO: vcpu_info[0] sits at the very start of
 * shared_info, so just load the base pointer.
 * NOTE(review): presumably the uniprocessor branch of the #ifdef guarding
 * the indexed variant above — the conditional itself is outside this view.
 */
117 #define GET_VCPU_INFO mov HYPERVISOR_shared_info,__REG_si
/*
 * Mask/unmask event delivery and test for pending upcalls through the
 * vcpu_info pointer; __REG_si must already have been loaded by
 * GET_VCPU_INFO before any of these are used.
 */
120 #define __DISABLE_INTERRUPTS movb $1,evtchn_upcall_mask(__REG_si)
121 #define __ENABLE_INTERRUPTS movb $0,evtchn_upcall_mask(__REG_si)
122 #define __TEST_PENDING testb $0xFF,evtchn_upcall_pending(__REG_si)
123 #define DISABLE_INTERRUPTS(clb) GET_VCPU_INFO ; \
125 #define ENABLE_INTERRUPTS(clb) GET_VCPU_INFO ; \
/*
 * Encoded byte lengths of the movb/testb forms above (shorter than the
 * PER_CPU_VAR encodings) — presumably consumed by code that patches or
 * skips over these instructions; verify at call sites.
 */
128 #define __SIZEOF_DISABLE_INTERRUPTS 4
129 #define __SIZEOF_TEST_PENDING 3
131 #endif /* CONFIG_XEN_VCPU_INFO_PLACEMENT */
133 #ifndef CONFIG_X86_64
/* 32-bit return from interrupt/exception context is a plain iret. */
134 #define INTERRUPT_RETURN iret
135 #define ENABLE_INTERRUPTS_SYSEXIT \
136 movb $0,evtchn_upcall_mask(%esi) /* __ENABLE_INTERRUPTS */ ; \
137 sysexit_scrit: /**** START OF SYSEXIT CRITICAL REGION ****/ ; \
138 cmpb $0,evtchn_upcall_pending(%esi) /* __TEST_PENDING */ ; \
139 jnz 14f /* process more events if necessary... */ ; \
140 movl PT_ESI(%esp), %esi ; \
142 14: movb $1,evtchn_upcall_mask(%esi) /* __DISABLE_INTERRUPTS */ ; \
144 sysexit_ecrit: /**** END OF SYSEXIT CRITICAL REGION ****/ ; \
145 mov $__KERNEL_PERCPU, %ecx ; \
148 SET_KERNEL_GS %ecx ; \
149 call evtchn_do_upcall ; \
155 #endif /* __ASSEMBLY__ */
158 static inline int arch_irqs_disabled_flags(unsigned long flags)
163 #define arch_irqs_disabled() \
165 unsigned long flags = arch_local_save_flags(); \
167 arch_irqs_disabled_flags(flags); \
/*
 * Asm hook invoked on the syscall-exit path to let lockdep audit held
 * locks; the _thunk variant presumably preserves caller registers —
 * confirm against the thunk implementation.
 */
173 #define ARCH_LOCKDEP_SYS_EXIT call lockdep_sys_exit_thunk
174 #define ARCH_LOCKDEP_SYS_EXIT_IRQ \
176 ENABLE_INTERRUPTS(CLBR_NONE); \
180 __DISABLE_INTERRUPTS; \
184 #define ARCH_LOCKDEP_SYS_EXIT \
188 call lockdep_sys_exit; \
/* Intentionally empty: no irq-path lockdep exit hook in this configuration. */
193 #define ARCH_LOCKDEP_SYS_EXIT_IRQ
196 #ifdef CONFIG_TRACE_IRQFLAGS
/*
 * Asm hooks that inform the irq-state tracer when events are (un)masked;
 * they expand to nothing when CONFIG_TRACE_IRQFLAGS is off (the second
 * pair of definitions — the #else separating them is elided in this view).
 */
197 # define TRACE_IRQS_ON call trace_hardirqs_on_thunk;
198 # define TRACE_IRQS_OFF call trace_hardirqs_off_thunk;
200 # define TRACE_IRQS_ON
201 # define TRACE_IRQS_OFF
203 #ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Route the generic LOCKDEP_SYS_EXIT hooks to the arch implementations
 * under CONFIG_DEBUG_LOCK_ALLOC; otherwise they expand to nothing (the
 * second pair — the #else separating them is elided in this view).
 */
204 # define LOCKDEP_SYS_EXIT ARCH_LOCKDEP_SYS_EXIT
205 # define LOCKDEP_SYS_EXIT_IRQ ARCH_LOCKDEP_SYS_EXIT_IRQ
207 # define LOCKDEP_SYS_EXIT
208 # define LOCKDEP_SYS_EXIT_IRQ
211 #endif /* __ASSEMBLY__ */