/* arch/x86/include/mach-xen/asm/synch_bitops.h */
#ifndef __XEN_SYNCH_BITOPS_H__
#define __XEN_SYNCH_BITOPS_H__

/*
 * Copyright 1992, Linus Torvalds.
 * Heavily modified to provide guaranteed strong synchronisation
 * when communicating with Xen or other guest OSes running on other CPUs.
 */

#ifdef HAVE_XEN_PLATFORM_COMPAT_H
#include <xen/platform-compat.h>
#endif

/* Access the bitmap through a volatile long so the asm gets a real memory operand. */
#define ADDR (*(volatile long *) addr)

/* Atomically set bit nr at addr; lock-prefixed, so safe against other
 * CPUs and against other guests sharing the page. */
static __inline__ void synch_set_bit(int nr, volatile void *addr)
{
    __asm__ __volatile__ (
        "lock btsl %1,%0"
        : "+m" (ADDR) : "Ir" (nr) : "memory" );
}

/* Atomically clear bit nr at addr. */
static __inline__ void synch_clear_bit(int nr, volatile void *addr)
{
    __asm__ __volatile__ (
        "lock btrl %1,%0"
        : "+m" (ADDR) : "Ir" (nr) : "memory" );
}

/* Atomically flip bit nr at addr. */
static __inline__ void synch_change_bit(int nr, volatile void *addr)
{
    __asm__ __volatile__ (
        "lock btcl %1,%0"
        : "+m" (ADDR) : "Ir" (nr) : "memory" );
}

/* Atomically set bit nr and return its previous value (nonzero if it was set). */
static __inline__ int synch_test_and_set_bit(int nr, volatile void *addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "lock btsl %2,%1\n\tsbbl %0,%0"  /* sbb copies CF (the old bit) into oldbit */
        : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
    return oldbit;
}

/* Atomically clear bit nr and return its previous value. */
static __inline__ int synch_test_and_clear_bit(int nr, volatile void *addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "lock btrl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
    return oldbit;
}

/* Atomically flip bit nr and return its previous value. */
static __inline__ int synch_test_and_change_bit(int nr, volatile void *addr)
{
    int oldbit;

    __asm__ __volatile__ (
        "lock btcl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
    return oldbit;
}
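
/*
 * Illustrative sketch, not part of the original header: a producer marks
 * an event-channel-style pending bit and notifies the peer only on a
 * 0->1 transition.  'shared' and notify_peer() are hypothetical names.
 *
 *     if (!synch_test_and_set_bit(port, &shared->pending[0]))
 *             notify_peer(port);
 *
 * The consumer would call synch_clear_bit() once the event is handled.
 */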

/*
 * Casting the target to a large dummy struct makes the "m" constraint
 * cover the whole object, preventing GCC from caching the value in a
 * register across the cmpxchg.
 */
struct __synch_xchg_dummy { unsigned long a[100]; };
#define __synch_xg(x) ((struct __synch_xchg_dummy *)(x))

/* Type-generic compare-and-exchange; returns the value previously at *ptr. */
#define synch_cmpxchg(ptr, old, new) \
((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\
                                     (unsigned long)(old), \
                                     (unsigned long)(new), \
                                     sizeof(*(ptr))))

static inline unsigned long __synch_cmpxchg(volatile void *ptr,
                                            unsigned long old,
                                            unsigned long new, int size)
{
        unsigned long prev;

        /* cmpxchg compares %eax (loaded from 'old' via the "0" constraint)
         * with *ptr; on a match it stores 'new', otherwise it loads the
         * current value into %eax, which we return as 'prev'. */
        switch (size) {
        case 1:
                __asm__ __volatile__("lock; cmpxchgb %b1,%2"
                                     : "=a"(prev)
                                     : "q"(new), "m"(*__synch_xg(ptr)),
                                       "0"(old)
                                     : "memory");
                return prev;
        case 2:
                __asm__ __volatile__("lock; cmpxchgw %w1,%2"
                                     : "=a"(prev)
                                     : "r"(new), "m"(*__synch_xg(ptr)),
                                       "0"(old)
                                     : "memory");
                return prev;
#ifdef CONFIG_X86_64
        case 4:
                __asm__ __volatile__("lock; cmpxchgl %k1,%2"
                                     : "=a"(prev)
                                     : "r"(new), "m"(*__synch_xg(ptr)),
                                       "0"(old)
                                     : "memory");
                return prev;
        case 8:
                __asm__ __volatile__("lock; cmpxchgq %1,%2"
                                     : "=a"(prev)
                                     : "r"(new), "m"(*__synch_xg(ptr)),
                                       "0"(old)
                                     : "memory");
                return prev;
#else
        case 4:
                __asm__ __volatile__("lock; cmpxchgl %1,%2"
                                     : "=a"(prev)
                                     : "r"(new), "m"(*__synch_xg(ptr)),
                                       "0"(old)
                                     : "memory");
                return prev;
#endif
        }
        return old;     /* unreachable for the operand sizes accepted above */
}
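
/*
 * Illustrative sketch, not part of the original header: claiming a shared
 * slot with synch_cmpxchg().  A slot holding 0 is free; the claim succeeds
 * iff the previous value returned is still 0.  Names are hypothetical.
 *
 *     static inline int claim_slot(volatile unsigned int *slot,
 *                                  unsigned int id)
 *     {
 *             return synch_cmpxchg(slot, 0, id) == 0;
 *     }
 */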

/* Reads need no lock prefix on x86, so the generic test_bit() suffices. */
#define synch_test_bit test_bit

/* x86's cmpxchg handles byte/word operands directly, so no subword
 * emulation is needed; alias it to the full-size version. */
#define synch_cmpxchg_subword synch_cmpxchg

#endif /* __XEN_SYNCH_BITOPS_H__ */