/*
 * Spin and read/write lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *   Rework to support virtual processors
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/stringify.h>
#include <asm/hvcall.h>
#include <asm/iSeries/HvCall.h>

#ifndef CONFIG_SPINLINE
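/*
 * With CONFIG_SPINLINE the lock primitives are inlined at each call
 * site instead, so none of these out-of-line versions are built (an
 * assumption based on the option name and this guard).
 */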

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */
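/*
 * For example, if (hypothetically) CPU 3 holds a lock, the lock word
 * reads 0x80000003, while 0 means the lock is free.  The low-order
 * bits therefore identify the holder, which is what __spin_yield()
 * below recovers with its 0xffff mask.
 */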

/* waiting for a spinlock... */
#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)

/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (get_paca()->lppaca.xSharedProc)

void __spin_yield(spinlock_t *lock)
{
        unsigned int lock_value, holder_cpu, yield_count;
        struct paca_struct *holder_paca;

        lock_value = lock->lock;
        if (lock_value == 0)
                return;         /* lock is now free */
        holder_cpu = lock_value & 0xffff;
        BUG_ON(holder_cpu >= NR_CPUS);
        holder_paca = &paca[holder_cpu];
        yield_count = holder_paca->lppaca.xYieldCount;
        if ((yield_count & 1) == 0)
                return;         /* virtual cpu is currently running */
        /*
         * The yield count is odd, so the holder has been preempted.
         * Re-read the lock word (ordered after the yield count read)
         * to make sure the same holder still has the lock.
         */
        rmb();
        if (lock->lock != lock_value)
                return;         /* something has changed */
#ifdef CONFIG_PPC_ISERIES
        HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
                ((u64)holder_cpu << 32) | yield_count);
#else
        plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(holder_cpu),
                           yield_count);
#endif
}
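/*
 * Note that both hypervisor paths pass the holder's identity together
 * with the yield count we sampled, presumably so the hypervisor can
 * discard a stale request if the holder has been dispatched again in
 * the meantime rather than yield to a vcpu that is already running.
 */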

#else /* SPLPAR || ISERIES */
#define __spin_yield(x) barrier()
#define SHARED_PROCESSOR        0
#endif

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static __inline__ unsigned long __spin_trylock(spinlock_t *lock)
{
        unsigned long tmp, tmp2;

        __asm__ __volatile__(
"       lwz             %1,%3(13)       # __spin_trylock: lock token from the paca (r13)\n\
1:      lwarx           %0,0,%2         # load lock word and reserve\n\
        cmpwi           0,%0,0          # is the lock free?\n\
        bne-            2f              # no: fail, return old value\n\
        stwcx.          %1,0,%2         # try to store our token\n\
        bne-            1b              # lost the reservation, retry\n\
        isync                           # acquire barrier\n\
2:"     : "=&r" (tmp), "=&r" (tmp2)
        : "r" (&lock->lock), "i" (offsetof(struct paca_struct, lock_token))
        : "cr0", "memory");

        return tmp;
}
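/*
 * Roughly equivalent C, ignoring the atomicity that lwarx/stwcx.
 * provide (a sketch for illustration only):
 *
 *      old = lock->lock;
 *      if (old == 0)
 *              lock->lock = get_paca()->lock_token;
 *      return old;
 */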

int _raw_spin_trylock(spinlock_t *lock)
{
        return __spin_trylock(lock) == 0;
}

EXPORT_SYMBOL(_raw_spin_trylock);

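/*
 * While waiting we spin at low SMT hardware thread priority (HMT_low)
 * so that a busy-waiting thread takes fewer pipeline resources from
 * its sibling, and go back to medium priority (HMT_medium) once the
 * lock looks free.
 */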
void _raw_spin_lock(spinlock_t *lock)
{
        while (1) {
                if (likely(__spin_trylock(lock) == 0))
                        break;
                do {
                        HMT_low();
                        if (SHARED_PROCESSOR)
                                __spin_yield(lock);
                } while (likely(lock->lock != 0));
                HMT_medium();
        }
}

EXPORT_SYMBOL(_raw_spin_lock);

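/*
 * The caller has already disabled interrupts, with the pre-disable
 * state in 'flags'.  While we wait for the lock we restore that state
 * (possibly re-enabling interrupts) so that pending interrupts can be
 * serviced, then disable them again before retrying the acquisition.
 */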
void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
{
        unsigned long flags_dis;

        while (1) {
                if (likely(__spin_trylock(lock) == 0))
                        break;
                /* spin with the caller's pre-disable interrupt state */
                local_save_flags(flags_dis);
                local_irq_restore(flags);
                do {
                        HMT_low();
                        if (SHARED_PROCESSOR)
                                __spin_yield(lock);
                } while (likely(lock->lock != 0));
                HMT_medium();
                local_irq_restore(flags_dis);   /* disable again before retry */
        }
}

EXPORT_SYMBOL(_raw_spin_lock_flags);

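/*
 * spin_unlock_wait() spins until the lock is observed free, without
 * ever taking it; callers use it to wait out a current holder.  It
 * yields to the hypervisor in shared processor mode just like the
 * lock loops above.
 */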
void spin_unlock_wait(spinlock_t *lock)
{
        while (lock->lock) {
                HMT_low();
                if (SHARED_PROCESSOR)
                        __spin_yield(lock);
        }
        HMT_medium();
}

EXPORT_SYMBOL(spin_unlock_wait);

/*
 * Waiting for a read lock or a write lock on a rwlock...
 * This turns out to be the same for read and write locks, since
 * we only know the holder if it is write-locked.
 */
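/*
 * The rwlock word is interpreted as a signed value: 0 means unlocked,
 * a positive value is the number of readers, and a negative value (the
 * writer's 0x80000000 | cpu lock token) identifies the write holder.
 * So, hypothetically, two readers give a lock word of 2, while CPU 3
 * write-locking gives 0x80000003.
 */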
#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
void __rw_yield(rwlock_t *rw)
{
        int lock_value;
        unsigned int holder_cpu, yield_count;
        struct paca_struct *holder_paca;

        lock_value = rw->lock;
        if (lock_value >= 0)
                return;         /* no write lock at present */
        holder_cpu = lock_value & 0xffff;
        BUG_ON(holder_cpu >= NR_CPUS);
        holder_paca = &paca[holder_cpu];
        yield_count = holder_paca->lppaca.xYieldCount;
        if ((yield_count & 1) == 0)
                return;         /* virtual cpu is currently running */
        /* re-check the holder after reading the yield count */
        rmb();
        if (rw->lock != lock_value)
                return;         /* something has changed */
#ifdef CONFIG_PPC_ISERIES
        HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
                ((u64)holder_cpu << 32) | yield_count);
#else
        plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(holder_cpu),
                           yield_count);
#endif
}

#else /* SPLPAR || ISERIES */
#define __rw_yield(x)   barrier()
#endif

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static __inline__ long __read_trylock(rwlock_t *rw)
{
        long tmp;

        __asm__ __volatile__(
"1:     lwarx           %0,0,%1         # read_trylock: load lock word, reserve\n\
        extsw           %0,%0           # sign-extend so a writer's token tests negative\n\
        addic.          %0,%0,1         # bump the reader count\n\
        ble-            2f              # fail if result <= 0 (was write-locked)\n\
        stwcx.          %0,0,%1         # try to store the new count\n\
        bne-            1b              # lost the reservation, retry\n\
        isync                           # acquire barrier\n\
2:"     : "=&r" (tmp)
        : "r" (&rw->lock)
        : "cr0", "xer", "memory");

        return tmp;
}
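/*
 * Worked through: a free lock (0) returns 1, two existing readers (2)
 * return 3, and a write-locked word (negative) returns a value <= 0,
 * so the "> 0" test below both detects success and ensures the reader
 * count is stored only on success (stwcx. is skipped on failure).
 */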

int _raw_read_trylock(rwlock_t *rw)
{
        return __read_trylock(rw) > 0;
}

EXPORT_SYMBOL(_raw_read_trylock);

void _raw_read_lock(rwlock_t *rw)
{
        while (1) {
                if (likely(__read_trylock(rw) > 0))
                        break;
                do {
                        HMT_low();
                        if (SHARED_PROCESSOR)
                                __rw_yield(rw);
                } while (likely(rw->lock < 0));
                HMT_medium();
        }
}

EXPORT_SYMBOL(_raw_read_lock);

void _raw_read_unlock(rwlock_t *rw)
{
        long tmp;

        __asm__ __volatile__(
        "eieio                          # read_unlock: order prior stores first\n\
1:      lwarx           %0,0,%1\n\
        addic           %0,%0,-1        # drop the reader count\n\
        stwcx.          %0,0,%1\n\
        bne-            1b"
        : "=&r"(tmp)
        : "r"(&rw->lock)
        : "cr0", "memory");
}

EXPORT_SYMBOL(_raw_read_unlock);

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static __inline__ long __write_trylock(rwlock_t *rw)
{
        long tmp, tmp2;

        __asm__ __volatile__(
"       lwz             %1,%3(13)       # write_trylock: lock token from the paca\n\
1:      lwarx           %0,0,%2         # load lock word and reserve\n\
        cmpwi           0,%0,0          # free (no readers or writer)?\n\
        bne-            2f              # no: fail, return old value\n\
        stwcx.          %1,0,%2         # try to store our token\n\
        bne-            1b              # lost the reservation, retry\n\
        isync                           # acquire barrier\n\
2:"     : "=&r" (tmp), "=&r" (tmp2)
        : "r" (&rw->lock), "i" (offsetof(struct paca_struct, lock_token))
        : "cr0", "memory");

        return tmp;
}
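/*
 * A write lock stores the same per-cpu token that the spinlocks use,
 * which is why __rw_yield() can recover the holder from a negative
 * lock word exactly as __spin_yield() does.
 */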

int _raw_write_trylock(rwlock_t *rw)
{
        return __write_trylock(rw) == 0;
}

EXPORT_SYMBOL(_raw_write_trylock);

void _raw_write_lock(rwlock_t *rw)
{
        while (1) {
                if (likely(__write_trylock(rw) == 0))
                        break;
                do {
                        HMT_low();
                        if (SHARED_PROCESSOR)
                                __rw_yield(rw);
                } while (likely(rw->lock != 0));
                HMT_medium();
        }
}

EXPORT_SYMBOL(_raw_write_lock);

#endif /* CONFIG_SPINLINE */