#ifndef _ASM_M32R_SPINLOCK_H
#define _ASM_M32R_SPINLOCK_H

/*
 *  linux/include/asm-m32r/spinlock.h
 *
 *  M32R version:
 *    Copyright (C) 2001, 2002  Hitoshi Yamamoto
 *    Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/config.h>	/* CONFIG_DEBUG_SPINLOCK, CONFIG_SMP */
#include <linux/compiler.h>
#include <asm/atomic.h>
#include <asm/page.h>

extern int printk(const char * fmt, ...)
	__attribute__ ((format (printf, 1, 2)));

#define RW_LOCK_BIAS		 0x01000000
#define RW_LOCK_BIAS_STR	"0x01000000"
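
/*
 * RW_LOCK_BIAS is the value of an unlocked rwlock's counter (see the
 * rwlock code below): each reader subtracts 1 and a writer subtracts
 * the whole bias, so the counter is RW_LOCK_BIAS when the lock is
 * free, still positive while only readers hold it, and zero or
 * negative once a writer has claimed it.
 */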

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

typedef struct {
	volatile int slock;
#ifdef CONFIG_DEBUG_SPINLOCK
	unsigned magic;
#endif
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} spinlock_t;

#define SPINLOCK_MAGIC	0xdead4ead

#ifdef CONFIG_DEBUG_SPINLOCK
#define SPINLOCK_MAGIC_INIT	, SPINLOCK_MAGIC
#else
#define SPINLOCK_MAGIC_INIT	/* */
#endif

#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }

#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
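
/*
 * Usage sketch (hypothetical caller, not part of this header) -- the
 * spin_lock()/spin_unlock() wrappers in <linux/spinlock.h> end up in
 * the _raw_* routines below on SMP builds:
 *
 *	static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *
 *	spin_lock(&my_lock);
 *	... critical section ...
 *	spin_unlock(&my_lock);
 */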

/*
 * Simple spin lock operations.  There are two variants: one clears
 * IRQs on the local processor, the other does not.
 *
 * We make no fairness assumptions; fairness would carry a cost.
 */

#define spin_is_locked(x)	(*(volatile int *)(&(x)->slock) <= 0)
#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
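
/*
 * Note that _raw_spin_lock_flags() simply ignores @flags on this
 * port: interrupts are not re-enabled while we spin.
 */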

/**
 * _raw_spin_trylock - try to acquire a spinlock without spinning
 * @lock: Pointer to the lock variable
 *
 * _raw_spin_trylock() attempts to take the lock once and returns the
 * result.  On the m32r, the result is 1 (success) or 0 (failure).
 */
static inline int _raw_spin_trylock(spinlock_t *lock)
{
	int oldval;
	unsigned long tmp1, tmp2;

	/*
	 * lock->slock :  =1 : unlock
	 *             : <=0 : lock
	 * {
	 *   oldval = lock->slock; <--+ need atomic operation
	 *   lock->slock = 0;      <--+
	 * }
	 */
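	/*
	 * A short primer on the asm below (our reading of the m32r ISA;
	 * see the processor manual for the authoritative description):
	 * mvfc/mvtc save and restore the PSW, clrpsw #0x40 clears the
	 * IE bit to disable interrupts, and LOCK/UNLOCK are the m32r
	 * atomic load/store pair -- LOCK loads and asserts the bus
	 * lock, UNLOCK stores and releases it.  DCACHE_CLEAR is a cache
	 * workaround that consumes the extra scratch register clobbered
	 * under CONFIG_CHIP_M32700_TS1.
	 */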
	__asm__ __volatile__ (
		"# spin_trylock			\n\t"
		"ldi	%1, #0;			\n\t"
		"mvfc	%2, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%3")
		"lock	%0, @%3;		\n\t"
		"unlock	%1, @%3;		\n\t"
		"mvtc	%2, psw;		\n\t"
		: "=&r" (oldval), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&lock->slock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);

	return (oldval > 0);
}

static inline void _raw_spin_lock(spinlock_t *lock)
{
	unsigned long tmp0, tmp1;

#ifdef CONFIG_DEBUG_SPINLOCK
	if (unlikely(lock->magic != SPINLOCK_MAGIC)) {
		printk("pc: %p\n", __builtin_return_address(0));
		BUG();
	}
#endif
	/*
	 * lock->slock :  =1 : unlock
	 *             : <=0 : lock
	 *
	 * for ( ; ; ) {
	 *   lock->slock -= 1;  <-- need atomic operation
	 *   if (lock->slock == 0) break;
	 *   for ( ; lock->slock <= 0 ; );
	 * }
	 */
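	/*
	 * The contended path below sits in a LOCK_SECTION (an
	 * out-of-line text section), so the uncontended acquire runs
	 * straight through with no taken branch.
	 */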
	__asm__ __volatile__ (
		"# spin_lock			\n\t"
		".fillinsn			\n"
		"1:				\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #-1;		\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		"bltz	%0, 2f;			\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn			\n"
		"2:				\n\t"
		"ld	%0, @%2;		\n\t"
		"bgtz	%0, 1b;			\n\t"
		"bra	2b;			\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&lock->slock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

static inline void _raw_spin_unlock(spinlock_t *lock)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(lock->magic != SPINLOCK_MAGIC);
	BUG_ON(!spin_is_locked(lock));
#endif
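	/*
	 * A plain store releases the lock: word stores are atomic, and
	 * the mb() keeps critical-section accesses from being reordered
	 * past the store that sets slock back to 1.
	 */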
	mb();
	lock->slock = 1;
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
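
/*
 * Sketch of that mixed pattern (hypothetical users, not part of this
 * header): readers may run in interrupt context with a plain
 * read_lock(), as long as every writer disables interrupts:
 *
 *	read_lock(&map_lock);			// interrupt or process ctx
 *	... look something up ...
 *	read_unlock(&map_lock);
 *
 *	write_lock_irqsave(&map_lock, flags);	// process context only
 *	... update ...
 *	write_unlock_irqrestore(&map_lock, flags);
 */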
typedef struct {
	volatile int lock;
#ifdef CONFIG_DEBUG_SPINLOCK
	unsigned magic;
#endif
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;

#define RWLOCK_MAGIC	0xdeaf1eed

#ifdef CONFIG_DEBUG_SPINLOCK
#define RWLOCK_MAGIC_INIT	, RWLOCK_MAGIC
#else
#define RWLOCK_MAGIC_INIT	/* */
#endif

#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }

#define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define read_can_lock(x) ((int)(x)->lock > 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
/*
 * Like the i386 code this was derived from, we implement read-write
 * locks as a 32-bit counter with the high bit (sign) as the
 * "contended" bit -- the same technique as rw semaphores (see
 * semaphore.h for details).
 *
 * Unlike i386, which keeps its out-of-line spinlock helpers in
 * arch/i386/kernel/semaphore.c, everything here is inline below.
 */
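
/*
 * Worked example with BIAS = 0x01000000: two readers take the counter
 * to 0x00fffffe (still positive, so write_can_lock() fails); a single
 * writer takes it from 0x01000000 straight to 0, so read_can_lock()
 * and write_can_lock() both fail until it is released.
 */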

static inline void _raw_read_lock(rwlock_t *rw)
{
	unsigned long tmp0, tmp1;

#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(rw->magic != RWLOCK_MAGIC);
#endif
	/*
	 * rw->lock :  >0 : unlock
	 *          : <=0 : lock
	 *
	 * for ( ; ; ) {
	 *   rw->lock -= 1;  <-- need atomic operation
	 *   if (rw->lock >= 0) break;
	 *   rw->lock += 1;  <-- need atomic operation
	 *   for ( ; rw->lock <= 0 ; );
	 * }
	 */
	__asm__ __volatile__ (
		"# read_lock			\n\t"
		".fillinsn			\n"
		"1:				\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #-1;		\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		"bltz	%0, 2f;			\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn			\n"
		"2:				\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #1;			\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		".fillinsn			\n"
		"3:				\n\t"
		"ld	%0, @%2;		\n\t"
		"bgtz	%0, 1b;			\n\t"
		"bra	3b;			\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

static inline void _raw_write_lock(rwlock_t *rw)
{
	unsigned long tmp0, tmp1, tmp2;

#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(rw->magic != RWLOCK_MAGIC);
#endif
	/*
	 * rw->lock :  =RW_LOCK_BIAS : unlock
	 *          : !=RW_LOCK_BIAS : lock
	 *
	 * for ( ; ; ) {
	 *   rw->lock -= RW_LOCK_BIAS;  <-- need atomic operation
	 *   if (rw->lock == 0) break;
	 *   rw->lock += RW_LOCK_BIAS;  <-- need atomic operation
	 *   for ( ; rw->lock != RW_LOCK_BIAS ; ) ;
	 * }
	 */
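	/*
	 * The seth/or3 pair below builds the 32-bit bias constant in a
	 * register (seth sets the high 16 bits, or3 ORs in the low 16);
	 * the m32r has no single 32-bit immediate load.
	 */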
	__asm__ __volatile__ (
		"# write_lock					\n\t"
		"seth	%1, #high(" RW_LOCK_BIAS_STR ");	\n\t"
		"or3	%1, %1, #low(" RW_LOCK_BIAS_STR ");	\n\t"
		".fillinsn					\n"
		"1:						\n\t"
		"mvfc	%2, psw;				\n\t"
		"clrpsw	#0x40 -> nop;				\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;				\n\t"
		"sub	%0, %1;					\n\t"
		"unlock	%0, @%3;				\n\t"
		"mvtc	%2, psw;				\n\t"
		"bnez	%0, 2f;					\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn					\n"
		"2:						\n\t"
		"clrpsw	#0x40 -> nop;				\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;				\n\t"
		"add	%0, %1;					\n\t"
		"unlock	%0, @%3;				\n\t"
		"mvtc	%2, psw;				\n\t"
		".fillinsn					\n"
		"3:						\n\t"
		"ld	%0, @%3;				\n\t"
		"beq	%0, %1, 1b;				\n\t"
		"bra	3b;					\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r7"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

static inline void _raw_read_unlock(rwlock_t *rw)
{
	unsigned long tmp0, tmp1;

	__asm__ __volatile__ (
		"# read_unlock			\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #1;			\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

static inline void _raw_write_unlock(rwlock_t *rw)
{
	unsigned long tmp0, tmp1, tmp2;

	__asm__ __volatile__ (
		"# write_unlock					\n\t"
		"seth	%1, #high(" RW_LOCK_BIAS_STR ");	\n\t"
		"or3	%1, %1, #low(" RW_LOCK_BIAS_STR ");	\n\t"
		"mvfc	%2, psw;				\n\t"
		"clrpsw	#0x40 -> nop;				\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;				\n\t"
		"add	%0, %1;					\n\t"
		"unlock	%0, @%3;				\n\t"
		"mvtc	%2, psw;				\n\t"
		: "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r7"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
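
/*
 * Write trylock: subtract the whole bias once; a result of zero means
 * neither readers nor a writer were present, so we now own the lock.
 * Otherwise add the bias back and report failure.
 */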
static inline int _raw_write_trylock(rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}

#endif	/* _ASM_M32R_SPINLOCK_H */