/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */
/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
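
/*
 * Illustration (a sketch, not part of the kernel API): counters that
 * share an L1 cacheline hash to the same lock, while counters in
 * different cachelines are spread over the four locks.  The names
 * below are made up for the example.
 *
 *	atomic_t a, b;
 *	arch_spinlock_t *sa = ATOMIC_HASH(&a);
 *	arch_spinlock_t *sb = ATOMIC_HASH(&b);
 *
 * sa == sb whenever &a and &b land in the same L1_CACHE_BYTES sized
 * block, because both addresses are divided by L1_CACHE_BYTES and then
 * reduced modulo ATOMIC_HASH_SIZE.
 */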
/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)
#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif
/* This should get optimized out since it's never called.
** Or get a link error if xchg is used "wrong".
*/
extern void __xchg_called_with_bad_pointer(void);
/* __xchg32/64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __xchg8(char, char *);
extern unsigned long __xchg32(int, int *);
#ifdef CONFIG_64BIT
extern unsigned long __xchg64(unsigned long, unsigned long *);
#endif
/* optimizer better get rid of switch since size is a constant */
static __inline__ unsigned long
__xchg(unsigned long x, __volatile__ void * ptr, int size)
{
	switch (size) {
#ifdef CONFIG_64BIT
	case 8: return __xchg64(x, (unsigned long *) ptr);
#endif
	case 4: return __xchg32((int) x, (int *) ptr);
	case 1: return __xchg8((char) x, (char *) ptr);
	}
	__xchg_called_with_bad_pointer();
	return x;
}
/*
** REVISIT - Abandoned use of LDCW in xchg() for now:
** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
** o and while we are at it, could CONFIG_64BIT code use LDCD too?
**
**	if (__builtin_constant_p(x) && (x == NULL))
**		if (((unsigned long)p & 0xf) == 0)
**			return __ldcw(p);
*/
#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
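
/*
 * Usage sketch (an assumption, not taken from this file): xchg() picks
 * the 1/4/8 byte helper from sizeof(*ptr) at compile time, so an
 * unsupported size ends up calling the undefined
 * __xchg_called_with_bad_pointer() and fails at link time.
 *
 *	int flag = 1;
 *	int old = xchg(&flag, 0);	(takes the 4-byte __xchg32 path)
 */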
#define __HAVE_ARCH_CMPXCHG	1

/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);

/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_);
extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new_);
/* don't worry...optimizer will get rid of most of this */
static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
	switch (size) {
#ifdef CONFIG_64BIT
	case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
	case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int) old, (unsigned int) new_);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}
#define cmpxchg(ptr,o,n)						\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
				       (unsigned long)_n_, sizeof(*(ptr))); \
})
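
/*
 * Usage sketch (an assumption, not taken from this file): the classic
 * compare-and-swap retry loop.  "val" is a hypothetical shared word.
 *
 *	int val, old, new_;
 *	do {
 *		old = val;
 *		new_ = old * 2;
 *	} while (cmpxchg(&val, old, new_) != old);
 *
 * cmpxchg() returns the value actually found at the address, so a
 * mismatch means another CPU won the race and the update is retried.
 */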
#include <asm-generic/cmpxchg-local.h>

static inline unsigned long __cmpxchg_local(volatile void *ptr,
				      unsigned long old,
				      unsigned long new_, int size)
{
	switch (size) {
#ifdef CONFIG_64BIT
	case 8:	return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
	case 4:	return __cmpxchg_u32(ptr, old, new_);
	default:
		return __cmpxchg_local_generic(ptr, old, new_, size);
	}
}
/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
			(unsigned long)(n), sizeof(*(ptr))))
#ifdef CONFIG_64BIT
#define cmpxchg64_local(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
})
#else
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif
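
/*
 * Sketch of the intended difference (an assumption, not taken from this
 * file): cmpxchg_local() only has to be atomic against code running on
 * the same CPU, e.g. an interrupt handler touching a per-CPU statistic.
 * "my_event_count" is a hypothetical per-CPU variable.
 *
 *	unsigned long *cnt = &get_cpu_var(my_event_count);
 *	unsigned long old;
 *	do {
 *		old = *cnt;
 *	} while (cmpxchg_local(cnt, old, old + 1) != old);
 *	put_cpu_var(my_event_count);
 */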
/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

/* It's possible to reduce all atomic operations to either
 * __atomic_add_return, atomic_set and atomic_read (the latter
 * is there only for consistency).
 */
static __inline__ int __atomic_add_return(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}
static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}
static __inline__ int atomic_read(const atomic_t *v)
{
	return (*(volatile int *)&(v)->counter);
}
/* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
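
/*
 * Usage sketch (an assumption, not taken from this file): linux/atomic.h
 * layers atomic_add_unless() and atomic_inc_not_zero() on top of this
 * primitive, e.g. to take a reference only while a count is non-zero.
 * "refcount" is a hypothetical counter.
 *
 *	if (__atomic_add_unless(&refcount, 1, 0) != 0)
 *		the old value was non-zero, so the increment happened
 */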
#define atomic_add(i,v)	((void)(__atomic_add_return( (i),(v))))
#define atomic_sub(i,v)	((void)(__atomic_add_return(-(i),(v))))
#define atomic_inc(v)	((void)(__atomic_add_return(   1,(v))))
#define atomic_dec(v)	((void)(__atomic_add_return(  -1,(v))))

#define atomic_add_return(i,v)	(__atomic_add_return( (i),(v)))
#define atomic_sub_return(i,v)	(__atomic_add_return(-(i),(v)))
#define atomic_inc_return(v)	(__atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(__atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)

#define ATOMIC_INIT(i)	((atomic_t) { (i) })

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })

static __inline__ s64
__atomic64_add_return(s64 i, atomic64_t *v)
{
	s64 ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}
static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}
static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return (*(volatile long *)&(v)->counter);
}
#define atomic64_add(i,v)	((void)(__atomic64_add_return( ((s64)(i)),(v))))
#define atomic64_sub(i,v)	((void)(__atomic64_add_return(-((s64)(i)),(v))))
#define atomic64_inc(v)		((void)(__atomic64_add_return(   1,(v))))
#define atomic64_dec(v)		((void)(__atomic64_add_return(  -1,(v))))

#define atomic64_add_return(i,v)	(__atomic64_add_return( ((s64)(i)),(v)))
#define atomic64_sub_return(i,v)	(__atomic64_add_return(-((s64)(i)),(v)))
#define atomic64_inc_return(v)		(__atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(__atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)
/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if the add happened (i.e. @v was not @u), zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#endif /* CONFIG_64BIT */

#endif /* _ASM_PARISC_ATOMIC_H_ */