#ifndef _ASM_IA64_SPINLOCK_H
#define _ASM_IA64_SPINLOCK_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * This file is used for SMP configurations only.
 */
#include <linux/kernel.h>

#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/atomic.h>

typedef struct {
	volatile unsigned int lock;
} spinlock_t;

#define SPIN_LOCK_UNLOCKED			(spinlock_t) { 0 }
#define spin_lock_init(x)			((x)->lock = 0)

#ifdef NEW_LOCK

/*
 * Try to get the lock.  If we fail to get the lock, make a non-standard call to
 * ia64_spinlock_contention().  We do not use a normal call because that would force all
 * callers of spin_lock() to be non-leaf routines.  Instead, ia64_spinlock_contention() is
 * carefully coded to touch only those registers that spin_lock() marks "clobbered".
 */

#define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "r28", "r29", "r30", "b6", "memory"

static inline void
_raw_spin_lock (spinlock_t *lock)
{
	register volatile unsigned int *ptr asm ("r31") = &lock->lock;

#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 4)
# ifdef CONFIG_ITANIUM
	/* don't use brl on Itanium... */
	asm volatile ("{\n\t"
		      "  mov ar.ccv = r0\n\t"
		      "  mov r28 = ip\n\t"
		      "  mov r30 = 1;;\n\t"
		      "}\n\t"
		      "cmpxchg4.acq r30 = [%1], r30, ar.ccv\n\t"
		      "movl r29 = ia64_spinlock_contention_pre3_4;;\n\t"
		      "cmp4.ne p14, p0 = r30, r0\n\t"
		      "mov b6 = r29;;\n\t"
		      "(p14) br.cond.spnt.many b6"
		      : "=r"(ptr) : "r"(ptr) : IA64_SPINLOCK_CLOBBERS);
# else
	asm volatile ("{\n\t"
		      "  mov ar.ccv = r0\n\t"
		      "  mov r28 = ip\n\t"
		      "  mov r30 = 1;;\n\t"
		      "}\n\t"
		      "cmpxchg4.acq r30 = [%1], r30, ar.ccv;;\n\t"
		      "cmp4.ne p14, p0 = r30, r0\n"
		      "(p14) brl.cond.spnt.many ia64_spinlock_contention_pre3_4"
		      : "=r"(ptr) : "r"(ptr) : IA64_SPINLOCK_CLOBBERS);
# endif /* CONFIG_MCKINLEY */
#else
# ifdef CONFIG_ITANIUM
	/* don't use brl on Itanium... */
	/* mis-declare, so we get the entry-point, not its function descriptor: */
	asm volatile ("mov r30 = 1\n\t"
		      "mov ar.ccv = r0;;\n\t"
		      "cmpxchg4.acq r30 = [%0], r30, ar.ccv\n\t"
		      "movl r29 = ia64_spinlock_contention;;\n\t"
		      "cmp4.ne p14, p0 = r30, r0\n\t"
		      "mov b6 = r29;;\n\t"
		      "(p14) br.call.spnt.many b6 = b6"
		      : "=r"(ptr) : "r"(ptr) : IA64_SPINLOCK_CLOBBERS);
# else
	asm volatile ("mov r30 = 1\n\t"
		      "mov ar.ccv = r0;;\n\t"
		      "cmpxchg4.acq r30 = [%0], r30, ar.ccv;;\n\t"
		      "cmp4.ne p14, p0 = r30, r0\n\t"
		      "(p14) brl.call.spnt.many b6=ia64_spinlock_contention"
		      : "=r"(ptr) : "r"(ptr) : IA64_SPINLOCK_CLOBBERS);
# endif /* CONFIG_MCKINLEY */
#endif
}
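
/*
 * Illustrative plain-C sketch of what the asm above implements (not part of
 * the kernel; raw_spin_lock_sketch and the contention helper's C signature
 * are hypothetical):
 *
 *	static inline void
 *	raw_spin_lock_sketch (spinlock_t *lock)
 *	{
 *		// cmpxchg4.acq: if the lock word is 0, atomically store 1 (acquire)
 *		if (cmpxchg_acq(&lock->lock, 0, 1) != 0)
 *			ia64_spinlock_contention_sketch(lock);	// out-of-line spin loop
 *	}
 *
 * Keeping the slow path out of line is what lets spin_lock() callers remain
 * leaf routines.
 */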

#else /* !NEW_LOCK */

/*
 * Streamlined test_and_set_bit(0, (x)).  We use test-and-test-and-set
 * rather than a simple xchg to avoid writing the cache-line when
 * there is contention.  A plain-C sketch of this loop follows the macro
 * below.
 */
#define _raw_spin_lock(x) __asm__ __volatile__ (		\
	"mov ar.ccv = r0\n"					\
	"mov r29 = 1\n"						\
	";;\n"							\
	"1:\n"							\
	"ld4.bias r2 = [%0]\n"					\
	";;\n"							\
	"cmp4.eq p0,p7 = r0,r2\n"				\
	"(p7) br.cond.spnt.few 1b \n"				\
	"cmpxchg4.acq r2 = [%0], r29, ar.ccv\n"			\
	";;\n"							\
	"cmp4.eq p0,p7 = r0, r2\n"				\
	"(p7) br.cond.spnt.few 1b\n"				\
	";;\n"							\
	:: "r"(&(x)->lock) : "ar.ccv", "p7", "r2", "r29", "memory")
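
/*
 * Plain-C sketch of the test-and-test-and-set loop above (illustrative only;
 * ttas_spin_lock_sketch is a hypothetical name):
 *
 *	static inline void
 *	ttas_spin_lock_sketch (spinlock_t *x)
 *	{
 *		do {
 *			while (x->lock != 0)	// "test": read-only spin, no cache-line writes
 *				;
 *		} while (cmpxchg_acq(&x->lock, 0, 1) != 0);	// "test-and-set"
 *	}
 *
 * Only the final cmpxchg dirties the cache-line; the inner loop spins on a
 * shared (read-only) copy.
 */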

#endif /* !NEW_LOCK */

#define spin_is_locked(x)	((x)->lock != 0)
#define _raw_spin_unlock(x)	do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
#define _raw_spin_trylock(x)	(cmpxchg_acq(&(x)->lock, 0, 1) == 0)
#define spin_unlock_wait(x)	do { barrier(); } while ((x)->lock)
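
/*
 * Typical usage (illustrative only; my_lock is a hypothetical name):
 *
 *	spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *
 *	_raw_spin_lock(&my_lock);
 *	... critical section ...
 *	_raw_spin_unlock(&my_lock);
 *
 *	if (_raw_spin_trylock(&my_lock)) {	// non-blocking; nonzero on success
 *		... critical section ...
 *		_raw_spin_unlock(&my_lock);
 *	}
 */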

typedef struct {
	volatile int read_counter	: 31;
	volatile int write_lock		:  1;
} rwlock_t;
#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }

#define rwlock_init(x)		do { *(x) = RW_LOCK_UNLOCKED; } while(0)
#define rwlock_is_locked(x)	(*(volatile int *) (x) != 0)
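
/*
 * Note on the encoding: an rwlock is a single 32-bit word in which bits 0-30
 * count active readers and bit 31 is the writer flag.  Read as a signed int,
 * the word is negative exactly when a writer holds the lock, which is what
 * the "< 0" tests below rely on.
 */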

#define _raw_read_lock(rw)								\
do {											\
	rwlock_t *__read_lock_ptr = (rw);						\
											\
	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, "acq") < 0)) {	\
		ia64_fetchadd(-1, (int *) __read_lock_ptr, "rel");			\
		while (*(volatile int *)__read_lock_ptr < 0)				\
			cpu_relax();							\
	}										\
} while (0)

#define _raw_read_unlock(rw)					\
do {								\
	rwlock_t *__read_lock_ptr = (rw);			\
	ia64_fetchadd(-1, (int *) __read_lock_ptr, "rel");	\
} while (0)

#define _raw_write_lock(rw)							\
do {										\
	__asm__ __volatile__ (							\
		"mov ar.ccv = r0\n"						\
		"dep r29 = -1, r0, 31, 1\n"					\
		";;\n"								\
		"1:\n"								\
		"ld4 r2 = [%0]\n"						\
		";;\n"								\
		"cmp4.eq p0,p7 = r0,r2\n"					\
		"(p7) br.cond.spnt.few 1b \n"					\
		"cmpxchg4.acq r2 = [%0], r29, ar.ccv\n"				\
		";;\n"								\
		"cmp4.eq p0,p7 = r0, r2\n"					\
		"(p7) br.cond.spnt.few 1b\n"					\
		";;\n"								\
		:: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory");		\
} while(0)

#define _raw_write_trylock(rw)							\
({										\
	register long result;							\
										\
	__asm__ __volatile__ (							\
		"mov ar.ccv = r0\n"						\
		"dep r29 = -1, r0, 31, 1\n"					\
		";;\n"								\
		"cmpxchg4.acq %0 = [%1], r29, ar.ccv\n"				\
		: "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory");		\
	(result == 0);								\
})

#define _raw_write_unlock(x)								\
({											\
	smp_mb__before_clear_bit();	/* need barrier before releasing lock... */	\
	clear_bit(31, (x));								\
})
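
/*
 * Illustrative reader/writer usage (my_rwlock is a hypothetical name):
 *
 *	rwlock_t my_rwlock = RW_LOCK_UNLOCKED;
 *
 *	_raw_read_lock(&my_rwlock);		// shared: multiple readers may enter
 *	... read-side critical section ...
 *	_raw_read_unlock(&my_rwlock);
 *
 *	_raw_write_lock(&my_rwlock);		// exclusive: spins until the word is 0
 *	... write-side critical section ...
 *	_raw_write_unlock(&my_rwlock);
 */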

#endif /* _ASM_IA64_SPINLOCK_H */