2 * Spin and read/write lock operations.
4 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
5 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
6 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
7 * Rework to support virtual processors
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
15 #include <linux/config.h>
16 #include <linux/kernel.h>
17 #include <linux/spinlock.h>
18 #include <linux/module.h>
19 #include <linux/stringify.h>
20 #include <asm/hvcall.h>
21 #include <asm/iSeries/HvCall.h>
/*
 * NOTE(review): this chunk appears truncated by extraction — lines carry
 * stray leading numbers (old line-number residue) and several lines
 * (braces, asm statements, #else/#endif directives) are missing.
 * Code bytes are left untouched below; only comments are added.
 */
23 #ifndef CONFIG_SPINLINE
26 * On a system with shared processors (that is, where a physical
27 * processor is multiplexed between several virtual processors),
28 * there is no point spinning on a lock if the holder of the lock
29 * isn't currently scheduled on a physical processor. Instead
30 * we detect this situation and ask the hypervisor to give the
31 * rest of our timeslice to the lock holder.
33 * So that we can tell which virtual processor is holding a lock,
34 * we put 0x80000000 | smp_processor_id() in the lock when it is
35 * held. Conveniently, we have a word in the paca that holds this
39 /* waiting for a spinlock... */
40 #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
42 /* We only yield to the hypervisor if we are in shared processor mode */
/* reads the per-cpu lppaca shared-processor flag from the current paca */
43 #define SHARED_PROCESSOR (get_paca()->lppaca.xSharedProc)
/*
 * __spin_yield - confer the rest of our timeslice to the cpu holding @lock.
 *
 * Decodes the holder's logical cpu id from the low 16 bits of the lock
 * word, skips the yield if the holder's virtual cpu is currently running
 * (even xYieldCount), and re-checks the lock word just before the
 * hypervisor call so we don't yield to a stale holder.
 * NOTE(review): the opening brace and some lines appear lost in truncation.
 */
45 void __spin_yield(spinlock_t *lock)
47 unsigned int lock_value, holder_cpu, yield_count;
48 struct paca_struct *holder_paca;
50 lock_value = lock->lock;
/* low halfword of the lock word is the holder's logical cpu id */
53 holder_cpu = lock_value & 0xffff;
54 BUG_ON(holder_cpu >= NR_CPUS);
55 holder_paca = &paca[holder_cpu];
56 yield_count = holder_paca->lppaca.xYieldCount;
/* an even yield count means the holder's virtual cpu is dispatched */
57 if ((yield_count & 1) == 0)
58 return; /* virtual cpu is currently running */
60 if (lock->lock != lock_value)
61 return; /* something has changed */
62 #ifdef CONFIG_PPC_ISERIES
/* iSeries: yield directly to the virtual processor holding the lock */
63 HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
64 ((u64)holder_cpu << 32) | yield_count);
/* pSeries SPLPAR path — NOTE(review): the #else line appears truncated away */
66 plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(holder_cpu),
71 #else /* SPLPAR || ISERIES */
/* no shared-processor support: yielding degrades to a compiler barrier */
72 #define __spin_yield(x) barrier()
73 #define SHARED_PROCESSOR 0
77 * This returns the old value in the lock, so we succeeded
78 * in getting the lock if the return value is 0.
/*
 * __spin_trylock - one attempt to store our paca lock_token into the lock.
 * Returns the old lock word: 0 means we took the lock.
 * NOTE(review): the __asm__ opener and the lwarx/stwcx. body lines appear
 * lost in truncation; only the first asm line and the operand list remain.
 */
80 static __inline__ unsigned long __spin_trylock(spinlock_t *lock)
82 unsigned long tmp, tmp2;
85 " lwz %1,%3(13) # __spin_trylock\n\
92 2:" : "=&r" (tmp), "=&r" (tmp2)
93 : "r" (&lock->lock), "i" (offsetof(struct paca_struct, lock_token))
/* Public trylock: succeeds (nonzero) iff the old lock word was 0. */
99 int _raw_spin_trylock(spinlock_t *lock)
101 return __spin_trylock(lock) == 0;
104 EXPORT_SYMBOL(_raw_spin_trylock);
/*
 * _raw_spin_lock - spin until the lock is acquired.
 * Fast path is a single __spin_trylock; the slow path watches the lock
 * word and yields to the hypervisor when in shared-processor mode.
 * NOTE(review): the do { ... } braces and loop-body statements appear
 * lost in truncation (a bare "} while" remains).
 */
106 void _raw_spin_lock(spinlock_t *lock)
109 if (likely(__spin_trylock(lock) == 0))
/* yielding only helps when physical cpus are shared between partitions */
113 if (SHARED_PROCESSOR)
115 } while (likely(lock->lock != 0));
120 EXPORT_SYMBOL(_raw_spin_lock);
/*
 * _raw_spin_lock_flags - like _raw_spin_lock, but re-enables interrupts
 * (restoring the caller's @flags) while spinning, and returns to the
 * interrupt-disabled state (flags_dis) before retrying the trylock.
 * NOTE(review): loop braces appear lost in truncation.
 */
122 void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
124 unsigned long flags_dis;
127 if (likely(__spin_trylock(lock) == 0))
/* remember the disabled state, then let interrupts in while we wait */
129 local_save_flags(flags_dis);
130 local_irq_restore(flags);
133 if (SHARED_PROCESSOR)
135 } while (likely(lock->lock != 0));
/* go back to the caller's interrupt-disabled state before retrying */
137 local_irq_restore(flags_dis);
141 EXPORT_SYMBOL(_raw_spin_lock_flags);
/*
 * spin_unlock_wait - wait until @lock is seen unlocked, without taking it.
 * Yields to the hypervisor on shared-processor systems while waiting.
 * NOTE(review): the loop body is largely missing from this chunk.
 */
143 void spin_unlock_wait(spinlock_t *lock)
147 if (SHARED_PROCESSOR)
153 EXPORT_SYMBOL(spin_unlock_wait);
156 * Waiting for a read lock or a write lock on a rwlock...
157 * This turns out to be the same for read and write locks, since
158 * we only know the holder if it is write-locked.
160 #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
/*
 * __rw_yield - confer our timeslice to the cpu write-holding @rw.
 * Mirrors __spin_yield: decode the holder cpu, skip if the holder's
 * virtual cpu is running, re-check the lock word, then yield via the
 * platform hypervisor call.
 * NOTE(review): the declaration of lock_value and the "no write lock"
 * test line appear lost in truncation.
 */
161 void __rw_yield(rwlock_t *rw)
164 unsigned int holder_cpu, yield_count;
165 struct paca_struct *holder_paca;
167 lock_value = rw->lock;
169 return; /* no write lock at present */
/* low halfword of the lock word is the write holder's logical cpu id */
170 holder_cpu = lock_value & 0xffff;
171 BUG_ON(holder_cpu >= NR_CPUS);
172 holder_paca = &paca[holder_cpu];
173 yield_count = holder_paca->lppaca.xYieldCount;
/* an even yield count means the holder's virtual cpu is dispatched */
174 if ((yield_count & 1) == 0)
175 return; /* virtual cpu is currently running */
177 if (rw->lock != lock_value)
178 return; /* something has changed */
179 #ifdef CONFIG_PPC_ISERIES
180 HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
181 ((u64)holder_cpu << 32) | yield_count);
/* pSeries SPLPAR path — NOTE(review): the #else line appears truncated away */
183 plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(holder_cpu),
188 #else /* SPLPAR || ISERIES */
/* no shared-processor support: yielding degrades to a compiler barrier */
189 #define __rw_yield(x) barrier()
193 * This returns the old value in the lock + 1,
194 * so we got a read lock if the return value is > 0.
/*
 * __read_trylock - one atomic attempt to take @rw for reading.
 * Returns old lock value + 1, so > 0 means the read lock was taken.
 * NOTE(review): most of the lwarx/stwcx. asm body is missing from this
 * chunk; only the first line and the clobber list remain.
 */
196 static __inline__ long __read_trylock(rwlock_t *rw)
200 __asm__ __volatile__(
201 "1: lwarx %0,0,%1 # read_trylock\n\
210 : "cr0", "xer", "memory");
/* Public read trylock: succeeds (nonzero) iff __read_trylock returned > 0. */
215 int _raw_read_trylock(rwlock_t *rw)
217 return __read_trylock(rw) > 0;
220 EXPORT_SYMBOL(_raw_read_trylock);
/*
 * _raw_read_lock - spin until the read lock is acquired.
 * Yields to the hypervisor in shared-processor mode while waiting.
 * NOTE(review): loop braces appear lost in truncation.
 */
222 void _raw_read_lock(rwlock_t *rw)
225 if (likely(__read_trylock(rw) > 0))
229 if (SHARED_PROCESSOR)
/* a negative lock word means held for write; wait for it to clear */
231 } while (likely(rw->lock < 0));
236 EXPORT_SYMBOL(_raw_read_lock);
/*
 * _raw_read_unlock - drop a read hold on @rw.
 * The leading eieio orders prior accesses before the release.
 * NOTE(review): the asm decrement loop and closing brace appear lost in
 * truncation; only the eieio line remains.
 */
238 void _raw_read_unlock(rwlock_t *rw)
242 __asm__ __volatile__(
243 "eieio # read_unlock\n\
253 EXPORT_SYMBOL(_raw_read_unlock);
256 * This returns the old value in the lock,
257 * so we got the write lock if the return value is 0.
/*
 * __write_trylock - one atomic attempt to install our paca lock_token as
 * the write holder.  Returns the old lock word: 0 means we got the lock.
 * NOTE(review): the declarations of tmp/tmp2 and the lwarx/stwcx. body
 * lines appear lost in truncation.
 */
259 static __inline__ long __write_trylock(rwlock_t *rw)
263 __asm__ __volatile__(
264 " lwz %1,%3(13) # write_trylock\n\
271 2:" : "=&r" (tmp), "=&r" (tmp2)
272 : "r" (&rw->lock), "i" (offsetof(struct paca_struct, lock_token))
/* Public write trylock: succeeds (nonzero) iff the old lock word was 0. */
278 int _raw_write_trylock(rwlock_t *rw)
280 return __write_trylock(rw) == 0;
283 EXPORT_SYMBOL(_raw_write_trylock);
/*
 * _raw_write_lock - spin until the write lock is acquired.
 * Waits for the lock word to go to zero (no readers, no writer),
 * yielding to the hypervisor in shared-processor mode.
 * NOTE(review): loop braces appear lost in truncation.
 */
285 void _raw_write_lock(rwlock_t *rw)
288 if (likely(__write_trylock(rw) == 0))
292 if (SHARED_PROCESSOR)
294 } while (likely(rw->lock != 0));
299 EXPORT_SYMBOL(_raw_write_lock);
301 #endif /* CONFIG_SPINLINE */