2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Copyright (C) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
8 * This implementation of synchronization variables is heavily based on
9 * one done by Steve Lord <lord@sgi.com>
11 * Paul Cassella <pwc@sgi.com>
14 #include <linux/kernel.h>
15 #include <linux/sched.h>
16 #include <linux/init.h>
17 #include <linux/interrupt.h>
19 #include <asm/semaphore.h>
20 #include <asm/hardirq.h>
21 #include <asm/current.h>
23 #include <asm/sn/sv.h>
/*
 * Build-time configuration: optional interrupt-state debug checks, the
 * per-architecture flag-bit tests used by those checks, and the worker
 * count for the interrupt stress test.
 *
 * NOTE(review): this listing is elided; the duplicated macro definitions
 * below are presumably selected by per-architecture #if/#elif/#else
 * branches not shown here -- confirm against the full file.
 */
25 /* Define this to have sv_test() run some simple tests.
26 kernel_thread() must behave as expected when this is called. */
31 /* Set up some macros so sv_wait(), sv_signal(), and sv_broadcast()
32 can sanity check interrupt state on architectures where we know
35 #define SV_DEBUG_INTERRUPT_STATE
/* Variant 1: interrupt-enable appears to be bit 0x1 of the saved flags. */
37 #define SV_TEST_INTERRUPTS_ENABLED(flags) ((flags & 0x1) != 0)
38 #define SV_TEST_INTERRUPTS_DISABLED(flags) ((flags & 0x1) == 0)
39 #define SV_INTERRUPT_TEST_WORKERS 31
/* Variant 2: interrupt-enable appears to be bit 0x4000; fewer workers. */
41 #define SV_TEST_INTERRUPTS_ENABLED(flags) ((flags & 0x4000) != 0)
42 #define SV_TEST_INTERRUPTS_DISABLED(flags) ((flags & 0x4000) == 0)
43 #define SV_INTERRUPT_TEST_WORKERS 4 /* simulator's slow */
/* Fallback: disable the debug state checks on other architectures. */
45 #undef SV_DEBUG_INTERRUPT_STATE
46 #define SV_INTERRUPT_TEST_WORKERS 4 /* reasonable? default. */
51 /* XXX FIXME hack hack hack. Our mips64 tree is from before the
52 switch to WQ_FLAG_EXCLUSIVE, and our ia64 tree is from after it. */
54 #undef EXCLUSIVE_IN_QUEUE
/* Post-switch trees: mark waiters exclusive in the queue itself, and
   make TASK_EXCLUSIVE a no-op in the set_current_state() calls below. */
56 #define EXCLUSIVE_IN_QUEUE
57 #define TASK_EXCLUSIVE 0 /* for the set_current_state() in sv_wait() */
/* Acquire the sv's internal spinlock guarding its own state
   (distinct from the caller-supplied monitor lock). */
61 static inline void sv_lock(sv_t *sv) {
62 spin_lock(&sv->sv_lock);
/* Release the sv's internal spinlock taken by sv_lock(). */
65 static inline void sv_unlock(sv_t *sv) {
66 spin_unlock(&sv->sv_lock);
69 /* up() is "extern inline", so we can't pass its address to sv_wait.
70 Use this function's address instead. */
71 static void up_wrapper(struct semaphore *sem) {
/* NOTE(review): body elided in this listing; presumably just up(sem). */
75 /* spin_unlock() is sometimes a macro. */
76 static void spin_unlock_wrapper(spinlock_t *s) {
/* NOTE(review): body elided in this listing; presumably spin_unlock(s). */
80 /* XXX Perhaps sv_wait() should do the switch() each time and avoid
81 the extra indirection and the need for the _wrapper functions? */
/*
 * Record how sv_wait() must release the caller-supplied monitor lock
 * (spinlock vs. semaphore), stored as sv->sv_mon_unlock_func.
 * NOTE(review): the switch labels and several braces are elided from
 * this listing; the comments below mark the apparent cases -- confirm.
 */
83 static inline void sv_set_mon_type(sv_t *sv, int type) {
/* Apparent SV_MON_SPIN case: monitor is a spinlock. */
86 sv->sv_mon_unlock_func =
87 (sv_mon_unlock_func_t)spin_unlock_wrapper;
/* Apparent SV_MON_SEMA case: monitor is a semaphore (a sleeping lock,
   so it cannot be shared with interrupt or bottom-half context). */
90 sv->sv_mon_unlock_func =
91 (sv_mon_unlock_func_t)up_wrapper;
92 if(sv->sv_flags & SV_INTS) {
93 printk(KERN_ERR "sv_set_mon_type: The monitor lock "
94 "cannot be shared with interrupts if it is a "
98 if(sv->sv_flags & SV_BHS) {
99 printk(KERN_ERR "sv_set_mon_type: The monitor lock "
100 "cannot be shared with bottom-halves if it is "
107 * If needed, and will need to think about interrupts. This
108 * may be needed, for example, if someone wants to use sv's
109 * with something like dev_base; writers need to hold two
/* Apparent custom-monitor case: copy the caller's unlock hook and lock
   pointer.  NOTE(review): `lock` is not a visible parameter here; this
   span may sit inside a disabled (#if 0) region elided from the
   listing -- confirm against the full file. */
114 struct sv_mon_custom *c = lock;
115 sv->sv_mon_unlock_func = c->sv_mon_unlock_func;
116 sv->sv_mon_lock = c->sv_mon_lock;
/* Apparent default case: reject unknown monitor types loudly. */
122 printk(KERN_ERR "sv_set_mon_type: unknown type %d (0x%x)! "
123 "(flags 0x%x)\n", type, type, sv->sv_flags);
/* Remember the chosen monitor type in the sv's flags word. */
127 sv->sv_flags |= type;
/*
 * Validate/normalize the wakeup ordering (FIFO vs. LIFO) for this sv.
 * NOTE(review): the guard before the assignment is elided; presumably
 * `if (!ord)` selects the default -- confirm against the full file.
 */
130 static inline void sv_set_ord(sv_t *sv, int ord) {
132 ord = SV_ORDER_DEFAULT;
134 if (ord != SV_ORDER_FIFO && ord != SV_ORDER_LIFO) {
135 printk(KERN_EMERG "sv_set_ord: unknown order %d (0x%x)! ",
/*
 * Initialize a synchronization variable.
 * @sv:    the sv to initialize
 * @lock:  associated monitor lock (may be NULL; see comment below)
 * @flags: OR of ordering (SV_ORDER_*), monitor type (SV_MON_*) and
 *         context flags such as SV_INTS / SV_BHS
 * NOTE(review): `ord` is computed but its consumer (presumably a
 * sv_set_ord(sv, ord) call) is elided from this listing -- confirm.
 */
143 void sv_init(sv_t *sv, sv_mon_lock_t *lock, int flags)
145 int ord = flags & SV_ORDER_MASK;
146 int type = flags & SV_MON_MASK;
148 /* Copy all non-order, non-type flags */
149 sv->sv_flags = (flags & ~(SV_ORDER_MASK | SV_MON_MASK));
/* SV_INTS and SV_BHS together is rejected; per the message, SV_INTS
   alone is the correct spelling for that combination. */
151 if((sv->sv_flags & (SV_INTS | SV_BHS)) == (SV_INTS | SV_BHS)) {
152 printk(KERN_ERR "sv_init: do not set both SV_INTS and SV_BHS, only SV_INTS.\n");
157 sv_set_mon_type(sv, type);
159 /* If lock is NULL, we'll get it from sv_wait_compat() (and
160 ignore it in sv_signal() and sv_broadcast()). */
161 sv->sv_mon_lock = lock;
163 spin_lock_init(&sv->sv_lock);
164 init_waitqueue_head(&sv->sv_waiters);
168 * The associated lock must be locked on entry. It is unlocked on return.
172 * n < 0 : interrupted, -n jiffies remaining on timeout, or -1 if timeout == 0
173 * n = 0 : timeout expired
174 * n > 0 : sv_signal()'d, n jiffies remaining on timeout, or 1 if timeout == 0
176 signed long sv_wait(sv_t *sv, int sv_wait_flags, unsigned long timeout)
178 DECLARE_WAITQUEUE( wait, current );
/* Optional sanity check: with SV_INTS the caller must have interrupts
   disabled; without it, disabled interrupts draw a warning. */
182 #ifdef SV_DEBUG_INTERRUPT_STATE
185 local_save_flags(flags);
187 if(sv->sv_flags & SV_INTS) {
188 if(SV_TEST_INTERRUPTS_ENABLED(flags)) {
189 printk(KERN_ERR "sv_wait: SV_INTS and interrupts "
190 "enabled (flags: 0x%lx)\n", flags);
194 if (SV_TEST_INTERRUPTS_DISABLED(flags)) {
195 printk(KERN_WARNING "sv_wait: !SV_INTS and interrupts "
196 "disabled! (flags: 0x%lx)\n", flags);
200 #endif /* SV_DEBUG_INTERRUPT_STATE */
/* Release the caller's monitor lock via the hook installed by
   sv_set_mon_type().  NOTE(review): the sv_lock() acquisition that
   should precede this is elided from the listing -- confirm. */
204 sv->sv_mon_unlock_func(sv->sv_mon_lock);
206 /* Add ourselves to the wait queue and set the state before
207 * releasing the sv_lock so as to avoid racing with the
208 * wake_up() in sv_signal() and sv_broadcast().
211 /* don't need the _irqsave part, but there is no wq_write_lock() */
212 write_lock_irqsave(&sv->sv_waiters.lock, flags);
214 #ifdef EXCLUSIVE_IN_QUEUE
215 wait.flags |= WQ_FLAG_EXCLUSIVE;
/* FIFO waiters queue at the tail; LIFO at the head (case labels are
   elided from this listing). */
218 switch(sv->sv_flags & SV_ORDER_MASK) {
220 __add_wait_queue_tail(&sv->sv_waiters, &wait);
223 __add_wait_queue(&sv->sv_waiters, &wait);
226 printk(KERN_ERR "sv_wait: unknown order! (sv: 0x%p, flags: 0x%x)\n",
227 (void *)sv, sv->sv_flags);
230 write_unlock_irqrestore(&sv->sv_waiters.lock, flags);
/* SV_WAIT_SIG waits may be broken by signals; otherwise sleep
   uninterruptibly.  TASK_EXCLUSIVE is 0 under EXCLUSIVE_IN_QUEUE. */
232 if(sv_wait_flags & SV_WAIT_SIG)
233 set_current_state(TASK_EXCLUSIVE | TASK_INTERRUPTIBLE )
235 set_current_state(TASK_EXCLUSIVE | TASK_UNINTERRUPTIBLE);
237 spin_unlock(&sv->sv_lock);
/* Re-enable interrupts / bottom halves before sleeping; the enabling
   statements themselves are elided from this listing. */
239 if(sv->sv_flags & SV_INTS)
241 else if(sv->sv_flags & SV_BHS)
245 ret = schedule_timeout(timeout);
249 if(current->state != TASK_RUNNING) /* XXX Is this possible? */ {
250 printk(KERN_ERR "sv_wait: state not TASK_RUNNING after "
252 set_current_state(TASK_RUNNING);
255 remove_wait_queue(&sv->sv_waiters, &wait);
/* Distinguish the three wake reasons for the return value: */
258 - woken by a sv_signal/sv_broadcast
260 - woken by timeout expiring
263 /* XXX This isn't really accurate; we may have been woken
264 before the signal anyway.... */
265 if(signal_pending(current))
266 return timeout ? -ret : -1;
267 return timeout ? ret : 1;
/* Wake a single waiter blocked in sv_wait() on @sv. */
271 void sv_signal(sv_t *sv)
273 /* If interrupts can acquire this lock, they can also acquire the
274 sv_mon_lock, which we must already have to have called this, so
275 interrupts must be disabled already. If interrupts cannot
276 contend for this lock, we don't have to worry about it. */
278 #ifdef SV_DEBUG_INTERRUPT_STATE
279 if(sv->sv_flags & SV_INTS) {
281 local_save_flags(flags);
282 if(SV_TEST_INTERRUPTS_ENABLED(flags))
283 printk(KERN_ERR "sv_signal: SV_INTS and "
284 "interrupts enabled! (flags: 0x%lx)\n", flags);
286 #endif /* SV_DEBUG_INTERRUPT_STATE */
/* NOTE(review): the sv_lock()/sv_unlock() bracketing around this
   wake_up() is elided from the listing -- confirm it is held here. */
289 wake_up(&sv->sv_waiters);
/* Wake ALL waiters blocked in sv_wait() on @sv.  Same interrupt-state
   reasoning as sv_signal() applies. */
293 void sv_broadcast(sv_t *sv)
295 #ifdef SV_DEBUG_INTERRUPT_STATE
296 if(sv->sv_flags & SV_INTS) {
298 local_save_flags(flags);
299 if(SV_TEST_INTERRUPTS_ENABLED(flags))
300 printk(KERN_ERR "sv_broadcast: SV_INTS and "
301 "interrupts enabled! (flags: 0x%lx)\n", flags);
303 #endif /* SV_DEBUG_INTERRUPT_STATE */
/* NOTE(review): sv_lock()/sv_unlock() bracketing elided -- confirm. */
306 wake_up_all(&sv->sv_waiters);
/* Tear down an sv.  Only a best-effort debug check is visible: if the
   internal lock is contended, someone is still using the sv. */
310 void sv_destroy(sv_t *sv)
312 if(!spin_trylock(&sv->sv_lock)) {
313 printk(KERN_ERR "sv_destroy: someone else has sv 0x%p locked!\n", (void *)sv);
317 /* XXX Check that the waitqueue is empty?
318 Mark the sv destroyed?
/* Test rendezvous semaphores, created locked so the first down()
   blocks until a test thread does the matching up(). */
325 static DECLARE_MUTEX_LOCKED(talkback);
326 static DECLARE_MUTEX_LOCKED(sem);
/* Test 1 waiter thread: takes the spinlock monitor, wakes the signaler
   thread (the up()/sema calls are elided here), then blocks in
   sv_spin_wait() until sv_test_1_s signals. */
330 static int sv_test_1_w(void *arg)
332 printk("sv_test_1_w: acquiring spinlock 0x%p...\n", arg);
334 spin_lock((spinlock_t*)arg);
335 printk("sv_test_1_w: spinlock acquired, waking sv_test_1_s.\n");
339 printk("sv_test_1_w: sv_spin_wait()'ing.\n");
341 sv_spin_wait(&sv, arg);
343 printk("sv_test_1_w: talkback.\n");
346 printk("sv_test_1_w: exiting.\n");
/* Test 1 signaler thread: waits to be woken by sv_test_1_w, then takes
   the spinlock monitor and signals the sv (sv_signal call elided). */
350 static int sv_test_1_s(void *arg)
352 printk("sv_test_1_s: waiting for semaphore.\n");
354 printk("sv_test_1_s: semaphore acquired. Acquiring spinlock.\n");
355 spin_lock((spinlock_t*)arg);
356 printk("sv_test_1_s: spinlock acquired. sv_signaling.\n");
358 printk("sv_test_1_s: talkback.\n");
360 printk("sv_test_1_s: exiting.\n");
/* Semaphore used as the monitor lock for the test-2 sv's. */
366 static DECLARE_MUTEX(monitor);
/* Test 2 waiter: blocks on the sv using the semaphore monitor.
   NOTE(review): `dummy` (the thread id printed below) is declared/
   assigned in elided lines -- confirm. */
368 static int sv_test_2_w(void *arg)
371 sv_t *sv = (sv_t *)arg;
375 printk("sv_test_2_w: thread %d started, sv_waiting.\n", dummy);
376 sv_sema_wait(sv, &monitor);
377 printk("sv_test_2_w: thread %d woken, exiting.\n", dummy);
/* Test 2, phase 1 signaler: wakes the three waiters one at a time,
   then signals/broadcasts an empty sv to check that is harmless. */
382 static int sv_test_2_s_1(void *arg)
385 sv_t *sv = (sv_t *)arg;
388 for(i = 0; i < 3; i++) {
389 printk("sv_test_2_s_1: waking one thread.\n");
394 printk("sv_test_2_s_1: signaling and broadcasting again. Nothing should happen.\n");
400 printk("sv_test_2_s_1: talkbacking.\n");
/* Test 2, phase 2 signaler: wakes three waiters individually, then
   broadcasts to release the rest.
   NOTE(review): the later printk labels say "sv_test_3_s" -- looks
   like a stale copy/paste in the message text (left untouched here,
   as it is runtime output). */
406 static int sv_test_2_s(void *arg)
409 sv_t *sv = (sv_t *)arg;
412 for(i = 0; i < 3; i++) {
413 printk("sv_test_2_s: waking one thread (should be %d.)\n", i);
418 printk("sv_test_3_s: waking remaining threads with broadcast.\n");
423 printk("sv_test_3_s: sending talkback.\n");
426 printk("sv_test_3_s: exiting.\n");
/* Drive the test-2 scenario against @sv: spawn 3 waiters plus the
   phase-1 signaler, then 10 more waiters plus the phase-2 signaler.
   Synchronization between phases (down(&talkback) etc.) is elided. */
432 static void big_test(sv_t *sv)
438 for(i = 0; i < 3; i++) {
439 printk("big_test: spawning thread %d.\n", i);
440 kernel_thread(sv_test_2_w, sv, 0);
444 printk("big_test: spawning first wake-up thread.\n");
445 kernel_thread(sv_test_2_s_1, sv, 0);
448 printk("big_test: talkback happened.\n");
451 for(i = 3; i < 13; i++) {
452 printk("big_test: spawning thread %d.\n", i);
453 kernel_thread(sv_test_2_w, sv, 0);
457 printk("big_test: spawning wake-up thread.\n");
458 kernel_thread(sv_test_2_s, sv, 0);
/* Shared state for the SV_INTS stress test. */
464 spinlock_t int_test_spin = SPIN_LOCK_UNLOCKED;
466 static int irqtestcount;
/* Interrupt-state stress worker: repeatedly takes int_test_spin
   (alternating between the irqsave and plain-irq variants; the
   selection logic and loop header are elided) and sv_wait()s on
   int_test_sv with interrupts disabled, printing the saved flag word
   at each step so interrupt-enable bugs are visible in the log. */
468 static int interrupt_test_worker(void *unused)
/* NOTE(review): ++irqtestcount from concurrently started threads is
   unsynchronized; ids may collide.  Test-only code. */
470 int id = ++irqtestcount;
472 unsigned long flags, flags2;
474 printk("ITW: thread %d started.\n", id);
477 local_save_flags(flags2);
479 printk("ITW %2d %5d: irqsaving (%lx)\n", id, it, flags2);
480 spin_lock_irqsave(&int_test_spin, flags);
482 printk("ITW %2d %5d: spin_lock_irqing (%lx)\n", id, it, flags2);
483 spin_lock_irq(&int_test_spin);
486 local_save_flags(flags2);
487 printk("ITW %2d %5d: locked, sv_waiting (%lx).\n", id, it, flags2);
488 sv_wait(&int_test_sv, 0, 0);
490 local_save_flags(flags2);
491 printk("ITW %2d %5d: wait finished (%lx), pausing\n", id, it, flags2);
/* Sleep a pseudo-random 0..15 jiffies between iterations. */
492 set_current_state(TASK_INTERRUPTIBLE);
493 schedule_timeout(jiffies & 0xf);
494 if(current->state != TASK_RUNNING)
495 printk("ITW: current->state isn't RUNNING after schedule!\n");
/* Set up the SV_INTS stress test: init int_test_sv with the spinlock
   monitor and SV_INTS, then spawn SV_INTERRUPT_TEST_WORKERS workers. */
500 static void interrupt_test(void)
504 printk("interrupt_test: initing sv.\n");
505 sv_init(&int_test_sv, &int_test_spin, SV_MON_SPIN | SV_INTS);
507 for(i = 0; i < SV_INTERRUPT_TEST_WORKERS; i++) {
508 printk("interrupt_test: starting test thread %d.\n", i);
509 kernel_thread(interrupt_test_worker, 0, 0);
511 printk("interrupt_test: done with init part.\n");
/* Self-test driver body.  NOTE(review): the enclosing function header
   (presumably sv_test()) is elided from this listing, as are the
   big_test()/sv_destroy() calls between several of the lines below. */
517 spinlock_t s = SPIN_LOCK_UNLOCKED;
/* Test 1: spinlock-monitor sv with one waiter and one signaler. */
519 sv_init(&sv, &s, SV_MON_SPIN);
520 printk("sv_test: starting sv_test_1_w.\n");
521 kernel_thread(sv_test_1_w, &s, 0);
522 printk("sv_test: starting sv_test_1_s.\n");
523 kernel_thread(sv_test_1_s, &s, 0);
525 printk("sv_test: waiting for talkback.\n");
/* Both test-1 threads must report back before tearing down. */
526 down(&talkback); down(&talkback);
527 printk("sv_test: talkback happened, sv_destroying.\n");
/* Test 2: semaphore-monitor sv, default (FIFO) order. */
532 printk("sv_test: beginning big_test on sv.\n");
534 sv_init(&sv, &monitor, SV_MON_SEMA);
/* Test 3: same, LIFO order.
   NOTE(review): spelled SV_ORDER_FILO here vs. SV_ORDER_LIFO in
   sv_set_ord() above -- confirm which constant sv.h declares. */
538 printk("sv_test: beginning big_test on sv_filo.\n");
539 sv_init(&sv_filo, &monitor, SV_MON_SEMA | SV_ORDER_FILO);
541 sv_destroy(&sv_filo);
545 printk("sv_test: done.\n");
551 #endif /* RUN_SV_TEST */