[PATCH] fastcall / regparm fixes
[linux-flexiantxendom0-3.2.10.git] / lib / rwsem.c
1 /* rwsem.c: R/W semaphores: contention handling functions
2  *
3  * Written by David Howells (dhowells@redhat.com).
4  * Derived from arch/i386/kernel/semaphore.c
5  */
6 #include <linux/rwsem.h>
7 #include <linux/sched.h>
8 #include <linux/module.h>
9
/* a record of one task blocked on the semaphore
 * - allocated on the sleeping task's own stack (see rwsem_down_read_failed /
 *   rwsem_down_write_failed), so it is only valid while that task is waiting
 */
struct rwsem_waiter {
	struct list_head	list;	/* link in sem->wait_list (FIFO order) */
	struct task_struct	*task;	/* the sleeping task to wake on grant */
	unsigned int		flags;	/* what the waiter wants; zeroed by the waker to signal the grant */
#define RWSEM_WAITING_FOR_READ	0x00000001
#define RWSEM_WAITING_FOR_WRITE	0x00000002
};
17
#if RWSEM_DEBUG
#undef rwsemtrace
/*
 * dump the semaphore's identity and count to the kernel log
 * - previously the "sem=" and "(sem)=" lines were printed unconditionally,
 *   spamming the log on every traced operation even with sem->debug clear;
 *   all output is now gated on the per-semaphore debug flag
 */
void rwsemtrace(struct rw_semaphore *sem, const char *str)
{
	if (sem->debug) {
		printk("sem=%p\n",sem);
		printk("(sem)=%08lx\n",sem->count);
		printk("[%d] %s({%08lx})\n",current->pid,str,sem->count);
	}
}
#endif
28
29 /*
30  * handle the lock being released whilst there are processes blocked on it that can now run
31  * - if we come here, then:
32  *   - the 'active part' of the count (&0x0000ffff) reached zero but has been re-incremented
33  *   - the 'waiting part' of the count (&0xffff0000) is negative (and will still be so)
34  *   - there must be someone on the queue
35  * - the spinlock must be held by the caller
36  * - woken process blocks are discarded from the list after having flags zeroised
37  * - writers are only woken if wakewrite is non-zero
38  */
39 static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
40 {
41         struct rwsem_waiter *waiter;
42         struct list_head *next;
43         signed long oldcount;
44         int woken, loop;
45
46         rwsemtrace(sem,"Entering __rwsem_do_wake");
47
48         if (!wakewrite)
49                 goto dont_wake_writers;
50
51         /* only wake someone up if we can transition the active part of the count from 0 -> 1 */
52  try_again:
53         oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS,sem) - RWSEM_ACTIVE_BIAS;
54         if (oldcount & RWSEM_ACTIVE_MASK)
55                 goto undo;
56
57         waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
58
59         /* try to grant a single write lock if there's a writer at the front of the queue
60          * - note we leave the 'active part' of the count incremented by 1 and the waiting part
61          *   incremented by 0x00010000
62          */
63         if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
64                 goto readers_only;
65
66         list_del(&waiter->list);
67         waiter->flags = 0;
68         wake_up_process(waiter->task);
69         goto out;
70
71         /* don't want to wake any writers */
72  dont_wake_writers:
73         waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
74         if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
75                 goto out;
76
77         /* grant an infinite number of read locks to the readers at the front of the queue
78          * - note we increment the 'active part' of the count by the number of readers (less one
79          *   for the activity decrement we've already done) before waking any processes up
80          */
81  readers_only:
82         woken = 0;
83         do {
84                 woken++;
85
86                 if (waiter->list.next==&sem->wait_list)
87                         break;
88
89                 waiter = list_entry(waiter->list.next,struct rwsem_waiter,list);
90
91         } while (waiter->flags & RWSEM_WAITING_FOR_READ);
92
93         loop = woken;
94         woken *= RWSEM_ACTIVE_BIAS-RWSEM_WAITING_BIAS;
95         woken -= RWSEM_ACTIVE_BIAS;
96         rwsem_atomic_add(woken,sem);
97
98         next = sem->wait_list.next;
99         for (; loop>0; loop--) {
100                 waiter = list_entry(next,struct rwsem_waiter,list);
101                 next = waiter->list.next;
102                 waiter->flags = 0;
103                 wake_up_process(waiter->task);
104         }
105
106         sem->wait_list.next = next;
107         next->prev = &sem->wait_list;
108
109  out:
110         rwsemtrace(sem,"Leaving __rwsem_do_wake");
111         return sem;
112
113         /* undo the change to count, but check for a transition 1->0 */
114  undo:
115         if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS,sem)!=0)
116                 goto out;
117         goto try_again;
118 }
119
/*
 * wait for a lock to be granted
 * - common slow path shared by the failed down_read and down_write cases
 * - @waiter: caller-stack waiter record; waiter->flags must already be set to
 *   RWSEM_WAITING_FOR_READ or RWSEM_WAITING_FOR_WRITE by the caller
 * - @adjustment: signed delta applied to sem->count to convert this task's
 *   failed active-count increment into a queued-waiter contribution
 * - returns @sem once the lock has been granted (waiter->flags zeroed by the
 *   waker in __rwsem_do_wake)
 */
static inline struct rw_semaphore *rwsem_down_failed_common(struct rw_semaphore *sem,
								 struct rwsem_waiter *waiter,
								 signed long adjustment)
{
	struct task_struct *tsk = current;
	signed long count;

	/* go to sleep state before queueing so a wakeup between the list_add and
	 * the schedule() below is not lost */
	set_task_state(tsk,TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	spin_lock(&sem->wait_lock);
	waiter->task = tsk;

	list_add_tail(&waiter->list,&sem->wait_list);

	/* note that we're now waiting on the lock, but no longer actively read-locking */
	count = rwsem_atomic_update(adjustment,sem);

	/* if there are no longer active locks, wake the front queued process(es) up
	 * - it might even be this process, since the waker takes a more active part
	 */
	if (!(count & RWSEM_ACTIVE_MASK))
		sem = __rwsem_do_wake(sem,1);

	spin_unlock(&sem->wait_lock);

	/* wait to be given the lock
	 * - waiter->flags is polled here without the spinlock; the waker zeroes it
	 *   in __rwsem_do_wake to signal the grant
	 * - NOTE(review): no explicit memory barrier pairs with that store; the
	 *   schedule()/wakeup path is presumably relied on for ordering - confirm
	 */
	for (;;) {
		if (!waiter->flags)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;

	return sem;
}
161
162 /*
163  * wait for the read lock to be granted
164  */
165 struct rw_semaphore fastcall *rwsem_down_read_failed(struct rw_semaphore *sem)
166 {
167         struct rwsem_waiter waiter;
168
169         rwsemtrace(sem,"Entering rwsem_down_read_failed");
170
171         waiter.flags = RWSEM_WAITING_FOR_READ;
172         rwsem_down_failed_common(sem,&waiter,RWSEM_WAITING_BIAS-RWSEM_ACTIVE_BIAS);
173
174         rwsemtrace(sem,"Leaving rwsem_down_read_failed");
175         return sem;
176 }
177
178 /*
179  * wait for the write lock to be granted
180  */
181 struct rw_semaphore fastcall *rwsem_down_write_failed(struct rw_semaphore *sem)
182 {
183         struct rwsem_waiter waiter;
184
185         rwsemtrace(sem,"Entering rwsem_down_write_failed");
186
187         waiter.flags = RWSEM_WAITING_FOR_WRITE;
188         rwsem_down_failed_common(sem,&waiter,-RWSEM_ACTIVE_BIAS);
189
190         rwsemtrace(sem,"Leaving rwsem_down_write_failed");
191         return sem;
192 }
193
194 /*
195  * handle waking up a waiter on the semaphore
196  * - up_read has decremented the active part of the count if we come here
197  */
198 struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
199 {
200         rwsemtrace(sem,"Entering rwsem_wake");
201
202         spin_lock(&sem->wait_lock);
203
204         /* do nothing if list empty */
205         if (!list_empty(&sem->wait_list))
206                 sem = __rwsem_do_wake(sem,1);
207
208         spin_unlock(&sem->wait_lock);
209
210         rwsemtrace(sem,"Leaving rwsem_wake");
211
212         return sem;
213 }
214
215 /*
216  * downgrade a write lock into a read lock
217  * - caller incremented waiting part of count, and discovered it to be still negative
218  * - just wake up any readers at the front of the queue
219  */
220 struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
221 {
222         rwsemtrace(sem,"Entering rwsem_downgrade_wake");
223
224         spin_lock(&sem->wait_lock);
225
226         /* do nothing if list empty */
227         if (!list_empty(&sem->wait_list))
228                 sem = __rwsem_do_wake(sem,0);
229
230         spin_unlock(&sem->wait_lock);
231
232         rwsemtrace(sem,"Leaving rwsem_downgrade_wake");
233         return sem;
234 }
235
/* export the slow-path entry points so modules using rwsems can link against
 * them (NOVERS: no symbol versioning)
 */
EXPORT_SYMBOL_NOVERS(rwsem_down_read_failed);
EXPORT_SYMBOL_NOVERS(rwsem_down_write_failed);
EXPORT_SYMBOL_NOVERS(rwsem_wake);
EXPORT_SYMBOL_NOVERS(rwsem_downgrade_wake);
#if RWSEM_DEBUG
EXPORT_SYMBOL(rwsemtrace);
#endif