-/* rwsem-spinlock.c: R/W semaphores: contention handling functions for generic spinlock
- * implementation
+/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
+ * generic spinlock implementation
*
* Copyright (c) 2001 David Howells (dhowells@redhat.com).
* - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
#include <linux/module.h>
struct rwsem_waiter {
- struct list_head list;
- struct task_struct *task;
- unsigned int flags;
+ struct list_head list;
+ struct task_struct *task;
+ unsigned int flags;
#define RWSEM_WAITING_FOR_READ 0x00000001
#define RWSEM_WAITING_FOR_WRITE 0x00000002
};
{
if (sem->debug)
printk("[%d] %s({%d,%d})\n",
- current->pid,str,sem->activity,list_empty(&sem->wait_list)?0:1);
+ current->pid, str, sem->activity,
+ list_empty(&sem->wait_list) ? 0 : 1);
}
#endif
}
/*
- * handle the lock being released whilst there are processes blocked on it that can now run
+ * handle the lock release when processes blocked on it can now run
* - if we come here, then:
* - the 'active count' _reached_ zero
* - the 'waiting count' is non-zero
* - woken process blocks are discarded from the list after having task zeroed
* - writers are only woken if wakewrite is non-zero
*/
-static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
+static inline struct rw_semaphore *
+__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
struct rwsem_waiter *waiter;
struct task_struct *tsk;
int woken;
- rwsemtrace(sem,"Entering __rwsem_do_wake");
+ rwsemtrace(sem, "Entering __rwsem_do_wake");
- waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
+ waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
if (!wakewrite) {
if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
goto dont_wake_writers;
}
- /* if we are allowed to wake writers try to grant a single write lock if there's a
- * writer at the front of the queue
- * - we leave the 'waiting count' incremented to signify potential contention
+ /* if we are allowed to wake writers try to grant a single write lock
+ * if there's a writer at the front of the queue
+ * - we leave the 'waiting count' incremented to signify potential
+ * contention
*/
if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
sem->activity = -1;
list_del(&waiter->list);
tsk = waiter->task;
+ /* Don't touch waiter after ->task has been NULLed */
mb();
waiter->task = NULL;
wake_up_process(tsk);
goto out;
}
- /* grant an infinite number of read locks to the readers at the front of the queue */
+ /* grant an infinite number of read locks to the front of the queue */
dont_wake_writers:
woken = 0;
- while (waiter->flags&RWSEM_WAITING_FOR_READ) {
+ while (waiter->flags & RWSEM_WAITING_FOR_READ) {
struct list_head *next = waiter->list.next;
list_del(&waiter->list);
woken++;
if (list_empty(&sem->wait_list))
break;
- waiter = list_entry(next,struct rwsem_waiter,list);
+ waiter = list_entry(next, struct rwsem_waiter, list);
}
sem->activity += woken;
out:
- rwsemtrace(sem,"Leaving __rwsem_do_wake");
+ rwsemtrace(sem, "Leaving __rwsem_do_wake");
return sem;
}
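/*
 * Illustrative user-space model of the wake policy implemented above
 * (a sketch only; 'model_waiter' and 'model_wake' are invented names,
 * not kernel code). In this spinlock-based variant, 'activity' counts
 * active readers when positive, is -1 for one active writer, and is 0
 * when the semaphore is free.
 */
#include <stdio.h>

struct model_waiter {
	int wants_write;
};

/* how many waiters at the head of the queue may be granted the lock */
static int model_wake(int wakewrite, const struct model_waiter *q, int n)
{
	int i;

	if (n > 0 && q[0].wants_write)
		return wakewrite ? 1 : 0;	/* one writer, or nobody */
	for (i = 0; i < n && !q[i].wants_write; i++)
		;				/* leading run of readers */
	return i;
}

int main(void)
{
	const struct model_waiter q[] = { {0}, {0}, {1}, {0} };

	printf("%d\n", model_wake(1, q, 4));	/* 2: both head readers woken */
	return 0;
}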
/*
* wake a single writer
*/
-static inline struct rw_semaphore *__rwsem_wake_one_writer(struct rw_semaphore *sem)
+static inline struct rw_semaphore *
+__rwsem_wake_one_writer(struct rw_semaphore *sem)
{
struct rwsem_waiter *waiter;
struct task_struct *tsk;
sem->activity = -1;
- waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
+ waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
list_del(&waiter->list);
tsk = waiter->task;
/*
* get a read lock on the semaphore
*/
-void fastcall __down_read(struct rw_semaphore *sem)
+void fastcall __sched __down_read(struct rw_semaphore *sem)
{
struct rwsem_waiter waiter;
struct task_struct *tsk;
- rwsemtrace(sem,"Entering __down_read");
+ rwsemtrace(sem, "Entering __down_read");
spin_lock(&sem->wait_lock);
- if (sem->activity>=0 && list_empty(&sem->wait_list)) {
+ if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
/* granted */
sem->activity++;
spin_unlock(&sem->wait_lock);
}
tsk = current;
- set_task_state(tsk,TASK_UNINTERRUPTIBLE);
+ set_task_state(tsk, TASK_UNINTERRUPTIBLE);
/* set up my own style of waitqueue */
waiter.task = tsk;
waiter.flags = RWSEM_WAITING_FOR_READ;
get_task_struct(tsk);
- list_add_tail(&waiter.list,&sem->wait_list);
+ list_add_tail(&waiter.list, &sem->wait_list);
/* we don't need to touch the semaphore struct anymore */
spin_unlock(&sem->wait_lock);
tsk->state = TASK_RUNNING;
out:
- rwsemtrace(sem,"Leaving __down_read");
+ rwsemtrace(sem, "Leaving __down_read");
}
/*
int fastcall __down_read_trylock(struct rw_semaphore *sem)
{
int ret = 0;
- rwsemtrace(sem,"Entering __down_read_trylock");
+ rwsemtrace(sem, "Entering __down_read_trylock");
spin_lock(&sem->wait_lock);
- if (sem->activity>=0 && list_empty(&sem->wait_list)) {
+ if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
/* granted */
sem->activity++;
ret = 1;
spin_unlock(&sem->wait_lock);
- rwsemtrace(sem,"Leaving __down_read_trylock");
+ rwsemtrace(sem, "Leaving __down_read_trylock");
return ret;
}
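/*
 * Hedged usage sketch for the trylock path above; 'stats_sem' and
 * 'try_dump_stats' are illustrative names, not part of this patch.
 * down_read_trylock() returns 1 on success and 0 on contention, and
 * never sleeps, so it is usable where blocking is not allowed.
 */
static DECLARE_RWSEM(stats_sem);

static int try_dump_stats(void)
{
	if (!down_read_trylock(&stats_sem))
		return -EBUSY;		/* contended: caller must not block */
	/* ... read the shared statistics ... */
	up_read(&stats_sem);
	return 0;
}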
/*
* get a write lock on the semaphore
- * - note that we increment the waiting count anyway to indicate an exclusive lock
+ * - we increment the waiting count anyway to indicate an exclusive lock
*/
-void fastcall __down_write(struct rw_semaphore *sem)
+void fastcall __sched __down_write(struct rw_semaphore *sem)
{
struct rwsem_waiter waiter;
struct task_struct *tsk;
- rwsemtrace(sem,"Entering __down_write");
+ rwsemtrace(sem, "Entering __down_write");
spin_lock(&sem->wait_lock);
- if (sem->activity==0 && list_empty(&sem->wait_list)) {
+ if (sem->activity == 0 && list_empty(&sem->wait_list)) {
/* granted */
sem->activity = -1;
spin_unlock(&sem->wait_lock);
}
tsk = current;
- set_task_state(tsk,TASK_UNINTERRUPTIBLE);
+ set_task_state(tsk, TASK_UNINTERRUPTIBLE);
/* set up my own style of waitqueue */
waiter.task = tsk;
waiter.flags = RWSEM_WAITING_FOR_WRITE;
get_task_struct(tsk);
- list_add_tail(&waiter.list,&sem->wait_list);
+ list_add_tail(&waiter.list, &sem->wait_list);
/* we don't need to touch the semaphore struct anymore */
spin_unlock(&sem->wait_lock);
tsk->state = TASK_RUNNING;
out:
- rwsemtrace(sem,"Leaving __down_write");
+ rwsemtrace(sem, "Leaving __down_write");
}
/*
int fastcall __down_write_trylock(struct rw_semaphore *sem)
{
int ret = 0;
- rwsemtrace(sem,"Entering __down_write_trylock");
+ rwsemtrace(sem, "Entering __down_write_trylock");
spin_lock(&sem->wait_lock);
- if (sem->activity==0 && list_empty(&sem->wait_list)) {
+ if (sem->activity == 0 && list_empty(&sem->wait_list)) {
/* granted */
sem->activity = -1;
ret = 1;
spin_unlock(&sem->wait_lock);
- rwsemtrace(sem,"Leaving __down_write_trylock");
+ rwsemtrace(sem, "Leaving __down_write_trylock");
return ret;
}
*/
void fastcall __up_read(struct rw_semaphore *sem)
{
- rwsemtrace(sem,"Entering __up_read");
+ rwsemtrace(sem, "Entering __up_read");
spin_lock(&sem->wait_lock);
- if (--sem->activity==0 && !list_empty(&sem->wait_list))
+ if (--sem->activity == 0 && !list_empty(&sem->wait_list))
sem = __rwsem_wake_one_writer(sem);
spin_unlock(&sem->wait_lock);
- rwsemtrace(sem,"Leaving __up_read");
+ rwsemtrace(sem, "Leaving __up_read");
}
/*
*/
void fastcall __up_write(struct rw_semaphore *sem)
{
- rwsemtrace(sem,"Entering __up_write");
+ rwsemtrace(sem, "Entering __up_write");
spin_lock(&sem->wait_lock);
spin_unlock(&sem->wait_lock);
- rwsemtrace(sem,"Leaving __up_write");
+ rwsemtrace(sem, "Leaving __up_write");
}
/*
*/
void fastcall __downgrade_write(struct rw_semaphore *sem)
{
- rwsemtrace(sem,"Entering __downgrade_write");
+ rwsemtrace(sem, "Entering __downgrade_write");
spin_lock(&sem->wait_lock);
sem->activity = 1;
if (!list_empty(&sem->wait_list))
- sem = __rwsem_do_wake(sem,0);
+ sem = __rwsem_do_wake(sem, 0);
spin_unlock(&sem->wait_lock);
- rwsemtrace(sem,"Leaving __downgrade_write");
+ rwsemtrace(sem, "Leaving __downgrade_write");
}
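/*
 * Hedged usage sketch for the downgrade path above; 'my_sem' and
 * 'update_then_read' are illustrative names, not part of this patch.
 */
static DECLARE_RWSEM(my_sem);

static void update_then_read(void)
{
	down_write(&my_sem);		/* exclusive: activity becomes -1 */
	/* ... modify the shared state ... */
	downgrade_write(&my_sem);	/* shared: activity becomes 1, and
					 * queued readers may now be woken */
	/* ... keep reading alongside other readers; no writer can slip in
	 * between the update and the read ... */
	up_read(&my_sem);
}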
EXPORT_SYMBOL(init_rwsem);
#include <linux/module.h>
struct rwsem_waiter {
- struct list_head list;
- struct task_struct *task;
- unsigned int flags;
+ struct list_head list;
+ struct task_struct *task;
+ unsigned int flags;
#define RWSEM_WAITING_FOR_READ 0x00000001
#define RWSEM_WAITING_FOR_WRITE 0x00000002
};
#undef rwsemtrace
void rwsemtrace(struct rw_semaphore *sem, const char *str)
{
- printk("sem=%p\n",sem);
- printk("(sem)=%08lx\n",sem->count);
+ printk("sem=%p\n", sem);
+ printk("(sem)=%08lx\n", sem->count);
if (sem->debug)
- printk("[%d] %s({%08lx})\n",current->pid,str,sem->count);
+ printk("[%d] %s({%08lx})\n", current->pid, str, sem->count);
}
#endif
/*
- * handle the lock being released whilst there are processes blocked on it that can now run
+ * handle the lock release when processes blocked on it can now run
* - if we come here from up_xxxx(), then:
- * - the 'active part' of the count (&0x0000ffff) had reached zero (but may have changed)
- * - the 'waiting part' of the count (&0xffff0000) is negative (and will still be so)
+ * - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
+ * - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
* - there must be someone on the queue
* - the spinlock must be held by the caller
* - woken process blocks are discarded from the list after having task zeroed
* - writers are only woken if downgrading is false
*/
-static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
+static inline struct rw_semaphore *
+__rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
{
struct rwsem_waiter *waiter;
struct task_struct *tsk;
struct list_head *next;
signed long oldcount, woken, loop;
- rwsemtrace(sem,"Entering __rwsem_do_wake");
+ rwsemtrace(sem, "Entering __rwsem_do_wake");
if (downgrading)
goto dont_wake_writers;
* if we can transition the active part of the count from 0 -> 1
*/
try_again:
- oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS,sem) - RWSEM_ACTIVE_BIAS;
+ oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem)
+ - RWSEM_ACTIVE_BIAS;
if (oldcount & RWSEM_ACTIVE_MASK)
goto undo;
- waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
+ waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
- /* try to grant a single write lock if there's a writer at the front of the queue
- * - note we leave the 'active part' of the count incremented by 1 and the waiting part
- * incremented by 0x00010000
+ /* try to grant a single write lock if there's a writer at the front
+ * of the queue - note we leave the 'active part' of the count
+ * incremented by 1 and the waiting part incremented by 0x00010000
*/
if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
goto readers_only;
+ /* We must be careful not to touch 'waiter' after we set ->task = NULL.
+ * It is allocated on the waiter's stack and may become invalid at
+ * any time after that point (due to a wakeup from another source).
+ */
list_del(&waiter->list);
tsk = waiter->task;
mb();
/* don't want to wake any writers */
dont_wake_writers:
- waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
+ waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
goto out;
do {
woken++;
- if (waiter->list.next==&sem->wait_list)
+ if (waiter->list.next == &sem->wait_list)
break;
- waiter = list_entry(waiter->list.next,struct rwsem_waiter,list);
+ waiter = list_entry(waiter->list.next,
+ struct rwsem_waiter, list);
} while (waiter->flags & RWSEM_WAITING_FOR_READ);
loop = woken;
woken *= RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS;
if (!downgrading)
- woken -= RWSEM_ACTIVE_BIAS; /* we'd already done one increment
- * earlier */
- rwsem_atomic_add(woken,sem);
+ /* we'd already done one increment earlier */
+ woken -= RWSEM_ACTIVE_BIAS;
+
+ rwsem_atomic_add(woken, sem);
next = sem->wait_list.next;
- for (; loop>0; loop--) {
- waiter = list_entry(next,struct rwsem_waiter,list);
+ for (; loop > 0; loop--) {
+ waiter = list_entry(next, struct rwsem_waiter, list);
next = waiter->list.next;
tsk = waiter->task;
mb();
next->prev = &sem->wait_list;
out:
- rwsemtrace(sem,"Leaving __rwsem_do_wake");
+ rwsemtrace(sem, "Leaving __rwsem_do_wake");
return sem;
/* undo the change to count, but check for a transition 1->0 */
undo:
- if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS,sem)!=0)
+ if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) != 0)
goto out;
goto try_again;
}
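/*
 * Worked example of the count arithmetic above, assuming the common
 * 32-bit bias values (RWSEM_ACTIVE_BIAS = 0x00000001, RWSEM_WAITING_BIAS
 * = -0x00010000, RWSEM_ACTIVE_MASK = 0x0000ffff); a user-space sketch,
 * with MODEL_* names standing in for the per-arch constants.
 */
#include <stdio.h>

#define MODEL_ACTIVE_BIAS	0x00000001L
#define MODEL_WAITING_BIAS	(-0x00010000L)

int main(void)
{
	long count = 0, woken;

	count += 3 * MODEL_WAITING_BIAS; /* three readers queue and sleep */
	count += MODEL_ACTIVE_BIAS;	 /* the 0 -> 1 step in try_again */

	/* wake all three: each gains an active unit, drops a waiting unit */
	woken = 3 * (MODEL_ACTIVE_BIAS - MODEL_WAITING_BIAS);
	woken -= MODEL_ACTIVE_BIAS;	 /* one increment was already done */
	count += woken;

	printf("count = %ld\n", count);	 /* 3: three active readers left */
	return 0;
}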
/*
* wait for a lock to be granted
*/
-static inline struct rw_semaphore *rwsem_down_failed_common(struct rw_semaphore *sem,
- struct rwsem_waiter *waiter,
- signed long adjustment)
+static inline struct rw_semaphore *
+rwsem_down_failed_common(struct rw_semaphore *sem,
+ struct rwsem_waiter *waiter, signed long adjustment)
{
struct task_struct *tsk = current;
signed long count;
- set_task_state(tsk,TASK_UNINTERRUPTIBLE);
+ set_task_state(tsk, TASK_UNINTERRUPTIBLE);
/* set up my own style of waitqueue */
spin_lock(&sem->wait_lock);
waiter->task = tsk;
get_task_struct(tsk);
- list_add_tail(&waiter->list,&sem->wait_list);
+ list_add_tail(&waiter->list, &sem->wait_list);
- /* note that we're now waiting on the lock, but no longer actively read-locking */
- count = rwsem_atomic_update(adjustment,sem);
+ /* we're now waiting on the lock, but no longer actively read-locking */
+ count = rwsem_atomic_update(adjustment, sem);
- /* if there are no longer active locks, wake the front queued process(es) up
- * - it might even be this process, since the waker takes a more active part
- */
+ /* if there are no active locks, wake the front queued process(es) up */
if (!(count & RWSEM_ACTIVE_MASK))
sem = __rwsem_do_wake(sem, 0);
/*
* wait for the read lock to be granted
*/
-struct rw_semaphore fastcall __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
+struct rw_semaphore fastcall __sched *
+rwsem_down_read_failed(struct rw_semaphore *sem)
{
struct rwsem_waiter waiter;
- rwsemtrace(sem,"Entering rwsem_down_read_failed");
+ rwsemtrace(sem, "Entering rwsem_down_read_failed");
waiter.flags = RWSEM_WAITING_FOR_READ;
- rwsem_down_failed_common(sem,&waiter,RWSEM_WAITING_BIAS-RWSEM_ACTIVE_BIAS);
+ rwsem_down_failed_common(sem, &waiter,
+ RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS);
- rwsemtrace(sem,"Leaving rwsem_down_read_failed");
+ rwsemtrace(sem, "Leaving rwsem_down_read_failed");
return sem;
}
/*
* wait for the write lock to be granted
*/
-struct rw_semaphore fastcall __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
+struct rw_semaphore fastcall __sched *
+rwsem_down_write_failed(struct rw_semaphore *sem)
{
struct rwsem_waiter waiter;
- rwsemtrace(sem,"Entering rwsem_down_write_failed");
+ rwsemtrace(sem, "Entering rwsem_down_write_failed");
waiter.flags = RWSEM_WAITING_FOR_WRITE;
- rwsem_down_failed_common(sem,&waiter,-RWSEM_ACTIVE_BIAS);
+ rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS);
- rwsemtrace(sem,"Leaving rwsem_down_write_failed");
+ rwsemtrace(sem, "Leaving rwsem_down_write_failed");
return sem;
}
/*
* handle waking up a waiter on the semaphore
- * - up_read/up_write has decremented the active part of the count if we come here
+ * - up_read/up_write has decremented the active part of count if we come here
*/
struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
{
- rwsemtrace(sem,"Entering rwsem_wake");
+ rwsemtrace(sem, "Entering rwsem_wake");
spin_lock(&sem->wait_lock);
spin_unlock(&sem->wait_lock);
- rwsemtrace(sem,"Leaving rwsem_wake");
+ rwsemtrace(sem, "Leaving rwsem_wake");
return sem;
}
/*
* downgrade a write lock into a read lock
- * - caller incremented waiting part of count, and discovered it to be still negative
+ * - caller incremented waiting part of count and discovered it still negative
* - just wake up any readers at the front of the queue
*/
struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
- rwsemtrace(sem,"Entering rwsem_downgrade_wake");
+ rwsemtrace(sem, "Entering rwsem_downgrade_wake");
spin_lock(&sem->wait_lock);
spin_unlock(&sem->wait_lock);
- rwsemtrace(sem,"Leaving rwsem_downgrade_wake");
+ rwsemtrace(sem, "Leaving rwsem_downgrade_wake");
return sem;
}
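/*
 * Worked example for this downgrade path, with the same illustrative
 * 32-bit biases as above: a write holder has count = 0xffff0001
 * (RWSEM_ACTIVE_BIAS + RWSEM_WAITING_BIAS); one queued reader makes it
 * 0xfffe0001. downgrade_write() adds -RWSEM_WAITING_BIAS (+0x00010000),
 * giving 0xffff0001, which is still negative, so we arrive here; waking
 * the reader adds 0x00010001, leaving 0x00000002: two active readers.
 */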