[PATCH] lindent rwsem
author    Nick Piggin <nickpiggin@yahoo.com.au>
Sun, 20 Jun 2004 13:45:30 +0000 (06:45 -0700)
committer Linus Torvalds <torvalds@ppc970.osdl.org>
Sun, 20 Jun 2004 13:45:30 +0000 (06:45 -0700)
Lindent rwsem.c and rwsem-spinlock.c and fix a few things by hand.  Also
add a couple of comments for the memory barriers, and add the __sched
annotation that was left out of rwsem-spinlock.c.

Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

lib/rwsem-spinlock.c
lib/rwsem.c
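
A note on the memory-barrier comments added below.  The rwsem waiter
structure lives on the sleeping task's stack, so waking a waiter is a
hand-off: the waker must take its own reference on the task, and must not
touch the waiter again once it has stored waiter->task = NULL, because the
sleeper is free to return (and reuse that stack) as soon as it observes the
NULL.  The following is a condensed sketch of the two sides of that
hand-off, paraphrased from the surrounding lib/rwsem-spinlock.c code; it is
illustrative only and not part of this patch.

    /* Sleeper side, as in __down_read()/__down_write(): the waiter lives
     * on this task's stack.
     */
    struct rwsem_waiter waiter;
    struct task_struct *tsk = current;

    set_task_state(tsk, TASK_UNINTERRUPTIBLE);
    waiter.task = tsk;
    waiter.flags = RWSEM_WAITING_FOR_READ;
    get_task_struct(tsk);              /* reference is dropped by the waker */
    list_add_tail(&waiter.list, &sem->wait_list);
    spin_unlock(&sem->wait_lock);

    for (;;) {
            if (!waiter.task)          /* the waker has granted us the lock */
                    break;
            schedule();
            set_task_state(tsk, TASK_UNINTERRUPTIBLE);
    }
    tsk->state = TASK_RUNNING;         /* 'waiter' may now safely go away */

    /* Waker side, as in __rwsem_do_wake(), where 'w' points at the first
     * queued waiter (which lives on the sleeper's stack): copy the task
     * pointer out, order it against the NULL store with mb(), and never
     * touch *w again after the store.
     */
    struct rwsem_waiter *w = list_entry(sem->wait_list.next,
                                        struct rwsem_waiter, list);

    list_del(&w->list);
    tsk = w->task;
    mb();                              /* read ->task before publishing NULL */
    w->task = NULL;                    /* last permitted access to *w */
    wake_up_process(tsk);
    put_task_struct(tsk);              /* pairs with get_task_struct() above */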

diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index a71152d..96255f4 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -1,5 +1,5 @@
-/* rwsem-spinlock.c: R/W semaphores: contention handling functions for generic spinlock
- *                                   implementation
+/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
+ * generic spinlock implementation
  *
  * Copyright (c) 2001   David Howells (dhowells@redhat.com).
  * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
@@ -10,9 +10,9 @@
 #include <linux/module.h>
 
 struct rwsem_waiter {
-       struct list_head        list;
-       struct task_struct      *task;
-       unsigned int            flags;
+       struct list_head list;
+       struct task_struct *task;
+       unsigned int flags;
 #define RWSEM_WAITING_FOR_READ 0x00000001
 #define RWSEM_WAITING_FOR_WRITE        0x00000002
 };
@@ -22,7 +22,8 @@ void rwsemtrace(struct rw_semaphore *sem, const char *str)
 {
        if (sem->debug)
                printk("[%d] %s({%d,%d})\n",
-                      current->pid,str,sem->activity,list_empty(&sem->wait_list)?0:1);
+                      current->pid, str, sem->activity,
+                      list_empty(&sem->wait_list) ? 0 : 1);
 }
 #endif
 
@@ -40,7 +41,7 @@ void fastcall init_rwsem(struct rw_semaphore *sem)
 }
 
 /*
- * handle the lock being released whilst there are processes blocked on it that can now run
+ * handle the lock release when processes blocked on it that can now run
  * - if we come here, then:
  *   - the 'active count' _reached_ zero
  *   - the 'waiting count' is non-zero
@@ -48,15 +49,16 @@ void fastcall init_rwsem(struct rw_semaphore *sem)
  * - woken process blocks are discarded from the list after having task zeroed
  * - writers are only woken if wakewrite is non-zero
  */
-static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
+static inline struct rw_semaphore *
+__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 {
        struct rwsem_waiter *waiter;
        struct task_struct *tsk;
        int woken;
 
-       rwsemtrace(sem,"Entering __rwsem_do_wake");
+       rwsemtrace(sem, "Entering __rwsem_do_wake");
 
-       waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
+       waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
 
        if (!wakewrite) {
                if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
@@ -64,14 +66,16 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int
                goto dont_wake_writers;
        }
 
-       /* if we are allowed to wake writers try to grant a single write lock if there's a
-        * writer at the front of the queue
-        * - we leave the 'waiting count' incremented to signify potential contention
+       /* if we are allowed to wake writers try to grant a single write lock
+        * if there's a writer at the front of the queue
+        * - we leave the 'waiting count' incremented to signify potential
+        *   contention
         */
        if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
                sem->activity = -1;
                list_del(&waiter->list);
                tsk = waiter->task;
+               /* Don't touch waiter after ->task has been NULLed */
                mb();
                waiter->task = NULL;
                wake_up_process(tsk);
@@ -79,10 +83,10 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int
                goto out;
        }
 
-       /* grant an infinite number of read locks to the readers at the front of the queue */
+       /* grant an infinite number of read locks to the front of the queue */
  dont_wake_writers:
        woken = 0;
-       while (waiter->flags&RWSEM_WAITING_FOR_READ) {
+       while (waiter->flags & RWSEM_WAITING_FOR_READ) {
                struct list_head *next = waiter->list.next;
 
                list_del(&waiter->list);
@@ -94,27 +98,28 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int
                woken++;
                if (list_empty(&sem->wait_list))
                        break;
-               waiter = list_entry(next,struct rwsem_waiter,list);
+               waiter = list_entry(next, struct rwsem_waiter, list);
        }
 
        sem->activity += woken;
 
  out:
-       rwsemtrace(sem,"Leaving __rwsem_do_wake");
+       rwsemtrace(sem, "Leaving __rwsem_do_wake");
        return sem;
 }
 
 /*
  * wake a single writer
  */
-static inline struct rw_semaphore *__rwsem_wake_one_writer(struct rw_semaphore *sem)
+static inline struct rw_semaphore *
+__rwsem_wake_one_writer(struct rw_semaphore *sem)
 {
        struct rwsem_waiter *waiter;
        struct task_struct *tsk;
 
        sem->activity = -1;
 
-       waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
+       waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
        list_del(&waiter->list);
 
        tsk = waiter->task;
@@ -128,16 +133,16 @@ static inline struct rw_semaphore *__rwsem_wake_one_writer(struct rw_semaphore *
 /*
  * get a read lock on the semaphore
  */
-void fastcall __down_read(struct rw_semaphore *sem)
+void fastcall __sched __down_read(struct rw_semaphore *sem)
 {
        struct rwsem_waiter waiter;
        struct task_struct *tsk;
 
-       rwsemtrace(sem,"Entering __down_read");
+       rwsemtrace(sem, "Entering __down_read");
 
        spin_lock(&sem->wait_lock);
 
-       if (sem->activity>=0 && list_empty(&sem->wait_list)) {
+       if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->activity++;
                spin_unlock(&sem->wait_lock);
@@ -145,14 +150,14 @@ void fastcall __down_read(struct rw_semaphore *sem)
        }
 
        tsk = current;
-       set_task_state(tsk,TASK_UNINTERRUPTIBLE);
+       set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 
        /* set up my own style of waitqueue */
        waiter.task = tsk;
        waiter.flags = RWSEM_WAITING_FOR_READ;
        get_task_struct(tsk);
 
-       list_add_tail(&waiter.list,&sem->wait_list);
+       list_add_tail(&waiter.list, &sem->wait_list);
 
        /* we don't need to touch the semaphore struct anymore */
        spin_unlock(&sem->wait_lock);
@@ -168,7 +173,7 @@ void fastcall __down_read(struct rw_semaphore *sem)
        tsk->state = TASK_RUNNING;
 
  out:
-       rwsemtrace(sem,"Leaving __down_read");
+       rwsemtrace(sem, "Leaving __down_read");
 }
 
 /*
@@ -177,11 +182,11 @@ void fastcall __down_read(struct rw_semaphore *sem)
 int fastcall __down_read_trylock(struct rw_semaphore *sem)
 {
        int ret = 0;
-       rwsemtrace(sem,"Entering __down_read_trylock");
+       rwsemtrace(sem, "Entering __down_read_trylock");
 
        spin_lock(&sem->wait_lock);
 
-       if (sem->activity>=0 && list_empty(&sem->wait_list)) {
+       if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->activity++;
                ret = 1;
@@ -189,24 +194,24 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
 
        spin_unlock(&sem->wait_lock);
 
-       rwsemtrace(sem,"Leaving __down_read_trylock");
+       rwsemtrace(sem, "Leaving __down_read_trylock");
        return ret;
 }
 
 /*
  * get a write lock on the semaphore
- * - note that we increment the waiting count anyway to indicate an exclusive lock
+ * - we increment the waiting count anyway to indicate an exclusive lock
  */
-void fastcall __down_write(struct rw_semaphore *sem)
+void fastcall __sched __down_write(struct rw_semaphore *sem)
 {
        struct rwsem_waiter waiter;
        struct task_struct *tsk;
 
-       rwsemtrace(sem,"Entering __down_write");
+       rwsemtrace(sem, "Entering __down_write");
 
        spin_lock(&sem->wait_lock);
 
-       if (sem->activity==0 && list_empty(&sem->wait_list)) {
+       if (sem->activity == 0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->activity = -1;
                spin_unlock(&sem->wait_lock);
@@ -214,14 +219,14 @@ void fastcall __down_write(struct rw_semaphore *sem)
        }
 
        tsk = current;
-       set_task_state(tsk,TASK_UNINTERRUPTIBLE);
+       set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 
        /* set up my own style of waitqueue */
        waiter.task = tsk;
        waiter.flags = RWSEM_WAITING_FOR_WRITE;
        get_task_struct(tsk);
 
-       list_add_tail(&waiter.list,&sem->wait_list);
+       list_add_tail(&waiter.list, &sem->wait_list);
 
        /* we don't need to touch the semaphore struct anymore */
        spin_unlock(&sem->wait_lock);
@@ -237,7 +242,7 @@ void fastcall __down_write(struct rw_semaphore *sem)
        tsk->state = TASK_RUNNING;
 
  out:
-       rwsemtrace(sem,"Leaving __down_write");
+       rwsemtrace(sem, "Leaving __down_write");
 }
 
 /*
@@ -246,11 +251,11 @@ void fastcall __down_write(struct rw_semaphore *sem)
 int fastcall __down_write_trylock(struct rw_semaphore *sem)
 {
        int ret = 0;
-       rwsemtrace(sem,"Entering __down_write_trylock");
+       rwsemtrace(sem, "Entering __down_write_trylock");
 
        spin_lock(&sem->wait_lock);
 
-       if (sem->activity==0 && list_empty(&sem->wait_list)) {
+       if (sem->activity == 0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->activity = -1;
                ret = 1;
@@ -258,7 +263,7 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
 
        spin_unlock(&sem->wait_lock);
 
-       rwsemtrace(sem,"Leaving __down_write_trylock");
+       rwsemtrace(sem, "Leaving __down_write_trylock");
        return ret;
 }
 
@@ -267,16 +272,16 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
  */
 void fastcall __up_read(struct rw_semaphore *sem)
 {
-       rwsemtrace(sem,"Entering __up_read");
+       rwsemtrace(sem, "Entering __up_read");
 
        spin_lock(&sem->wait_lock);
 
-       if (--sem->activity==0 && !list_empty(&sem->wait_list))
+       if (--sem->activity == 0 && !list_empty(&sem->wait_list))
                sem = __rwsem_wake_one_writer(sem);
 
        spin_unlock(&sem->wait_lock);
 
-       rwsemtrace(sem,"Leaving __up_read");
+       rwsemtrace(sem, "Leaving __up_read");
 }
 
 /*
@@ -284,7 +289,7 @@ void fastcall __up_read(struct rw_semaphore *sem)
  */
 void fastcall __up_write(struct rw_semaphore *sem)
 {
-       rwsemtrace(sem,"Entering __up_write");
+       rwsemtrace(sem, "Entering __up_write");
 
        spin_lock(&sem->wait_lock);
 
@@ -294,7 +299,7 @@ void fastcall __up_write(struct rw_semaphore *sem)
 
        spin_unlock(&sem->wait_lock);
 
-       rwsemtrace(sem,"Leaving __up_write");
+       rwsemtrace(sem, "Leaving __up_write");
 }
 
 /*
@@ -303,17 +308,17 @@ void fastcall __up_write(struct rw_semaphore *sem)
  */
 void fastcall __downgrade_write(struct rw_semaphore *sem)
 {
-       rwsemtrace(sem,"Entering __downgrade_write");
+       rwsemtrace(sem, "Entering __downgrade_write");
 
        spin_lock(&sem->wait_lock);
 
        sem->activity = 1;
        if (!list_empty(&sem->wait_list))
-               sem = __rwsem_do_wake(sem,0);
+               sem = __rwsem_do_wake(sem, 0);
 
        spin_unlock(&sem->wait_lock);
 
-       rwsemtrace(sem,"Leaving __downgrade_write");
+       rwsemtrace(sem, "Leaving __downgrade_write");
 }
 
 EXPORT_SYMBOL(init_rwsem);
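
For readers of the hunks above: in the spinlock-based implementation the
whole lock state is the sem->activity counter, guarded by sem->wait_lock.
Zero means the lock is free, a positive value is the number of readers
currently holding it, and -1 means a single writer holds it; that is why
the fast paths test it the way they do.  A short sketch of the invariant
(this is the existing rwsem-spinlock design, not something introduced by
this patch):

    /* sem->activity in lib/rwsem-spinlock.c:
     *    0   the lock is free
     *   +N   held by N readers   (__down_read() does sem->activity++)
     *   -1   held by one writer  (__down_write() sets sem->activity = -1)
     * sem->wait_lock only guards this counter and the wait list.
     */

    /* Uncontended fast paths, as seen in the hunks above: */
    if (sem->activity >= 0 && list_empty(&sem->wait_list))
            sem->activity++;           /* reader granted */

    if (sem->activity == 0 && list_empty(&sem->wait_list))
            sem->activity = -1;        /* writer granted */

    /* __downgrade_write() sets sem->activity = 1 - the writer becomes the
     * single remaining reader - and then wakes queued readers with
     * wakewrite == 0 so that a queued writer is not woken by mistake.
     */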
diff --git a/lib/rwsem.c b/lib/rwsem.c
index 27dcd95..465ec72 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -9,9 +9,9 @@
 #include <linux/module.h>
 
 struct rwsem_waiter {
-       struct list_head        list;
-       struct task_struct      *task;
-       unsigned int            flags;
+       struct list_head list;
+       struct task_struct *task;
+       unsigned int flags;
 #define RWSEM_WAITING_FOR_READ 0x00000001
 #define RWSEM_WAITING_FOR_WRITE        0x00000002
 };
@@ -20,31 +20,32 @@ struct rwsem_waiter {
 #undef rwsemtrace
 void rwsemtrace(struct rw_semaphore *sem, const char *str)
 {
-       printk("sem=%p\n",sem);
-       printk("(sem)=%08lx\n",sem->count);
+       printk("sem=%p\n", sem);
+       printk("(sem)=%08lx\n", sem->count);
        if (sem->debug)
-               printk("[%d] %s({%08lx})\n",current->pid,str,sem->count);
+               printk("[%d] %s({%08lx})\n", current->pid, str, sem->count);
 }
 #endif
 
 /*
- * handle the lock being released whilst there are processes blocked on it that can now run
+ * handle the lock release when processes blocked on it that can now run
  * - if we come here from up_xxxx(), then:
- *   - the 'active part' of the count (&0x0000ffff) had reached zero (but may have changed)
- *   - the 'waiting part' of the count (&0xffff0000) is negative (and will still be so)
+ *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
+ *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
  *   - there must be someone on the queue
  * - the spinlock must be held by the caller
  * - woken process blocks are discarded from the list after having task zeroed
  * - writers are only woken if downgrading is false
  */
-static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
+static inline struct rw_semaphore *
+__rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
 {
        struct rwsem_waiter *waiter;
        struct task_struct *tsk;
        struct list_head *next;
        signed long oldcount, woken, loop;
 
-       rwsemtrace(sem,"Entering __rwsem_do_wake");
+       rwsemtrace(sem, "Entering __rwsem_do_wake");
 
        if (downgrading)
                goto dont_wake_writers;
@@ -53,19 +54,24 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int
         * if we can transition the active part of the count from 0 -> 1
         */
  try_again:
-       oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS,sem) - RWSEM_ACTIVE_BIAS;
+       oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem)
+                                               - RWSEM_ACTIVE_BIAS;
        if (oldcount & RWSEM_ACTIVE_MASK)
                goto undo;
 
-       waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
+       waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
 
-       /* try to grant a single write lock if there's a writer at the front of the queue
-        * - note we leave the 'active part' of the count incremented by 1 and the waiting part
-        *   incremented by 0x00010000
+       /* try to grant a single write lock if there's a writer at the front
+        * of the queue - note we leave the 'active part' of the count
+        * incremented by 1 and the waiting part incremented by 0x00010000
         */
        if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
                goto readers_only;
 
+       /* We must be careful not to touch 'waiter' after we set ->task = NULL.
+        * It is allocated on the waiter's stack and may become invalid at
+        * any time after that point (due to a wakeup from another source).
+        */
        list_del(&waiter->list);
        tsk = waiter->task;
        mb();
@@ -76,7 +82,7 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int
 
        /* don't want to wake any writers */
  dont_wake_writers:
-       waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
+       waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
        if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
                goto out;
 
@@ -90,23 +96,25 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int
        do {
                woken++;
 
-               if (waiter->list.next==&sem->wait_list)
+               if (waiter->list.next == &sem->wait_list)
                        break;
 
-               waiter = list_entry(waiter->list.next,struct rwsem_waiter,list);
+               waiter = list_entry(waiter->list.next,
+                                       struct rwsem_waiter, list);
 
        } while (waiter->flags & RWSEM_WAITING_FOR_READ);
 
        loop = woken;
        woken *= RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS;
        if (!downgrading)
-               woken -= RWSEM_ACTIVE_BIAS; /* we'd already done one increment
-                                            * earlier */
-       rwsem_atomic_add(woken,sem);
+               /* we'd already done one increment earlier */
+               woken -= RWSEM_ACTIVE_BIAS;
+
+       rwsem_atomic_add(woken, sem);
 
        next = sem->wait_list.next;
-       for (; loop>0; loop--) {
-               waiter = list_entry(next,struct rwsem_waiter,list);
+       for (; loop > 0; loop--) {
+               waiter = list_entry(next, struct rwsem_waiter, list);
                next = waiter->list.next;
                tsk = waiter->task;
                mb();
@@ -119,12 +127,12 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int
        next->prev = &sem->wait_list;
 
  out:
-       rwsemtrace(sem,"Leaving __rwsem_do_wake");
+       rwsemtrace(sem, "Leaving __rwsem_do_wake");
        return sem;
 
        /* undo the change to count, but check for a transition 1->0 */
  undo:
-       if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS,sem)!=0)
+       if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) != 0)
                goto out;
        goto try_again;
 }
@@ -132,28 +140,26 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int
 /*
  * wait for a lock to be granted
  */
-static inline struct rw_semaphore *rwsem_down_failed_common(struct rw_semaphore *sem,
-                                                                struct rwsem_waiter *waiter,
-                                                                signed long adjustment)
+static inline struct rw_semaphore *
+rwsem_down_failed_common(struct rw_semaphore *sem,
+                       struct rwsem_waiter *waiter, signed long adjustment)
 {
        struct task_struct *tsk = current;
        signed long count;
 
-       set_task_state(tsk,TASK_UNINTERRUPTIBLE);
+       set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 
        /* set up my own style of waitqueue */
        spin_lock(&sem->wait_lock);
        waiter->task = tsk;
        get_task_struct(tsk);
 
-       list_add_tail(&waiter->list,&sem->wait_list);
+       list_add_tail(&waiter->list, &sem->wait_list);
 
-       /* note that we're now waiting on the lock, but no longer actively read-locking */
-       count = rwsem_atomic_update(adjustment,sem);
+       /* we're now waiting on the lock, but no longer actively read-locking */
+       count = rwsem_atomic_update(adjustment, sem);
 
-       /* if there are no longer active locks, wake the front queued process(es) up
-        * - it might even be this process, since the waker takes a more active part
-        */
+       /* if there are no active locks, wake the front queued process(es) up */
        if (!(count & RWSEM_ACTIVE_MASK))
                sem = __rwsem_do_wake(sem, 0);
 
@@ -175,42 +181,45 @@ static inline struct rw_semaphore *rwsem_down_failed_common(struct rw_semaphore
 /*
  * wait for the read lock to be granted
  */
-struct rw_semaphore fastcall __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
+struct rw_semaphore fastcall __sched *
+rwsem_down_read_failed(struct rw_semaphore *sem)
 {
        struct rwsem_waiter waiter;
 
-       rwsemtrace(sem,"Entering rwsem_down_read_failed");
+       rwsemtrace(sem, "Entering rwsem_down_read_failed");
 
        waiter.flags = RWSEM_WAITING_FOR_READ;
-       rwsem_down_failed_common(sem,&waiter,RWSEM_WAITING_BIAS-RWSEM_ACTIVE_BIAS);
+       rwsem_down_failed_common(sem, &waiter,
+                               RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS);
 
-       rwsemtrace(sem,"Leaving rwsem_down_read_failed");
+       rwsemtrace(sem, "Leaving rwsem_down_read_failed");
        return sem;
 }
 
 /*
  * wait for the write lock to be granted
  */
-struct rw_semaphore fastcall __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
+struct rw_semaphore fastcall __sched *
+rwsem_down_write_failed(struct rw_semaphore *sem)
 {
        struct rwsem_waiter waiter;
 
-       rwsemtrace(sem,"Entering rwsem_down_write_failed");
+       rwsemtrace(sem, "Entering rwsem_down_write_failed");
 
        waiter.flags = RWSEM_WAITING_FOR_WRITE;
-       rwsem_down_failed_common(sem,&waiter,-RWSEM_ACTIVE_BIAS);
+       rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS);
 
-       rwsemtrace(sem,"Leaving rwsem_down_write_failed");
+       rwsemtrace(sem, "Leaving rwsem_down_write_failed");
        return sem;
 }
 
 /*
  * handle waking up a waiter on the semaphore
- * - up_read/up_write has decremented the active part of the count if we come here
+ * - up_read/up_write has decremented the active part of count if we come here
  */
 struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
 {
-       rwsemtrace(sem,"Entering rwsem_wake");
+       rwsemtrace(sem, "Entering rwsem_wake");
 
        spin_lock(&sem->wait_lock);
 
@@ -220,19 +229,19 @@ struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
 
        spin_unlock(&sem->wait_lock);
 
-       rwsemtrace(sem,"Leaving rwsem_wake");
+       rwsemtrace(sem, "Leaving rwsem_wake");
 
        return sem;
 }
 
 /*
  * downgrade a write lock into a read lock
- * - caller incremented waiting part of count, and discovered it to be still negative
+ * - caller incremented waiting part of count and discovered it still negative
  * - just wake up any readers at the front of the queue
  */
 struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
 {
-       rwsemtrace(sem,"Entering rwsem_downgrade_wake");
+       rwsemtrace(sem, "Entering rwsem_downgrade_wake");
 
        spin_lock(&sem->wait_lock);
 
@@ -242,7 +251,7 @@ struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
 
        spin_unlock(&sem->wait_lock);
 
-       rwsemtrace(sem,"Leaving rwsem_downgrade_wake");
+       rwsemtrace(sem, "Leaving rwsem_downgrade_wake");
        return sem;
 }
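
The __sched annotation added to __down_read() and __down_write() does not
change their behaviour; it only moves them into the .sched.text section.
Functions in that section are skipped when the kernel walks a blocked
task's stack to report what it is sleeping in (get_wchan(), and hence the
wchan field visible via /proc and ps), so the report points at the real
caller rather than at the rwsem internals.  In kernels of this vintage the
annotation is defined roughly as follows (quoted from memory of
include/linux/sched.h, so treat the exact location as approximate):

    /* Attach to functions that should be ignored in wchan and backtraces:
     * they are linked into the .sched.text section, which get_wchan()
     * skips while scanning a sleeping task's stack.
     */
    #define __sched  __attribute__((__section__(".sched.text")))

    void fastcall __sched __down_read(struct rw_semaphore *sem);
    void fastcall __sched __down_write(struct rw_semaphore *sem);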