Implement down_read_trylock() and down_write_trylock() and add a
generic spinlock implementation for downgrade_write().

author     Christoph Hellwig <hch@sb.bsdonline.org>
           Sun, 28 Jul 2002 23:18:19 +0000 (01:18 +0200)
committer  Christoph Hellwig <hch@sb.bsdonline.org>
           Sun, 28 Jul 2002 23:18:19 +0000 (01:18 +0200)

include/asm-i386/rwsem.h
include/linux/rwsem-spinlock.h
include/linux/rwsem.h
lib/rwsem-spinlock.c

index 72f2ae0..7d3a3f0 100644 (file)
@@ -118,6 +118,29 @@ LOCK_PREFIX        "  incl      (%%eax)\n\t" /* adds 0x00000001, returns the old value
 }
 
 /*
+ * trylock for reading -- returns 1 if successful, 0 if contention
+ */
+static inline int __down_read_trylock(struct rw_semaphore *sem)
+{
+       __s32 result, tmp;
+       __asm__ __volatile__(
+               "# beginning __down_read_trylock\n\t"
+               "  movl      %0,%1\n\t"
+               "1:\n\t"
+               "  movl      %1,%2\n\t"
+               "  addl      %3,%2\n\t"
+               "  jle       2f\n\t"
+LOCK_PREFIX    "  cmpxchgl  %2,%0\n\t"
+               "  jnz       1b\n\t"
+               "2:\n\t"
+               "# ending __down_read_trylock\n\t"
+               : "+m"(sem->count), "=&a"(result), "=&r"(tmp)
+               : "i"(RWSEM_ACTIVE_READ_BIAS)
+               : "memory", "cc");
+       return result>=0 ? 1 : 0;
+}
+
+/*
  * lock for writing
  */
 static inline void __down_write(struct rw_semaphore *sem)
@@ -145,6 +168,19 @@ LOCK_PREFIX        "  xadd      %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the
 }
 
 /*
+ * trylock for writing -- returns 1 if successful, 0 if contention
+ */
+static inline int __down_write_trylock(struct rw_semaphore *sem)
+{
+       signed long ret = cmpxchg(&sem->count,
+                                 RWSEM_UNLOCKED_VALUE,
+                                 RWSEM_ACTIVE_WRITE_BIAS);
+       if (ret == RWSEM_UNLOCKED_VALUE)
+               return 1;
+       return 0;
+}
+
+/*
  * unlock after reading
  */
 static inline void __up_read(struct rw_semaphore *sem)
index 3087c5c..f4ac435 100644 (file)
@@ -54,9 +54,12 @@ struct rw_semaphore {
 
 extern void FASTCALL(init_rwsem(struct rw_semaphore *sem));
 extern void FASTCALL(__down_read(struct rw_semaphore *sem));
+extern int FASTCALL(__down_read_trylock(struct rw_semaphore *sem));
 extern void FASTCALL(__down_write(struct rw_semaphore *sem));
+extern int FASTCALL(__down_write_trylock(struct rw_semaphore *sem));
 extern void FASTCALL(__up_read(struct rw_semaphore *sem));
 extern void FASTCALL(__up_write(struct rw_semaphore *sem));
+extern void FASTCALL(__downgrade_write(struct rw_semaphore *sem));
 
 #endif /* __KERNEL__ */
 #endif /* _LINUX_RWSEM_SPINLOCK_H */
index 320138d..4a7e2bb 100644 (file)
@@ -46,6 +46,18 @@ static inline void down_read(struct rw_semaphore *sem)
 }
 
 /*
+ * trylock for reading -- returns 1 if successful, 0 if contention
+ */
+static inline int down_read_trylock(struct rw_semaphore *sem)
+{
+       int ret;
+       rwsemtrace(sem,"Entering down_read_trylock");
+       ret = __down_read_trylock(sem);
+       rwsemtrace(sem,"Leaving down_read_trylock");
+       return ret;
+}
+
+/*
  * lock for writing
  */
 static inline void down_write(struct rw_semaphore *sem)
@@ -56,6 +68,18 @@ static inline void down_write(struct rw_semaphore *sem)
 }
 
 /*
+ * trylock for writing -- returns 1 if successful, 0 if contention
+ */
+static inline int down_write_trylock(struct rw_semaphore *sem)
+{
+       int ret;
+       rwsemtrace(sem,"Entering down_write_trylock");
+       ret = __down_write_trylock(sem);
+       rwsemtrace(sem,"Leaving down_write_trylock");
+       return ret;
+}
+
+/*
  * release a read lock
  */
 static inline void up_read(struct rw_semaphore *sem)
@@ -85,6 +109,5 @@ static inline void downgrade_write(struct rw_semaphore *sem)
        rwsemtrace(sem,"Leaving downgrade_write");
 }
 
-
 #endif /* __KERNEL__ */
 #endif /* _LINUX_RWSEM_H */
index a17abe4..0a38eeb 100644 (file)
@@ -46,8 +46,9 @@ void init_rwsem(struct rw_semaphore *sem)
  *   - the 'waiting count' is non-zero
  * - the spinlock must be held by the caller
  * - woken process blocks are discarded from the list after having flags zeroised
+ * - writers are only woken if wakewrite is non-zero
  */
-static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
+static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 {
        struct rwsem_waiter *waiter;
        int woken;
@@ -56,7 +57,14 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
 
        waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
 
-       /* try to grant a single write lock if there's a writer at the front of the queue
+       if (!wakewrite) {
+               if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
+                       goto out;
+               goto dont_wake_writers;
+       }
+
+       /* if we are allowed to wake writers try to grant a single write lock if there's a
+        * writer at the front of the queue
         * - we leave the 'waiting count' incremented to signify potential contention
         */
        if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
@@ -68,16 +76,19 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
        }
 
        /* grant an infinite number of read locks to the readers at the front of the queue */
+ dont_wake_writers:
        woken = 0;
-       do {
+       while (waiter->flags&RWSEM_WAITING_FOR_READ) {
+               struct list_head *next = waiter->list.next;
+
                list_del(&waiter->list);
                waiter->flags = 0;
                wake_up_process(waiter->task);
                woken++;
                if (list_empty(&sem->wait_list))
                        break;
-               waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
-       } while (waiter->flags&RWSEM_WAITING_FOR_READ);
+               waiter = list_entry(next,struct rwsem_waiter,list);
+       }
 
        sem->activity += woken;
 
@@ -149,6 +160,28 @@ void __down_read(struct rw_semaphore *sem)
 }
 
 /*
+ * trylock for reading -- returns 1 if successful, 0 if contention
+ */
+int __down_read_trylock(struct rw_semaphore *sem)
+{
+       int ret = 0;
+       rwsemtrace(sem,"Entering __down_read_trylock");
+
+       spin_lock(&sem->wait_lock);
+
+       if (sem->activity>=0 && list_empty(&sem->wait_list)) {
+               /* granted */
+               sem->activity++;
+               ret = 1;
+       }
+
+       spin_unlock(&sem->wait_lock);
+
+       rwsemtrace(sem,"Leaving __down_read_trylock");
+       return ret;
+}
+
+/*
  * get a write lock on the semaphore
  * - note that we increment the waiting count anyway to indicate an exclusive lock
  */
@@ -195,6 +228,28 @@ void __down_write(struct rw_semaphore *sem)
 }
 
 /*
+ * trylock for writing -- returns 1 if successful, 0 if contention
+ */
+int __down_write_trylock(struct rw_semaphore *sem)
+{
+       int ret = 0;
+       rwsemtrace(sem,"Entering __down_write_trylock");
+
+       spin_lock(&sem->wait_lock);
+
+       if (sem->activity==0 && list_empty(&sem->wait_list)) {
+               /* granted */
+               sem->activity = -1;
+               ret = 1;
+       }
+
+       spin_unlock(&sem->wait_lock);
+
+       rwsemtrace(sem,"Leaving __down_write_trylock");
+       return ret;
+}
+
+/*
  * release a read lock on the semaphore
  */
 void __up_read(struct rw_semaphore *sem)
@@ -222,18 +277,40 @@ void __up_write(struct rw_semaphore *sem)
 
        sem->activity = 0;
        if (!list_empty(&sem->wait_list))
-               sem = __rwsem_do_wake(sem);
+               sem = __rwsem_do_wake(sem, 1);
 
        spin_unlock(&sem->wait_lock);
 
        rwsemtrace(sem,"Leaving __up_write");
 }
 
+/*
+ * downgrade a write lock into a read lock
+ * - just wake up any readers at the front of the queue
+ */
+void __downgrade_write(struct rw_semaphore *sem)
+{
+       rwsemtrace(sem,"Entering __downgrade_write");
+
+       spin_lock(&sem->wait_lock);
+
+       sem->activity = 1;
+       if (!list_empty(&sem->wait_list))
+               sem = __rwsem_do_wake(sem,0);
+
+       spin_unlock(&sem->wait_lock);
+
+       rwsemtrace(sem,"Leaving __downgrade_write");
+}
+
 EXPORT_SYMBOL(init_rwsem);
 EXPORT_SYMBOL(__down_read);
+EXPORT_SYMBOL(__down_read_trylock);
 EXPORT_SYMBOL(__down_write);
+EXPORT_SYMBOL(__down_write_trylock);
 EXPORT_SYMBOL(__up_read);
 EXPORT_SYMBOL(__up_write);
+EXPORT_SYMBOL(__downgrade_write);
 #if RWSEM_DEBUG
 EXPORT_SYMBOL(rwsemtrace);
 #endif