Update to 3.4-final.
[linux-flexiantxendom0-3.2.10.git] lib/rwsem-spinlock.c
index c4cfd6c..7e0d6a5 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -7,7 +7,7 @@
  */
 #include <linux/rwsem.h>
 #include <linux/sched.h>
-#include <linux/module.h>
+#include <linux/export.h>
 
 struct rwsem_waiter {
        struct list_head list;
@@ -17,6 +17,19 @@ struct rwsem_waiter {
 #define RWSEM_WAITING_FOR_WRITE        0x00000002
 };
 
+int rwsem_is_locked(struct rw_semaphore *sem)
+{
+       int ret = 1;
+       unsigned long flags;
+
+       if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
+               ret = (sem->activity != 0);
+               raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+       }
+       return ret;
+}
+EXPORT_SYMBOL(rwsem_is_locked);
+
 /*
  * initialise the semaphore
  */
@@ -31,9 +44,10 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
        lockdep_init_map(&sem->dep_map, name, key, 0);
 #endif
        sem->activity = 0;
-       spin_lock_init(&sem->wait_lock);
+       raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
 }
+EXPORT_SYMBOL(__init_rwsem);
 
 /*
  * handle the lock release when processes blocked on it that can now run
@@ -125,17 +139,18 @@ __rwsem_wake_one_writer(struct rw_semaphore *sem)
 /*
  * get a read lock on the semaphore
  */
-void fastcall __sched __down_read(struct rw_semaphore *sem)
+void __sched __down_read(struct rw_semaphore *sem)
 {
        struct rwsem_waiter waiter;
        struct task_struct *tsk;
+       unsigned long flags;
 
-       spin_lock_irq(&sem->wait_lock);
+       raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
        if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->activity++;
-               spin_unlock_irq(&sem->wait_lock);
+               raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
                goto out;
        }
 
@@ -150,7 +165,7 @@ void fastcall __sched __down_read(struct rw_semaphore *sem)
        list_add_tail(&waiter.list, &sem->wait_list);
 
        /* we don't need to touch the semaphore struct anymore */
-       spin_unlock_irq(&sem->wait_lock);
+       raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
        /* wait to be given the lock */
        for (;;) {
@@ -168,13 +183,13 @@ void fastcall __sched __down_read(struct rw_semaphore *sem)
 /*
  * trylock for reading -- returns 1 if successful, 0 if contention
  */
-int fastcall __down_read_trylock(struct rw_semaphore *sem)
+int __down_read_trylock(struct rw_semaphore *sem)
 {
        unsigned long flags;
        int ret = 0;
 
 
-       spin_lock_irqsave(&sem->wait_lock, flags);
+       raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
        if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
                /* granted */
@@ -182,7 +197,7 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
                ret = 1;
        }
 
-       spin_unlock_irqrestore(&sem->wait_lock, flags);
+       raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
        return ret;
 }
@@ -191,17 +206,18 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
  * get a write lock on the semaphore
  * - we increment the waiting count anyway to indicate an exclusive lock
  */
-void fastcall __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
+void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
        struct rwsem_waiter waiter;
        struct task_struct *tsk;
+       unsigned long flags;
 
-       spin_lock_irq(&sem->wait_lock);
+       raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
        if (sem->activity == 0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->activity = -1;
-               spin_unlock_irq(&sem->wait_lock);
+               raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
                goto out;
        }
 
@@ -216,7 +232,7 @@ void fastcall __sched __down_write_nested(struct rw_semaphore *sem, int subclass
        list_add_tail(&waiter.list, &sem->wait_list);
 
        /* we don't need to touch the semaphore struct anymore */
-       spin_unlock_irq(&sem->wait_lock);
+       raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
        /* wait to be given the lock */
        for (;;) {
@@ -231,7 +247,7 @@ void fastcall __sched __down_write_nested(struct rw_semaphore *sem, int subclass
        ;
 }
 
-void fastcall __sched __down_write(struct rw_semaphore *sem)
+void __sched __down_write(struct rw_semaphore *sem)
 {
        __down_write_nested(sem, 0);
 }
@@ -239,12 +255,12 @@ void fastcall __sched __down_write(struct rw_semaphore *sem)
 /*
  * trylock for writing -- returns 1 if successful, 0 if contention
  */
-int fastcall __down_write_trylock(struct rw_semaphore *sem)
+int __down_write_trylock(struct rw_semaphore *sem)
 {
        unsigned long flags;
        int ret = 0;
 
-       spin_lock_irqsave(&sem->wait_lock, flags);
+       raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
        if (sem->activity == 0 && list_empty(&sem->wait_list)) {
                /* granted */
@@ -252,7 +268,7 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
                ret = 1;
        }
 
-       spin_unlock_irqrestore(&sem->wait_lock, flags);
+       raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
        return ret;
 }
@@ -260,57 +276,48 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
 /*
  * release a read lock on the semaphore
  */
-void fastcall __up_read(struct rw_semaphore *sem)
+void __up_read(struct rw_semaphore *sem)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&sem->wait_lock, flags);
+       raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
        if (--sem->activity == 0 && !list_empty(&sem->wait_list))
                sem = __rwsem_wake_one_writer(sem);
 
-       spin_unlock_irqrestore(&sem->wait_lock, flags);
+       raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 }
 
 /*
  * release a write lock on the semaphore
  */
-void fastcall __up_write(struct rw_semaphore *sem)
+void __up_write(struct rw_semaphore *sem)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&sem->wait_lock, flags);
+       raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
        sem->activity = 0;
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, 1);
 
-       spin_unlock_irqrestore(&sem->wait_lock, flags);
+       raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 }
 
 /*
  * downgrade a write lock into a read lock
  * - just wake up any readers at the front of the queue
  */
-void fastcall __downgrade_write(struct rw_semaphore *sem)
+void __downgrade_write(struct rw_semaphore *sem)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&sem->wait_lock, flags);
+       raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
        sem->activity = 1;
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, 0);
 
-       spin_unlock_irqrestore(&sem->wait_lock, flags);
+       raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 }
 
-EXPORT_SYMBOL(__init_rwsem);
-EXPORT_SYMBOL(__down_read);
-EXPORT_SYMBOL(__down_read_trylock);
-EXPORT_SYMBOL(__down_write_nested);
-EXPORT_SYMBOL(__down_write);
-EXPORT_SYMBOL(__down_write_trylock);
-EXPORT_SYMBOL(__up_read);
-EXPORT_SYMBOL(__up_write);
-EXPORT_SYMBOL(__downgrade_write);
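
For reference, the hunks above add and export a spinlock-based rwsem_is_locked() helper and convert the internal wait_lock to a raw spinlock. Below is a minimal sketch of how a caller might exercise the new helper together with the ordinary rwsem API; the module name, semaphore name and printk messages are illustrative assumptions, not part of the patch.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(example_sem);	/* hypothetical, statically initialised rwsem */

static int __init rwsem_example_init(void)
{
	/* shared (read) side: several readers may hold the lock at once */
	down_read(&example_sem);
	pr_info("read-held: rwsem_is_locked()=%d\n", rwsem_is_locked(&example_sem));
	up_read(&example_sem);

	/* exclusive (write) side: excludes readers and other writers */
	down_write(&example_sem);
	pr_info("write-held: rwsem_is_locked()=%d\n", rwsem_is_locked(&example_sem));
	up_write(&example_sem);

	return 0;
}

static void __exit rwsem_example_exit(void)
{
}

module_init(rwsem_example_init);
module_exit(rwsem_example_exit);
MODULE_LICENSE("GPL");

With the spinlock-based implementation added above, rwsem_is_locked() reports 1 in both branches, since sem->activity is non-zero while the lock is held for read or for write.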