}
/*
+ * trylock for reading -- returns 1 if successful, 0 if contention
+ */
+static inline int __down_read_trylock(struct rw_semaphore *sem)
+{
+	__s32 result, tmp;
+	__asm__ __volatile__(
+		"# beginning __down_read_trylock\n\t"
+		"  movl      %0,%1\n\t" /* result = sem->count */
+		"1:\n\t"
+		"  movl      %1,%2\n\t" /* tmp = result */
+		"  addl      %3,%2\n\t" /* tmp += RWSEM_ACTIVE_READ_BIAS */
+		"  jle       2f\n\t" /* bail out if a writer holds the sem */
+LOCK_PREFIX	"  cmpxchgl  %2,%0\n\t" /* commit tmp if count is unchanged */
+		"  jnz       1b\n\t" /* count moved under us -- retry */
+		"2:\n\t"
+		"# ending __down_read_trylock\n\t"
+		: "+m"(sem->count), "=&a"(result), "=&r"(tmp)
+		: "i"(RWSEM_ACTIVE_READ_BIAS)
+		: "memory", "cc");
+	return result >= 0 ? 1 : 0;
+}
+
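For anyone following the assembly, here is a rough C rendering of the same fast path. It assumes the kernel's cmpxchg() macro (which returns the value it found at the target); the function name is invented for illustration and this sketch is not the i386 implementation itself:

static inline int down_read_trylock_sketch(struct rw_semaphore *sem)
{
	signed long old = sem->count, seen;

	for (;;) {
		if (old + RWSEM_ACTIVE_READ_BIAS <= 0)
			return 0;	/* a writer is active: give up */
		seen = cmpxchg(&sem->count, old,
			       old + RWSEM_ACTIVE_READ_BIAS);
		if (seen == old)
			return 1;	/* our increment was committed */
		old = seen;		/* count moved under us: retry */
	}
}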
+/*
* lock for writing
*/
static inline void __down_write(struct rw_semaphore *sem)
}
/*
+ * trylock for writing -- returns 1 if successful, 0 if contention
+ */
+static inline int __down_write_trylock(struct rw_semaphore *sem)
+{
+	signed long ret = cmpxchg(&sem->count,
+				  RWSEM_UNLOCKED_VALUE,
+				  RWSEM_ACTIVE_WRITE_BIAS);
+
+	/* we got it only if the count was exactly "unlocked" */
+	if (ret == RWSEM_UNLOCKED_VALUE)
+		return 1;
+	return 0;
+}
+
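A single cmpxchg suffices here because of the count encoding. For reference, these are the bias constants defined near the top of this header (i386 values, not shown in this hunk):

#define RWSEM_UNLOCKED_VALUE		0x00000000
#define RWSEM_ACTIVE_BIAS		0x00000001
#define RWSEM_ACTIVE_MASK		0x0000ffff
#define RWSEM_WAITING_BIAS		(-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

Any active reader, active writer, or queued waiter makes the count differ from RWSEM_UNLOCKED_VALUE, so the exchange can only succeed on a completely idle semaphore and no retry loop is needed.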
+/*
* unlock after reading
*/
static inline void __up_read(struct rw_semaphore *sem)
}
/*
+ * trylock for reading -- returns 1 if successful, 0 if contention
+ */
+static inline int down_read_trylock(struct rw_semaphore *sem)
+{
+	int ret;
+
+	rwsemtrace(sem,"Entering down_read_trylock");
+	ret = __down_read_trylock(sem);
+	rwsemtrace(sem,"Leaving down_read_trylock");
+	return ret;
+}
+
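A hypothetical caller sketch, to show the intended use: back off instead of sleeping when the lock is unavailable (the function and its return convention are invented for illustration):

static int example_read_stats(struct rw_semaphore *sem)
{
	if (!down_read_trylock(sem))
		return -EAGAIN;		/* busy: let the caller retry later */
	/* ... read the shared state ... */
	up_read(sem);
	return 0;
}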
+/*
* lock for writing
*/
static inline void down_write(struct rw_semaphore *sem)
}
/*
+ * trylock for writing -- returns 1 if successful, 0 if contention
+ */
+static inline int down_write_trylock(struct rw_semaphore *sem)
+{
+	int ret;
+
+	rwsemtrace(sem,"Entering down_write_trylock");
+	ret = __down_write_trylock(sem);
+	rwsemtrace(sem,"Leaving down_write_trylock");
+	return ret;
+}
+
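And a matching hypothetical sketch for the write side, useful from paths that must not block (names invented for illustration):

static int example_try_exclusive(struct rw_semaphore *sem)
{
	if (!down_write_trylock(sem))
		return 0;		/* contended: skip this pass */
	/* ... perform the exclusive update ... */
	up_write(sem);
	return 1;
}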
+/*
* release a read lock
*/
static inline void up_read(struct rw_semaphore *sem)
rwsemtrace(sem,"Leaving downgrade_write");
}
-
#endif /* __KERNEL__ */
#endif /* _LINUX_RWSEM_H */
* - the 'waiting count' is non-zero
* - the spinlock must be held by the caller
* - woken process blocks are discarded from the list after having flags zeroised
+ * - writers are only woken if wakewrite is non-zero
*/
-static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
+static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
struct rwsem_waiter *waiter;
int woken;
waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
- /* try to grant a single write lock if there's a writer at the front of the queue
+	if (!wakewrite) {
+		if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
+			goto out;
+		goto dont_wake_writers;
+	}
+
+	/* if we are allowed to wake writers, try to grant a single write lock
+	 * if there's a writer at the front of the queue
* - we leave the 'waiting count' incremented to signify potential contention
*/
if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
}
/* grant an infinite number of read locks to the readers at the front of the queue */
+ dont_wake_writers:
	woken = 0;
-	do {
+	while (waiter->flags & RWSEM_WAITING_FOR_READ) {
+		/* grab the next pointer before waking: the waiter lives on the
+		 * stack of the task being woken and may be gone once its flags
+		 * are zeroised
+		 */
+		struct list_head *next = waiter->list.next;
+
		list_del(&waiter->list);
		waiter->flags = 0;
		wake_up_process(waiter->task);
		woken++;
		if (list_empty(&sem->wait_list))
			break;
-		waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
-	} while (waiter->flags&RWSEM_WAITING_FOR_READ);
+		waiter = list_entry(next,struct rwsem_waiter,list);
+	}
sem->activity += woken;
}
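To see what the new wakewrite argument changes, here is a small standalone model (ordinary user-space C, not kernel code) of how many tasks __rwsem_do_wake wakes for a given queue, with 'W' a waiting writer and 'R' a waiting reader:

#include <stdio.h>

static int wake_count(const char *queue, int wakewrite)
{
	int woken = 0;

	if (!queue[0])
		return 0;			/* empty queue: nobody to wake */
	if (queue[0] == 'W')
		return wakewrite ? 1 : 0;	/* one writer, or nobody at all */
	while (queue[woken] == 'R')		/* grant all leading readers */
		woken++;
	return woken;
}

int main(void)
{
	printf("%d\n", wake_count("WRR", 0));	/* 0: downgrade leaves the writer queued */
	printf("%d\n", wake_count("WRR", 1));	/* 1: the writer is granted the lock */
	printf("%d\n", wake_count("RRW", 0));	/* 2: both leading readers run */
	return 0;
}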
/*
+ * trylock for reading -- returns 1 if successful, 0 if contention
+ */
+int __down_read_trylock(struct rw_semaphore *sem)
+{
+	int ret = 0;
+
+	rwsemtrace(sem,"Entering __down_read_trylock");
+
+	spin_lock(&sem->wait_lock);
+
+	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
+		/* granted */
+		sem->activity++;
+		ret = 1;
+	}
+
+	spin_unlock(&sem->wait_lock);
+
+	rwsemtrace(sem,"Leaving __down_read_trylock");
+	return ret;
+}
+
+/*
* get a write lock on the semaphore
* - note that we increment the waiting count anyway to indicate an exclusive lock
*/
}
/*
+ * trylock for writing -- returns 1 if successful, 0 if contention
+ */
+int __down_write_trylock(struct rw_semaphore *sem)
+{
+	int ret = 0;
+
+	rwsemtrace(sem,"Entering __down_write_trylock");
+
+	spin_lock(&sem->wait_lock);
+
+	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
+		/* granted */
+		sem->activity = -1;
+		ret = 1;
+	}
+
+	spin_unlock(&sem->wait_lock);
+
+	rwsemtrace(sem,"Leaving __down_write_trylock");
+	return ret;
+}
+
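Both trylocks above rely on the activity encoding this spinlock-based implementation already uses, restated here for clarity:

/*
 * sem->activity >  0:  that many readers hold the semaphore
 * sem->activity == 0:  the semaphore is free
 * sem->activity == -1: one writer holds the semaphore
 */

so a read trylock can succeed whenever no writer is active and nobody is queued, and a write trylock only on a completely idle semaphore.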
+/*
* release a read lock on the semaphore
*/
void __up_read(struct rw_semaphore *sem)
sem->activity = 0;
if (!list_empty(&sem->wait_list))
- sem = __rwsem_do_wake(sem);
+ sem = __rwsem_do_wake(sem, 1);
spin_unlock(&sem->wait_lock);
rwsemtrace(sem,"Leaving __up_write");
}
+/*
+ * downgrade a write lock into a read lock
+ * - just wake up any readers at the front of the queue
+ */
+void __downgrade_write(struct rw_semaphore *sem)
+{
+ rwsemtrace(sem,"Entering __rwsem_downgrade");
+
+ spin_lock(&sem->wait_lock);
+
+ sem->activity = 1;
+ if (!list_empty(&sem->wait_list))
+ sem = __rwsem_do_wake(sem,0);
+
+ spin_unlock(&sem->wait_lock);
+
+ rwsemtrace(sem,"Leaving __rwsem_downgrade");
+}
+
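Finally, a hypothetical use of the new downgrade path: hold the lock exclusively only for the update, then continue as a reader without ever fully releasing it (names invented for illustration):

static DECLARE_RWSEM(example_sem);

static void example_update_then_read(void)
{
	down_write(&example_sem);	/* exclusive while we modify */
	/* ... update the shared data ... */
	downgrade_write(&example_sem);	/* atomically become a reader:
					 * no window for another writer */
	/* ... keep reading alongside any other readers ... */
	up_read(&example_sem);
}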
EXPORT_SYMBOL(init_rwsem);
EXPORT_SYMBOL(__down_read);
+EXPORT_SYMBOL(__down_read_trylock);
EXPORT_SYMBOL(__down_write);
+EXPORT_SYMBOL(__down_write_trylock);
EXPORT_SYMBOL(__up_read);
EXPORT_SYMBOL(__up_write);
+EXPORT_SYMBOL(__downgrade_write);
#if RWSEM_DEBUG
EXPORT_SYMBOL(rwsemtrace);
#endif