/* note that we're now waiting on the lock, but no longer actively read-locking */
count = rwsem_atomic_update(RWSEM_WAITING_BIAS-RWSEM_ACTIVE_BIAS,sem);
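(Worked example: with the 0x0000ffff active / 0xffff0000 waiting split that the
old comment further down describes, and assuming the usual bias values of
+0x00000001 and -0x00010000, the single delta above converts this task's
active-reader contribution into a waiter contribution in one atomic step. A
minimal standalone sketch of that arithmetic; everything beyond the mask split
quoted from the patch is illustrative:)

#include <stdio.h>

#define RWSEM_ACTIVE_BIAS	0x00000001L
#define RWSEM_ACTIVE_MASK	0x0000ffffL
#define RWSEM_WAITING_BIAS	(-0x00010000L)

int main(void)
{
	/* one active reader (us), no other lockers, no waiters */
	signed long count = RWSEM_ACTIVE_BIAS;

	/* the combined delta applied by rwsem_atomic_update() above */
	count += RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS;

	/* active part now zero, waiting part negative: a wake-up is due */
	printf("count=%08lx active=%04lx\n",
	       count & 0xffffffffL, count & RWSEM_ACTIVE_MASK);
	return 0;
}
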
@@ -33,17 +143,18 @@
* - it might even be this process, since the waker takes a more active part
*/
if (!(count & RWSEM_ACTIVE_MASK))
- rwsem_wake(sem);
+ __rwsem_do_wake(sem);
+
+ spin_unlock(&sem->wait_lock);
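
(Note the calling-convention change here: this slow path already holds
sem->wait_lock, taken when the waiter was queued further up, so it now calls
__rwsem_do_wake() directly instead of rwsem_wake(), which after this patch
grabs the spinlock itself. A hedged skeleton of the resulting ordering; the
queueing step is assumed, only the other operations appear in this hunk:)

spin_lock(&sem->wait_lock);
/* ... queue our rwsem_waiter and set TASK_UNINTERRUPTIBLE (assumed) ... */
count = rwsem_atomic_update(RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS, sem);
if (!(count & RWSEM_ACTIVE_MASK))
	__rwsem_do_wake(sem);	/* must be entered with wait_lock held */
spin_unlock(&sem->wait_lock);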

/* wait to be given the lock */
for (;;) {
- if (!test_bit(RWSEM_WAITING_FOR_READ,&wait.flags))
+ if (!waiter.flags)
break;
schedule();
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
}
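
(The loop above pairs with the waker side: to grant the lock,
__rwsem_do_wake() must zero waiter.flags before waking the task, and the
sleeper re-arms TASK_UNINTERRUPTIBLE before every re-check so that a wake-up
landing between the check and schedule() is not lost. A hedged kernel-context
sketch of that grant step; the struct layout and the mb() are assumptions,
only the flags-clearing handshake is implied by the loop:)

/* assumed shape of the per-task wait record tested above */
struct rwsem_waiter {
	struct rwsem_waiter	*next;
	struct task_struct	*task;
	unsigned int		flags;	/* non-zero whilst still waiting */
};

static inline void rwsem_grant(struct rwsem_waiter *waiter)
{
	waiter->flags = 0;	/* sleeper's `if (!waiter.flags)' now breaks */
	mb();			/* order the grant before the wake-up */
	wake_up_process(waiter->task);
}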

/*
- * handle the lock being released whilst there are processes blocked on it that can now run
- * - if we come here, then:
- * - the 'active part' of the count (&0x0000ffff) reached zero (but may no longer be zero)
- * - the 'waiting part' of the count (&0xffff0000) is negative (and will still be so)
+ * spinlock grabbing wrapper for __rwsem_do_wake()
*/
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
- signed long count;
- int woken;
-
rwsemtrace(sem,"Entering rwsem_wake");

- /* try to grant a single write lock if there's a writer at the front of the queue
- * - note we leave the 'active part' of the count incremented by 1 and the waiting part
- * incremented by 0x00010000
- */
- if (wake_up_ctx(&sem->wait,1,-RWSEM_WAITING_FOR_WRITE)==1)
- goto out;
+ spin_lock(&sem->wait_lock);
+
+ sem = __rwsem_do_wake(sem);

- /* grant an infinite number of read locks to the readers at the front of the queue
- * - note we increment the 'active part' of the count by the number of readers just woken,
- * less one for the activity decrement we've already done
- */
- woken = wake_up_ctx(&sem->wait,65535,-RWSEM_WAITING_FOR_READ);
- if (woken<=0)
- goto counter_correction;
-
- woken *= RWSEM_ACTIVE_BIAS-RWSEM_WAITING_BIAS;
- woken -= RWSEM_ACTIVE_BIAS;
- rwsem_atomic_update(woken,sem);
+ spin_unlock(&sem->wait_lock);
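
(Assembled, the post-patch wrapper presumably reads as below: the lock/unlock
bracket follows from the new "spinlock grabbing wrapper" comment, while the
exit trace and return value are assumed to mirror the entry trace, since the
hunk is cut off at this point:)

struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
	rwsemtrace(sem,"Entering rwsem_wake");

	spin_lock(&sem->wait_lock);

	/* wake with the wait queue stabilised by the spinlock */
	sem = __rwsem_do_wake(sem);

	spin_unlock(&sem->wait_lock);

	rwsemtrace(sem,"Leaving rwsem_wake");	/* assumed */
	return sem;				/* assumed */
}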