Diffstat (limited to 'kernel/locking/rwsem-xadd.c')
-rw-r--r-- | kernel/locking/rwsem-xadd.c | 147
1 file changed, 49 insertions(+), 98 deletions(-)
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 3083fdf50447..7d537b50a849 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -9,6 +9,8 @@
  *
  * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
  * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
+ *
+ * Rwsem count bit fields re-definition by Waiman Long <longman@redhat.com>.
  */
 #include <linux/rwsem.h>
 #include <linux/init.h>
@@ -22,52 +24,20 @@
 #include "rwsem.h"
 
 /*
- * Guide to the rw_semaphore's count field for common values.
- * (32-bit case illustrated, similar for 64-bit)
- *
- * 0x0000000X	(1) X readers active or attempting lock, no writer waiting
- *		    X = #active_readers + #readers attempting to lock
- *		    (X*ACTIVE_BIAS)
- *
- * 0x00000000	rwsem is unlocked, and no one is waiting for the lock or
- *		attempting to read lock or write lock.
- *
- * 0xffff000X	(1) X readers active or attempting lock, with waiters for lock
- *		    X = #active readers + # readers attempting lock
- *		    (X*ACTIVE_BIAS + WAITING_BIAS)
- *		(2) 1 writer attempting lock, no waiters for lock
- *		    X-1 = #active readers + #readers attempting lock
- *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
- *		(3) 1 writer active, no waiters for lock
- *		    X-1 = #active readers + #readers attempting lock
- *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
- *
- * 0xffff0001	(1) 1 reader active or attempting lock, waiters for lock
- *		    (WAITING_BIAS + ACTIVE_BIAS)
- *		(2) 1 writer active or attempting lock, no waiters for lock
- *		    (ACTIVE_WRITE_BIAS)
+ * Guide to the rw_semaphore's count field.
  *
- * 0xffff0000	(1) There are writers or readers queued but none active
- *		    or in the process of attempting lock.
- *		    (WAITING_BIAS)
- *		Note: writer can attempt to steal lock for this count by adding
- *		ACTIVE_WRITE_BIAS in cmpxchg and checking the old count
+ * When the RWSEM_WRITER_LOCKED bit in count is set, the lock is owned
+ * by a writer.
  *
- * 0xfffe0001	(1) 1 writer active, or attempting lock. Waiters on queue.
- *		    (ACTIVE_WRITE_BIAS + WAITING_BIAS)
- *
- * Note: Readers attempt to lock by adding ACTIVE_BIAS in down_read and checking
- *	 the count becomes more than 0 for successful lock acquisition,
- *	 i.e. the case where there are only readers or nobody has lock.
- *	 (1st and 2nd case above).
- *
- *	 Writers attempt to lock by adding ACTIVE_WRITE_BIAS in down_write and
- *	 checking the count becomes ACTIVE_WRITE_BIAS for successful lock
- *	 acquisition (i.e. nobody else has lock or attempts lock). If
- *	 unsuccessful, in rwsem_down_write_failed, we'll check to see if there
- *	 are only waiters but none active (5th case above), and attempt to
- *	 steal the lock.
+ * The lock is owned by readers when
+ * (1) the RWSEM_WRITER_LOCKED bit isn't set in count,
+ * (2) some of the reader bits are set in count, and
+ * (3) the owner field has the RWSEM_READER_OWNED bit set.
+ *
+ * Having some reader bits set is not enough to guarantee a reader-owned
+ * lock, as the readers may be in the process of backing out from the
+ * count and a writer has just released the lock. So another writer may
+ * steal the lock immediately after that.
  */
@@ -113,9 +83,8 @@ enum rwsem_wake_type {
 
 /*
  * handle the lock release when processes blocked on it that can now run
- * - if we come here from up_xxxx(), then:
- *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
- *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
+ * - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must
+ *   have been set.
 * - there must be someone on the queue
 * - the wait_lock must be held by the caller
 * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
@@ -160,22 +129,11 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
	 * so we can bail out early if a writer stole the lock.
	 */
	if (wake_type != RWSEM_WAKE_READ_OWNED) {
-		adjustment = RWSEM_ACTIVE_READ_BIAS;
- try_reader_grant:
+		adjustment = RWSEM_READER_BIAS;
		oldcount = atomic_long_fetch_add(adjustment, &sem->count);
-		if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
-			/*
-			 * If the count is still less than RWSEM_WAITING_BIAS
-			 * after removing the adjustment, it is assumed that
-			 * a writer has stolen the lock. We have to undo our
-			 * reader grant.
-			 */
-			if (atomic_long_add_return(-adjustment, &sem->count) <
-			    RWSEM_WAITING_BIAS)
-				return;
-
-			/* Last active locker left. Retry waking readers. */
-			goto try_reader_grant;
+		if (unlikely(oldcount & RWSEM_WRITER_MASK)) {
+			atomic_long_sub(adjustment, &sem->count);
+			return;
		}
		/*
		 * Set it to reader-owned to give spinners an early
@@ -209,11 +167,11 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
	}

	list_cut_before(&wlist, &sem->wait_list, &waiter->list);
-	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
+	adjustment = woken * RWSEM_READER_BIAS - adjustment;
	lockevent_cond_inc(rwsem_wake_reader, woken);
	if (list_empty(&sem->wait_list)) {
		/* hit end of list above */
-		adjustment -= RWSEM_WAITING_BIAS;
+		adjustment -= RWSEM_FLAG_WAITERS;
	}

	if (adjustment)
@@ -248,22 +206,15 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
 */
static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
{
-	/*
-	 * Avoid trying to acquire write lock if count isn't RWSEM_WAITING_BIAS.
-	 */
-	if (count != RWSEM_WAITING_BIAS)
+	long new;
+
+	if (count & RWSEM_LOCK_MASK)
		return false;

-	/*
-	 * Acquire the lock by trying to set it to ACTIVE_WRITE_BIAS. If there
-	 * are other tasks on the wait list, we need to add on WAITING_BIAS.
-	 */
-	count = list_is_singular(&sem->wait_list) ?
-			RWSEM_ACTIVE_WRITE_BIAS :
-			RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS;
+	new = count + RWSEM_WRITER_LOCKED -
+	      (list_is_singular(&sem->wait_list) ? RWSEM_FLAG_WAITERS : 0);

-	if (atomic_long_cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS, count)
-							== RWSEM_WAITING_BIAS) {
+	if (atomic_long_try_cmpxchg_acquire(&sem->count, &count, new)) {
		rwsem_set_owner(sem);
		return true;
	}
@@ -279,9 +230,9 @@ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
	long count = atomic_long_read(&sem->count);

-	while (!count || count == RWSEM_WAITING_BIAS) {
+	while (!(count & RWSEM_LOCK_MASK)) {
		if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
-					count + RWSEM_ACTIVE_WRITE_BIAS)) {
+					count + RWSEM_WRITER_LOCKED)) {
			rwsem_set_owner(sem);
			lockevent_inc(rwsem_opt_wlock);
			return true;
@@ -424,7 +375,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
static inline struct rw_semaphore __sched *
__rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
{
-	long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
+	long count, adjustment = -RWSEM_READER_BIAS;
	struct rwsem_waiter waiter;
	DEFINE_WAKE_Q(wake_q);

@@ -436,16 +387,16 @@ __rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
		/*
		 * In case the wait queue is empty and the lock isn't owned
		 * by a writer, this reader can exit the slowpath and return
-		 * immediately as its RWSEM_ACTIVE_READ_BIAS has already
-		 * been set in the count.
+		 * immediately as its RWSEM_READER_BIAS has already been
+		 * set in the count.
		 */
-		if (atomic_long_read(&sem->count) >= 0) {
+		if (!(atomic_long_read(&sem->count) & RWSEM_WRITER_MASK)) {
			raw_spin_unlock_irq(&sem->wait_lock);
			rwsem_set_reader_owned(sem);
			lockevent_inc(rwsem_rlock_fast);
			return sem;
		}
-		adjustment += RWSEM_WAITING_BIAS;
+		adjustment += RWSEM_FLAG_WAITERS;
	}
	list_add_tail(&waiter.list, &sem->wait_list);
@@ -458,9 +409,8 @@ __rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
	 * If there are no writers and we are first in the queue,
	 * wake our own waiter to join the existing active readers !
	 */
-	if (count == RWSEM_WAITING_BIAS ||
-	    (count > RWSEM_WAITING_BIAS &&
-	     adjustment != -RWSEM_ACTIVE_READ_BIAS))
+	if (!(count & RWSEM_LOCK_MASK) ||
+	    (!(count & RWSEM_WRITER_MASK) && (adjustment & RWSEM_FLAG_WAITERS)))
		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irq(&sem->wait_lock);
@@ -488,7 +438,7 @@ __rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
out_nolock:
	list_del(&waiter.list);
	if (list_empty(&sem->wait_list))
-		atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
+		atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count);
	raw_spin_unlock_irq(&sem->wait_lock);
	__set_current_state(TASK_RUNNING);
	lockevent_inc(rwsem_rlock_fail);
@@ -521,9 +471,6 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
	struct rw_semaphore *ret = sem;
	DEFINE_WAKE_Q(wake_q);

-	/* undo write bias from down_write operation, stop active locking */
-	count = atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS, &sem->count);
-
	/* do optimistic spinning and steal lock if possible */
	if (rwsem_optimistic_spin(sem))
		return sem;
@@ -543,16 +490,18 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
	list_add_tail(&waiter.list, &sem->wait_list);

-	/* we're now waiting on the lock, but no longer actively locking */
+	/* we're now waiting on the lock */
	if (waiting) {
		count = atomic_long_read(&sem->count);

		/*
		 * If there were already threads queued before us and there are
-		 * no active writers, the lock must be read owned; so we try to
-		 * wake any read locks that were queued ahead of us.
+		 * no active writers and some readers, the lock must be read
+		 * owned; so we try to wake any read locks that were queued
+		 * ahead of us.
		 */
-		if (count > RWSEM_WAITING_BIAS) {
+		if (!(count & RWSEM_WRITER_MASK) &&
+		    (count & RWSEM_READER_MASK)) {
			__rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
			/*
			 * The wakeup is normally called _after_ the wait_lock
@@ -569,8 +518,9 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
			wake_q_init(&wake_q);
		}

-	} else
-		count = atomic_long_add_return(RWSEM_WAITING_BIAS, &sem->count);
+	} else {
+		count = atomic_long_add_return(RWSEM_FLAG_WAITERS, &sem->count);
+	}

	/* wait until we successfully acquire the lock */
	set_current_state(state);
@@ -587,7 +537,8 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
			schedule();
			lockevent_inc(rwsem_sleep_writer);
			set_current_state(state);
-		} while ((count = atomic_long_read(&sem->count)) & RWSEM_ACTIVE_MASK);
+			count = atomic_long_read(&sem->count);
+		} while (count & RWSEM_LOCK_MASK);

		raw_spin_lock_irq(&sem->wait_lock);
	}
@@ -603,7 +554,7 @@ out_nolock:
	raw_spin_lock_irq(&sem->wait_lock);
	list_del(&waiter.list);
	if (list_empty(&sem->wait_list))
-		atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
+		atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count);
	else
		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
	raw_spin_unlock_irq(&sem->wait_lock);
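
For reference, the bit definitions that the new scheme relies on live in kernel/locking/rwsem.h and are not part of this diff. The sketch below is an illustrative reconstruction only; the exact bit positions are assumptions chosen to be consistent with how the masks are used in the hunks above.

    /* Illustrative sketch -- the authoritative definitions are in
     * kernel/locking/rwsem.h, which this diff does not touch. Bit
     * positions are assumptions consistent with the masks used above.
     */
    #define RWSEM_WRITER_LOCKED	(1UL << 0)	/* a writer owns the lock */
    #define RWSEM_FLAG_WAITERS	(1UL << 1)	/* the wait list is non-empty */
    #define RWSEM_READER_SHIFT	8
    #define RWSEM_READER_BIAS	(1UL << RWSEM_READER_SHIFT)
    #define RWSEM_READER_MASK	(~(RWSEM_READER_BIAS - 1))
    #define RWSEM_WRITER_MASK	RWSEM_WRITER_LOCKED
    #define RWSEM_LOCK_MASK	(RWSEM_WRITER_MASK | RWSEM_READER_MASK)

With such a layout, "count & RWSEM_LOCK_MASK" is zero exactly when no writer holds the lock and no reader bias is present, so "is the lock free" becomes a simple mask test instead of a comparison against composite magic values like RWSEM_WAITING_BIAS.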
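The first hunk in __rwsem_mark_wake() replaces the try_reader_grant retry loop with one speculative add and at most one undo. Below is a minimal user-space model of that protocol using C11 atomics; try_grant_reader is a hypothetical name, and the two macros follow the assumed layout sketched above.

    #include <stdatomic.h>
    #include <stdbool.h>

    #define RWSEM_WRITER_MASK  (1UL << 0)   /* assumed layout, see above */
    #define RWSEM_READER_BIAS  (1UL << 8)

    /* Speculatively add a reader bias, then back it out if a writer
     * turns out to own the lock. No retry loop is needed: with a
     * dedicated writer bit, a set bit can only mean "a writer holds
     * the lock", unlike the old biased count, which could also dip
     * below RWSEM_WAITING_BIAS transiently while readers backed out.
     */
    static bool try_grant_reader(atomic_long *count)
    {
            long oldcount = atomic_fetch_add(count, RWSEM_READER_BIAS);

            if (oldcount & RWSEM_WRITER_MASK) {
                    /* A writer stole the lock; undo the grant. */
                    atomic_fetch_sub(count, RWSEM_READER_BIAS);
                    return false;
            }
            return true;
    }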
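rwsem_try_write_lock() now computes the desired new count and hands it to atomic_long_try_cmpxchg_acquire(), which on failure writes the value it observed back into count. C11's atomic_compare_exchange_strong() has the same failure semantics, so the hunk can be modeled as below; the last_waiter flag is a hypothetical stand-in for list_is_singular(&sem->wait_list).

    #include <stdatomic.h>
    #include <stdbool.h>

    #define RWSEM_WRITER_LOCKED  (1UL << 0)	/* assumed layout, see above */
    #define RWSEM_FLAG_WAITERS   (1UL << 1)
    #define RWSEM_LOCK_MASK      ((1UL << 0) | ~((1UL << 8) - 1))

    /* Take the write lock in one compare-and-swap: set the writer bit
     * and, if we are the only waiter, clear the waiters flag in the
     * same step. On failure, 'count' is refreshed with the value the
     * CAS observed, just like the kernel's try_cmpxchg helpers.
     */
    static bool try_write_lock(atomic_long *sem_count, long count,
                               bool last_waiter)
    {
            long new;

            if (count & RWSEM_LOCK_MASK)	/* readers or a writer hold it */
                    return false;

            new = count + RWSEM_WRITER_LOCKED -
                  (last_waiter ? RWSEM_FLAG_WAITERS : 0);

            return atomic_compare_exchange_strong(sem_count, &count, new);
    }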
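Finally, both out_nolock paths switch from atomic_long_add(-RWSEM_WAITING_BIAS, ...) to atomic_long_andnot(RWSEM_FLAG_WAITERS, ...): once waiters are tracked by a dedicated flag bit rather than an arithmetic bias, emptying the wait list is a plain bit-clear. A user-space equivalent under the same assumed layout:

    #include <stdatomic.h>

    #define RWSEM_FLAG_WAITERS  (1UL << 1)  /* assumed layout, see above */

    /* Clear the waiters flag when the last waiter dequeues itself.
     * atomic_fetch_and with the complement models the kernel's
     * atomic_long_andnot().
     */
    static void clear_waiters_flag(atomic_long *count)
    {
            atomic_fetch_and(count, (long)~RWSEM_FLAG_WAITERS);
    }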