author | Will Deacon | 2015-08-06 17:54:42 +0100
committer | Ingo Molnar | 2015-08-12 11:59:06 +0200
commit | 77e430e3e45662b696dc49aa53ea0f7ac63f2574 (patch)
tree | 1fae9dab1524de8b3ca2516228ff76b809626974 /include
parent | 2b2a85a4d3534b8884fcfa5bb52837f0e1c672bc (diff)
locking/qrwlock: Make use of _{acquire|release|relaxed}() atomics
The qrwlock implementation is slightly heavy in its use of memory
barriers, mainly through the use of _cmpxchg() and _return() atomics, which
imply full barrier semantics.
This patch modifies the qrwlock code to use the more relaxed atomic
routines so that we can reduce the unnecessary barrier overhead on
weakly-ordered architectures.
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman.Long@hp.com
Cc: paulmck@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/1438880084-18856-7-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
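For readers unfamiliar with the relaxed atomic variants the message refers to, here is a minimal user-space sketch of the same idea expressed with C11 <stdatomic.h> rather than the kernel's atomic_*() API. The toy_rwlock type, the TOY_* constants, and the function names are hypothetical and only mirror the shape of the qrwlock reader fast paths; this is not the kernel implementation.

/* toy_qrwlock sketch -- illustrative only; names and bit values are hypothetical. */
#include <stdatomic.h>
#include <stdbool.h>

#define TOY_QW_LOCKED 0x0ffu              /* writer holds the lock        */
#define TOY_QW_WMASK  0x1ffu              /* writer locked + waiting bits */
#define TOY_QR_BIAS   0x200u              /* one reader = +0x200          */

struct toy_rwlock {
	atomic_uint cnts;                 /* reader count above the writer bits */
};

/*
 * Reader fast path. The increment uses acquire ordering: it only has to
 * order the critical section after lock acquisition, so no full barrier
 * is needed -- which is the point of the _acquire/_relaxed variants.
 */
static bool toy_read_trylock(struct toy_rwlock *lock)
{
	unsigned int cnts = atomic_load_explicit(&lock->cnts,
						 memory_order_relaxed);

	if (!(cnts & TOY_QW_WMASK)) {
		cnts = atomic_fetch_add_explicit(&lock->cnts, TOY_QR_BIAS,
						 memory_order_acquire);
		if (!(cnts & TOY_QW_WMASK))
			return true;
		/* A writer slipped in between the check and the add: back out.
		 * The failed attempt took nothing, so relaxed is enough. */
		atomic_fetch_sub_explicit(&lock->cnts, TOY_QR_BIAS,
					  memory_order_relaxed);
	}
	return false;
}

/*
 * Reader unlock. Release ordering keeps every access in the critical
 * section before the decrement, so no separate full barrier is needed
 * ahead of the atomic operation.
 */
static void toy_read_unlock(struct toy_rwlock *lock)
{
	atomic_fetch_sub_explicit(&lock->cnts, TOY_QR_BIAS,
				  memory_order_release);
}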
Diffstat (limited to 'include')
-rw-r--r-- | include/asm-generic/qrwlock.h | 13
1 file changed, 6 insertions, 7 deletions
diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
index eb673dde8879..54a8e65e18b6 100644
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -68,7 +68,7 @@ static inline int queued_read_trylock(struct qrwlock *lock)
 
 	cnts = atomic_read(&lock->cnts);
 	if (likely(!(cnts & _QW_WMASK))) {
-		cnts = (u32)atomic_add_return(_QR_BIAS, &lock->cnts);
+		cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
 		if (likely(!(cnts & _QW_WMASK)))
			return 1;
 		atomic_sub(_QR_BIAS, &lock->cnts);
@@ -89,8 +89,8 @@ static inline int queued_write_trylock(struct qrwlock *lock)
 	if (unlikely(cnts))
 		return 0;
 
-	return likely(atomic_cmpxchg(&lock->cnts,
-				     cnts, cnts | _QW_LOCKED) == cnts);
+	return likely(atomic_cmpxchg_acquire(&lock->cnts,
+					     cnts, cnts | _QW_LOCKED) == cnts);
 }
 /**
  * queued_read_lock - acquire read lock of a queue rwlock
@@ -100,7 +100,7 @@ static inline void queued_read_lock(struct qrwlock *lock)
 {
 	u32 cnts;
 
-	cnts = atomic_add_return(_QR_BIAS, &lock->cnts);
+	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
 	if (likely(!(cnts & _QW_WMASK)))
 		return;
 
@@ -115,7 +115,7 @@ static inline void queued_read_lock(struct qrwlock *lock)
 static inline void queued_write_lock(struct qrwlock *lock)
 {
 	/* Optimize for the unfair lock case where the fair flag is 0. */
-	if (atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0)
+	if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)
 		return;
 
 	queued_write_lock_slowpath(lock);
@@ -130,8 +130,7 @@ static inline void queued_read_unlock(struct qrwlock *lock)
 {
 	/*
 	 * Atomically decrement the reader count
 	 */
-	smp_mb__before_atomic();
-	atomic_sub(_QR_BIAS, &lock->cnts);
+	(void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
 }
 /**
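Continuing the same hypothetical toy_rwlock sketch from above (it reuses that struct and the TOY_* constants), the writer-side equivalents show the other half of the pairing the patch relies on: acquire ordering when taking the lock, release ordering when dropping it, and no full barriers anywhere on the fast paths.

/*
 * Writer fast path. On success the compare-exchange needs acquire
 * ordering only; on failure nothing was taken, so relaxed suffices.
 */
static bool toy_write_trylock(struct toy_rwlock *lock)
{
	unsigned int expected = 0;

	return atomic_compare_exchange_strong_explicit(&lock->cnts, &expected,
						       TOY_QW_LOCKED,
						       memory_order_acquire,
						       memory_order_relaxed);
}

/*
 * Writer unlock. A release store publishes the critical section to the
 * next acquirer; nothing stronger is needed on the unlock side.
 */
static void toy_write_unlock(struct toy_rwlock *lock)
{
	atomic_store_explicit(&lock->cnts, 0, memory_order_release);
}

On strongly-ordered machines such as x86 these acquire/release forms cost the same as the full-barrier ones, while on weakly-ordered architectures such as arm64 they avoid the extra barrier instructions, which is the saving the commit message describes.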