author	Chris Metcalf	2015-04-28 13:00:42 -0400
committer	Chris Metcalf	2015-04-28 22:43:16 -0400
commit	627ae54854edfbf29d5997015c190de22eef497f (patch)
tree	efa812a770bc06ce188f77262b5916e171ceaca1
parent	14c3dec2a875d898262be79c0f85e5f2b70a71b0 (diff)
tile: use READ_ONCE() in arch_spin_is_locked()
This avoids potential issues if callers were to loop on these
routines without some kind of memory barrier. Currently there
are no such users in-tree, but it seems better safe than sorry.
Also, in the tilepro case we now read "current" before "next",
which gives a slightly stronger guarantee: if we return claiming
the lock is not held, it was genuinely unlocked at least
momentarily. As far as I know, though, none of the callers
actually relies on this behavior.
Signed-off-by: Chris Metcalf <cmetcalf@ezchip.com>
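
[Editorial note, not part of the commit: a hypothetical polling caller of the kind the message anticipates shows the hazard the READ_ONCE() conversion guards against. READ_ONCE() amounts to a volatile load; without it, the compiler may legally hoist the plain loads in arch_spin_is_locked() out of the loop and spin forever on stale values. A minimal sketch, assuming the usual kernel helpers (cpu_relax() from <asm/processor.h>):]

	/* Hypothetical caller -- not from this commit. */
	static void wait_until_unlocked(arch_spinlock_t *lock)
	{
		/*
		 * With READ_ONCE() inside arch_spin_is_locked(), each
		 * iteration performs fresh loads of current_ticket and
		 * next_ticket.  With the old plain loads, the compiler
		 * could legally read them once before the loop and
		 * reuse the values, making this spin forever.
		 */
		while (arch_spin_is_locked(lock))
			cpu_relax();
	}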
-rw-r--r--	arch/tile/include/asm/spinlock_32.h	6
-rw-r--r--	arch/tile/include/asm/spinlock_64.h	5
2 files changed, 9 insertions, 2 deletions
diff --git a/arch/tile/include/asm/spinlock_32.h b/arch/tile/include/asm/spinlock_32.h
index c0a77b38d39a..b14b1ba5bf9c 100644
--- a/arch/tile/include/asm/spinlock_32.h
+++ b/arch/tile/include/asm/spinlock_32.h
@@ -41,8 +41,12 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 	 * to claim the lock is held, since it will be momentarily
 	 * if not already.  There's no need to wait for a "valid"
 	 * lock->next_ticket to become available.
+	 * Use READ_ONCE() to ensure that calling this in a loop is OK.
 	 */
-	return lock->next_ticket != lock->current_ticket;
+	int curr = READ_ONCE(lock->current_ticket);
+	int next = READ_ONCE(lock->next_ticket);
+
+	return next != curr;
 }
 
 void arch_spin_lock(arch_spinlock_t *lock);
diff --git a/arch/tile/include/asm/spinlock_64.h b/arch/tile/include/asm/spinlock_64.h
index 9a12b9c7e5d3..b9718fb4e74a 100644
--- a/arch/tile/include/asm/spinlock_64.h
+++ b/arch/tile/include/asm/spinlock_64.h
@@ -18,6 +18,8 @@
 #ifndef _ASM_TILE_SPINLOCK_64_H
 #define _ASM_TILE_SPINLOCK_64_H
 
+#include <linux/compiler.h>
+
 /* Shifts and masks for the various fields in "lock". */
 #define __ARCH_SPIN_CURRENT_SHIFT	17
 #define __ARCH_SPIN_NEXT_MASK		0x7fff
@@ -44,7 +46,8 @@ static inline u32 arch_spin_next(u32 val)
 /* The lock is locked if a task would have to wait to get it. */
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
-	u32 val = lock->lock;
+	/* Use READ_ONCE() to ensure that calling this in a loop is OK. */
+	u32 val = READ_ONCE(lock->lock);
 	return arch_spin_current(val) != arch_spin_next(val);
 }
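
[Editorial note, a worked illustration of the read-ordering point in the message, with hypothetical ticket values. In a ticket lock, next_ticket grows when a ticket is taken, current_ticket grows on unlock, the lock is free exactly when the two are equal, and next >= current always holds.]

	/*
	 * Reader (tilepro), with current_ticket read first:
	 *
	 *   curr = READ_ONCE(lock->current_ticket);  -> sees 5
	 *   (concurrent lockers/unlockers may run here)
	 *   next = READ_ONCE(lock->next_ticket);     -> sees 5
	 *
	 * next_ticket never decreases, so next <= 5 already held at the
	 * time of the first read; combined with the invariant
	 * next >= curr and curr == 5, we get next == curr == 5 at that
	 * instant -- the lock really was unlocked momentarily.
	 *
	 * With the reads in the other order, an unlock between them
	 * could make the two values match even though the lock was
	 * never observed free at any single instant in the window.
	 */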