author		Linus Torvalds	2018-02-25 16:29:59 -0800
committer	Linus Torvalds	2018-02-25 16:29:59 -0800
commit		9c897096bbabaca1ad042a35cc1ef441d809ddc8 (patch)
tree		97588a441878a6efe10711e059419316645e80b1 /arch
parent		297ea1b7f717db660fa6eb543dc0af41f7bf8d57 (diff)
parent		472e8c55cf6622d1c112dc2bc777f68bbd4189db (diff)
Merge branch 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking fixes from Thomas Gleixner:
"Three patches to fix memory ordering issues on ALPHA and a comment to
clarify the usage scope of a mutex internal function"
* 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
locking/xchg/alpha: Fix xchg() and cmpxchg() memory ordering bugs
locking/xchg/alpha: Clean up barrier usage by using smp_mb() in place of __ASM__MB
locking/xchg/alpha: Add unconditional memory barrier to cmpxchg()
locking/mutex: Add comment to __mutex_owner() to deter usage
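
For context, the guarantee these fixes restore is that xchg() and cmpxchg() act as full memory barriers, which is what makes them usable for lock handoff and message passing. Below is a minimal sketch of the pattern that breaks when the barriers are missing; it is a hypothetical illustration, not code from this pull, and the names producer/consumer/payload/flag are invented:

#include <linux/atomic.h>
#include <linux/bug.h>

static int payload;
static atomic_t flag = ATOMIC_INIT(0);

/* CPU 0: publish the payload, then set the flag with a fully
 * ordered operation.  The leading barrier in xchg() orders the
 * plain store before the flag update.
 */
static void producer(void)
{
	payload = 42;
	atomic_xchg(&flag, 1);
}

/* CPU 1: consume the flag.  The trailing barrier in xchg() orders
 * the flag read before the payload read.
 */
static void consumer(void)
{
	if (atomic_xchg(&flag, 0) == 1)
		BUG_ON(payload != 42);	/* must observe the producer's store */
}

Without the leading barrier, Alpha's ll/sc sequence alone would allow the store to payload to drift past the flag update, so the consumer could observe the flag set and still read a stale payload.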
Diffstat (limited to 'arch')
-rw-r--r--	arch/alpha/include/asm/cmpxchg.h	 6
-rw-r--r--	arch/alpha/include/asm/xchg.h	38
2 files changed, 26 insertions, 18 deletions
diff --git a/arch/alpha/include/asm/cmpxchg.h b/arch/alpha/include/asm/cmpxchg.h
index 46ebf14aed4e..8a2b331e43fe 100644
--- a/arch/alpha/include/asm/cmpxchg.h
+++ b/arch/alpha/include/asm/cmpxchg.h
@@ -6,7 +6,6 @@
  * Atomic exchange routines.
  */
 
-#define __ASM__MB
 #define ____xchg(type, args...)	__xchg ## type ## _local(args)
 #define ____cmpxchg(type, args...)	__cmpxchg ## type ## _local(args)
 #include <asm/xchg.h>
@@ -33,10 +32,6 @@
 	cmpxchg_local((ptr), (o), (n));					\
 })
 
-#ifdef CONFIG_SMP
-#undef __ASM__MB
-#define __ASM__MB	"\tmb\n"
-#endif
 #undef ____xchg
 #undef ____cmpxchg
 #define ____xchg(type, args...)		__xchg ##type(args)
@@ -64,7 +59,6 @@
 	cmpxchg((ptr), (o), (n));					\
 })
 
-#undef __ASM__MB
 #undef ____cmpxchg
 
 #endif /* _ALPHA_CMPXCHG_H */
diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h
index 68dfb3cb7145..e2b59fac5257 100644
--- a/arch/alpha/include/asm/xchg.h
+++ b/arch/alpha/include/asm/xchg.h
@@ -12,6 +12,10 @@
  * Atomic exchange.
  * Since it can be used to implement critical sections
  * it must clobber "memory" (also for interrupts in UP).
+ *
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
  */
 
 static inline unsigned long
@@ -19,6 +23,7 @@ ____xchg(_u8, volatile char *m, unsigned long val)
 {
 	unsigned long ret, tmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%4,7,%3\n"
 	"	insbl	%1,%4,%1\n"
@@ -28,12 +33,12 @@ ____xchg(_u8, volatile char *m, unsigned long val)
 	"	or	%1,%2,%2\n"
 	"	stq_c	%2,0(%3)\n"
 	"	beq	%2,2f\n"
-		__ASM__MB
 	".subsection 2\n"
 	"2:	br	1b\n"
 	".previous"
 	: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
 	: "r" ((long)m), "1" (val) : "memory");
+	smp_mb();
 
 	return ret;
 }
@@ -43,6 +48,7 @@ ____xchg(_u16, volatile short *m, unsigned long val)
 {
 	unsigned long ret, tmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%4,7,%3\n"
 	"	inswl	%1,%4,%1\n"
@@ -52,12 +58,12 @@ ____xchg(_u16, volatile short *m, unsigned long val)
 	"	or	%1,%2,%2\n"
 	"	stq_c	%2,0(%3)\n"
 	"	beq	%2,2f\n"
-		__ASM__MB
 	".subsection 2\n"
 	"2:	br	1b\n"
 	".previous"
 	: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
 	: "r" ((long)m), "1" (val) : "memory");
+	smp_mb();
 
 	return ret;
 }
@@ -67,17 +73,18 @@ ____xchg(_u32, volatile int *m, unsigned long val)
 {
 	unsigned long dummy;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l %0,%4\n"
 	"	bis $31,%3,%1\n"
 	"	stl_c %1,%2\n"
 	"	beq %1,2f\n"
-		__ASM__MB
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	: "=&r" (val), "=&r" (dummy), "=m" (*m)
 	: "rI" (val), "m" (*m) : "memory");
+	smp_mb();
 
 	return val;
 }
@@ -87,17 +94,18 @@ ____xchg(_u64, volatile long *m, unsigned long val)
 {
 	unsigned long dummy;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %0,%4\n"
 	"	bis $31,%3,%1\n"
 	"	stq_c %1,%2\n"
 	"	beq %1,2f\n"
-		__ASM__MB
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	: "=&r" (val), "=&r" (dummy), "=m" (*m)
 	: "rI" (val), "m" (*m) : "memory");
+	smp_mb();
 
 	return val;
 }
@@ -128,10 +136,12 @@ ____xchg(, volatile void *ptr, unsigned long x, int size)
  * store NEW in MEM.  Return the initial value in MEM.  Success is
  * indicated by comparing RETURN with OLD.
  *
- * The memory barrier should be placed in SMP only when we actually
- * make the change. If we don't change anything (so if the returned
- * prev is equal to old) then we aren't acquiring anything new and
- * we don't need any memory barrier as far I can tell.
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
+ * The trailing memory barrier is placed in SMP unconditionally, in
+ * order to guarantee that dependency ordering is preserved when a
+ * dependency is headed by an unsuccessful operation.
  */
 
 static inline unsigned long
@@ -139,6 +149,7 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
 {
 	unsigned long prev, tmp, cmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%5,7,%4\n"
 	"	insbl	%1,%5,%1\n"
@@ -150,13 +161,13 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
 	"	or	%1,%2,%2\n"
 	"	stq_c	%2,0(%4)\n"
 	"	beq	%2,3f\n"
-		__ASM__MB
 	"2:\n"
 	".subsection 2\n"
 	"3:	br	1b\n"
 	".previous"
 	: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
 	: "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+	smp_mb();
 
 	return prev;
 }
@@ -166,6 +177,7 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
 {
 	unsigned long prev, tmp, cmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%5,7,%4\n"
 	"	inswl	%1,%5,%1\n"
@@ -177,13 +189,13 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
 	"	or	%1,%2,%2\n"
 	"	stq_c	%2,0(%4)\n"
 	"	beq	%2,3f\n"
-		__ASM__MB
 	"2:\n"
 	".subsection 2\n"
 	"3:	br	1b\n"
 	".previous"
 	: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
 	: "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+	smp_mb();
 
 	return prev;
 }
@@ -193,6 +205,7 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
 {
 	unsigned long prev, cmp;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l %0,%5\n"
 	"	cmpeq %0,%3,%1\n"
@@ -200,13 +213,13 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
 	"	mov %4,%1\n"
 	"	stl_c %1,%2\n"
 	"	beq %1,3f\n"
-		__ASM__MB
 	"2:\n"
 	".subsection 2\n"
 	"3:	br 1b\n"
 	".previous"
 	: "=&r"(prev), "=&r"(cmp), "=m"(*m)
 	: "r"((long) old), "r"(new), "m"(*m) : "memory");
+	smp_mb();
 
 	return prev;
 }
@@ -216,6 +229,7 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
 {
 	unsigned long prev, cmp;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %0,%5\n"
 	"	cmpeq %0,%3,%1\n"
@@ -223,13 +237,13 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
 	"	mov %4,%1\n"
 	"	stq_c %1,%2\n"
 	"	beq %1,3f\n"
-		__ASM__MB
 	"2:\n"
 	".subsection 2\n"
 	"3:	br 1b\n"
 	".previous"
 	: "=&r"(prev), "=&r"(cmp), "=m"(*m)
 	: "r"((long) old), "r"(new), "m"(*m) : "memory");
+	smp_mb();
 
 	return prev;
 }
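
To illustrate the "dependency headed by an unsuccessful operation" case that the new comment describes, here is a hedged sketch (hypothetical names, not code from this pull) in which the value returned by a failed cmpxchg() is immediately dereferenced:

#include <linux/atomic.h>

struct node {
	int payload;
};

static struct node *head;

/* Try to pop 'expected'; on failure, inspect whatever node is
 * actually installed.  The dereference of 'old' carries an address
 * dependency on the value returned by cmpxchg(), and that ordering
 * must hold even when the comparison fails and no store-conditional
 * executes.  This is what the unconditional trailing smp_mb()
 * guarantees on Alpha, which does not honor address dependencies
 * without a barrier.
 */
static int try_pop_or_peek(struct node *expected)
{
	struct node *old = cmpxchg(&head, expected, NULL);

	return old ? old->payload : -1;
}

With the old code, the barrier sat after the beq on the success path only, so a failed comparison branched around it and left the dependent load of old->payload unordered; moving to an unconditional trailing smp_mb() closes exactly that hole.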