author:    Peter Zijlstra    2016-04-18 01:09:20 +0200
committer: Ingo Molnar       2016-06-16 10:48:33 +0200
commit:    fe14d2f12d5e641f114e27c2ea1fb85843c58967
tree:      f213497b302298bd1a898bb09a44ef38c1439813 /arch/alpha
parent:    e37837fb62f95a81bdcefa86ceea043df84937d7
locking/atomic, arch/alpha: Convert to _relaxed atomics
Generic code will construct {,_acquire,_release} versions by adding the
required smp_mb__{before,after}_atomic() calls.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-alpha@vger.kernel.org
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
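[Editorial context] The generic construction the changelog refers to lives in
<linux/atomic.h>. A simplified sketch of that machinery, as it looked around
this kernel version (not the verbatim source; details vary across releases),
showing how the ordered variants are wrapped around an architecture's
_relaxed implementation:

#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	smp_mb__after_atomic();	/* order vs later accesses */		\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	smp_mb__before_atomic();	/* order vs earlier accesses */	\
	op##_relaxed(args);						\
})

#define __atomic_op_fence(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret;				\
	smp_mb__before_atomic();					\
	__ret = op##_relaxed(args);					\
	smp_mb__after_atomic();						\
	__ret;								\
})

/* So, with only atomic_add_return_relaxed() provided by the arch: */
#ifndef atomic_add_return
#define atomic_add_return(...)						\
	__atomic_op_fence(atomic_add_return, __VA_ARGS__)
#endif

This is why the patch below only needs to rename the ops to _relaxed and
drop the open-coded smp_mb() pairs: the generic layer puts the barriers
back for the ordered variants.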
Diffstat (limited to 'arch/alpha')
-rw-r--r--   arch/alpha/include/asm/atomic.h   36
1 file changed, 24 insertions(+), 12 deletions(-)
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 5377ca8bb503..498933a7df97 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -46,10 +46,9 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
} \
#define ATOMIC_OP_RETURN(op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
long temp, result; \
- smp_mb(); \
__asm__ __volatile__( \
"1: ldl_l %0,%1\n" \
" " #asm_op " %0,%3,%2\n" \
@@ -61,15 +60,13 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
".previous" \
:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
:"Ir" (i), "m" (v->counter) : "memory"); \
- smp_mb(); \
return result; \
}
#define ATOMIC_FETCH_OP(op, asm_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
long temp, result; \
- smp_mb(); \
__asm__ __volatile__( \
"1: ldl_l %2,%1\n" \
" " #asm_op " %2,%3,%0\n" \
@@ -80,7 +77,6 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
".previous" \
:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
:"Ir" (i), "m" (v->counter) : "memory"); \
- smp_mb(); \
return result; \
}
@@ -101,10 +97,9 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
} \
#define ATOMIC64_OP_RETURN(op, asm_op) \
-static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
+static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
{ \
long temp, result; \
- smp_mb(); \
__asm__ __volatile__( \
"1: ldq_l %0,%1\n" \
" " #asm_op " %0,%3,%2\n" \
@@ -116,15 +111,13 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
".previous" \
:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
:"Ir" (i), "m" (v->counter) : "memory"); \
- smp_mb(); \
return result; \
}
#define ATOMIC64_FETCH_OP(op, asm_op) \
-static __inline__ long atomic64_fetch_##op(long i, atomic64_t * v) \
+static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
{ \
long temp, result; \
- smp_mb(); \
__asm__ __volatile__( \
"1: ldq_l %2,%1\n" \
" " #asm_op " %2,%3,%0\n" \
@@ -135,7 +128,6 @@ static __inline__ long atomic64_fetch_##op(long i, atomic64_t * v) \
".previous" \
:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
:"Ir" (i), "m" (v->counter) : "memory"); \
- smp_mb(); \
return result; \
}
@@ -150,6 +142,16 @@ static __inline__ long atomic64_fetch_##op(long i, atomic64_t * v) \
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
+#define atomic_add_return_relaxed atomic_add_return_relaxed
+#define atomic_sub_return_relaxed atomic_sub_return_relaxed
+#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
+#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+
+#define atomic64_add_return_relaxed atomic64_add_return_relaxed
+#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
+#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+
#define atomic_andnot atomic_andnot
#define atomic64_andnot atomic64_andnot
@@ -165,6 +167,16 @@ ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, bis)
ATOMIC_OPS(xor, xor)
+#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
+#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
+#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
+#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+
+#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
+#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
+#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
+#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+
#undef ATOMIC_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
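[Editorial note] The net effect for fully ordered callers is unchanged.
Assuming the generic defaults apply on Alpha (smp_mb__{before,after}_atomic()
expanding to smp_mb(), as they do absent an arch override), a call such as
atomic_add_return() reduces after this patch to roughly:

	/*
	 * Illustrative sketch only, under the assumptions stated above;
	 * not literal kernel source.
	 */
	smp_mb();				/* from __atomic_op_fence() */
	ret = atomic_add_return_relaxed(i, v);	/* bare LL/SC loop, no barriers */
	smp_mb();				/* from __atomic_op_fence() */

That is the same barrier placement the deleted open-coded smp_mb() calls
provided, while callers that can tolerate relaxed ordering may now use the
_relaxed variants and avoid both barriers entirely.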