author		Linus Torvalds		2023-05-05 12:56:55 -0700
committer	Linus Torvalds		2023-05-05 12:56:55 -0700
commit		b115d85a9584c98f9a7dec209d835462aa1adc09 (patch)
tree		332af6e2fafef47b9dc9891282b422eb5dcc5f3f /arch/hexagon
parent		d5ed10bb80bb376501cb56015a47457647efaabf (diff)
parent		ec570320b09f76d52819e60abdccf372658216b6 (diff)
Merge tag 'locking-core-2023-05-05' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:

 - Introduce local{,64}_try_cmpxchg() - a slightly more optimal
   primitive, which will be used in perf events ring-buffer code

 - Simplify/modify rwsems on PREEMPT_RT, to address writer starvation

 - Misc cleanups/fixes

* tag 'locking-core-2023-05-05' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/atomic: Correct (cmp)xchg() instrumentation
  locking/x86: Define arch_try_cmpxchg_local()
  locking/arch: Wire up local_try_cmpxchg()
  locking/generic: Wire up local{,64}_try_cmpxchg()
  locking/atomic: Add generic try_cmpxchg{,64}_local() support
  locking/rwbase: Mitigate indefinite writer starvation
  locking/arch: Rename all internal __xchg() names to __arch_xchg()
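The try_cmpxchg() family mentioned above is attractive because, on failure, it writes the value it actually found back into the caller's "old" variable, so a retry loop needs no separate reload. A minimal user-space sketch of that pattern, using the portable GCC/Clang __atomic builtins rather than the kernel's local_t API (the counter and the add_one() helper are illustrative assumptions, not kernel code):

#include <stdio.h>

static long counter;

/*
 * Retry loop in the try_cmpxchg() style: when the compare fails,
 * __atomic_compare_exchange_n() stores the current contents of
 * 'counter' into 'old', so the loop body never reloads by hand.
 */
static void add_one(void)
{
	long old = __atomic_load_n(&counter, __ATOMIC_RELAXED);

	while (!__atomic_compare_exchange_n(&counter, &old, old + 1,
					    0 /* strong */,
					    __ATOMIC_RELAXED,
					    __ATOMIC_RELAXED))
		;	/* 'old' already holds the fresh value */
}

int main(void)
{
	add_one();
	printf("counter=%ld\n", counter);
	return 0;
}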
Diffstat (limited to 'arch/hexagon')
-rw-r--r--	arch/hexagon/include/asm/cmpxchg.h	10
1 file changed, 5 insertions, 5 deletions
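The hunks below carry out the last item in the series, renaming hexagon's internal __xchg() helper to __arch_xchg(). Its contract is an unconditional atomic swap: store the new value, return the old one. As a reminder of that contract, a stand-alone user-space analog built on the __atomic builtins (illustrative only; this is not the Hexagon memw_locked implementation the header actually uses):

#include <stdio.h>

static unsigned long word = 0xaa;

int main(void)
{
	/*
	 * Atomic swap: store 0x55 and return whatever was there
	 * before -- the same contract as arch_xchg(&word, 0x55).
	 */
	unsigned long old = __atomic_exchange_n(&word, 0x55UL,
						__ATOMIC_SEQ_CST);

	printf("old=%#lx new=%#lx\n", old, word);
	return 0;
}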
diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h
index cdb705e1496a..bf6cf5579cf4 100644
--- a/arch/hexagon/include/asm/cmpxchg.h
+++ b/arch/hexagon/include/asm/cmpxchg.h
@@ -9,7 +9,7 @@
#define _ASM_CMPXCHG_H
/*
- * __xchg - atomically exchange a register and a memory location
+ * __arch_xchg - atomically exchange a register and a memory location
* @x: value to swap
* @ptr: pointer to memory
* @size: size of the value
@@ -19,8 +19,8 @@
* Note: there was an errata for V2 about .new's and memw_locked.
*
*/
-static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
- int size)
+static inline unsigned long
+__arch_xchg(unsigned long x, volatile void *ptr, int size)
{
unsigned long retval;
@@ -42,8 +42,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
* Atomically swap the contents of a register with memory. Should be atomic
* between multiple CPU's and within interrupts on the same CPU.
*/
-#define arch_xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), \
- sizeof(*(ptr))))
+#define arch_xchg(ptr, v) ((__typeof__(*(ptr)))__arch_xchg((unsigned long)(v), (ptr), \
+ sizeof(*(ptr))))
/*
* see rt-mutex-design.txt; cmpxchg supposedly checks if *ptr == A and swaps.
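For reference, the semantics that comment alludes to: cmpxchg(ptr, old, new) atomically compares *ptr with old, stores new only on a match, and in either case returns the value *ptr held beforehand. A non-atomic model of just those semantics (model_cmpxchg() is a hypothetical name; the real primitive performs the whole sequence atomically, on Hexagon via a load-locked/store-conditional loop):

#include <assert.h>

/*
 * Non-atomic model of the cmpxchg() contract: if *ptr == expected,
 * store 'new' and return the old contents; otherwise leave *ptr
 * untouched and return what was found.
 */
static unsigned long model_cmpxchg(unsigned long *ptr,
				   unsigned long expected,
				   unsigned long new)
{
	unsigned long old = *ptr;

	if (old == expected)
		*ptr = new;
	return old;
}

int main(void)
{
	unsigned long v = 1;

	assert(model_cmpxchg(&v, 1, 2) == 1 && v == 2);	/* match: swapped */
	assert(model_cmpxchg(&v, 1, 3) == 2 && v == 2);	/* mismatch: kept */
	return 0;
}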