author	Linus Torvalds	2023-05-05 12:56:55 -0700
committer	Linus Torvalds	2023-05-05 12:56:55 -0700
commit	b115d85a9584c98f9a7dec209d835462aa1adc09 (patch)
tree	332af6e2fafef47b9dc9891282b422eb5dcc5f3f /arch/xtensa
parent	d5ed10bb80bb376501cb56015a47457647efaabf (diff)
parent	ec570320b09f76d52819e60abdccf372658216b6 (diff)
Merge tag 'locking-core-2023-05-05' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
- Introduce local{,64}_try_cmpxchg() - a slightly more optimal
  primitive, which will be used in the perf events ring-buffer code
  (see the sketch after this list)
- Simplify/modify rwsems on PREEMPT_RT, to address writer starvation
- Misc cleanups/fixes
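
The "slightly more optimal" part of try_cmpxchg() is that it returns a
success flag and, on failure, writes the freshly observed value back into
the caller's expected-value variable, so an update loop does not need to
re-read the location on every retry. Below is a minimal userspace sketch of
that loop shape using C11 atomics - an analogue of the kernel's
local_try_cmpxchg() usage pattern, not the kernel API itself; the function
name add_if_positive() is made up for illustration.

#include <stdatomic.h>
#include <stdio.h>

/*
 * try_cmpxchg()-style update loop: atomic_compare_exchange_weak_explicit()
 * returns true on success and, on failure, stores the current value into
 * 'old', so the loop can retry without an explicit reload.  The classic
 * form instead re-reads or re-assigns 'old' from the cmpxchg() return
 * value on every iteration.
 */
static void add_if_positive(atomic_long *v, long delta)
{
	long old = atomic_load_explicit(v, memory_order_relaxed);

	do {
		if (old <= 0)
			return;		/* nothing to add to, give up */
	} while (!atomic_compare_exchange_weak_explicit(v, &old, old + delta,
							memory_order_relaxed,
							memory_order_relaxed));
}

int main(void)
{
	atomic_long v = 5;

	add_if_positive(&v, 3);
	printf("%ld\n", atomic_load(&v));	/* prints 8 */
	return 0;
}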
* tag 'locking-core-2023-05-05' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
locking/atomic: Correct (cmp)xchg() instrumentation
locking/x86: Define arch_try_cmpxchg_local()
locking/arch: Wire up local_try_cmpxchg()
locking/generic: Wire up local{,64}_try_cmpxchg()
locking/atomic: Add generic try_cmpxchg{,64}_local() support
locking/rwbase: Mitigate indefinite writer starvation
locking/arch: Rename all internal __xchg() names to __arch_xchg()
Diffstat (limited to 'arch/xtensa')
 arch/xtensa/include/asm/cmpxchg.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h
index eb87810357ad..675a11ea8de7 100644
--- a/arch/xtensa/include/asm/cmpxchg.h
+++ b/arch/xtensa/include/asm/cmpxchg.h
@@ -170,7 +170,7 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
 }
 
 #define arch_xchg(ptr,x) \
-	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+	((__typeof__(*(ptr)))__arch_xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
 
 static inline u32 xchg_small(volatile void *ptr, u32 x, int size)
 {
@@ -203,7 +203,7 @@ static inline u32 xchg_small(volatile void *ptr, u32 x, int size)
 extern void __xchg_called_with_bad_pointer(void);
 
 static __inline__ unsigned long
-__xchg(unsigned long x, volatile void * ptr, int size)
+__arch_xchg(unsigned long x, volatile void * ptr, int size)
 {
 	switch (size) {
 	case 1:
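
For reference, arch_xchg() on xtensa is a typed macro around a
size-dispatching internal helper, and this patch only renames that helper
so the plain __xchg name is free for the generic, instrumented wrappers
layered on top. The sketch below mirrors that wrapper pattern in userspace
GNU C - the names my_xchg()/__my_xchg() are invented for illustration and
the body is not atomic, unlike the real xtensa code.

#include <stdint.h>
#include <stdio.h>

/*
 * Internal helper dispatching on operand size, shaped like __arch_xchg():
 * the caller-facing macro does the casts, the helper picks the width.
 */
static inline unsigned long __my_xchg(unsigned long x, volatile void *ptr, int size)
{
	unsigned long old;

	switch (size) {
	case 1:
		old = *(volatile uint8_t *)ptr;
		*(volatile uint8_t *)ptr = (uint8_t)x;
		return old;
	case 4:
		old = *(volatile uint32_t *)ptr;
		*(volatile uint32_t *)ptr = (uint32_t)x;
		return old;
	default:
		/* the kernel provokes a link error here via
		   __xchg_called_with_bad_pointer(); just fail quietly */
		return 0;
	}
}

#define my_xchg(ptr, x) \
	((__typeof__(*(ptr)))__my_xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))

int main(void)
{
	uint32_t v = 7;
	uint32_t old = my_xchg(&v, 42);

	printf("old=%u new=%u\n", old, v);	/* old=7 new=42 */
	return 0;
}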