author    | Linus Torvalds | 2021-06-28 11:45:29 -0700
committer | Linus Torvalds | 2021-06-28 11:45:29 -0700
commit    | a15286c63d113d4296c58867994cd266a28f5d6d (patch)
tree      | aaebbc86918c0c1943c26115d5bb1dd6c2fc2d9b /arch/openrisc
parent    | b89c07dea16137696d0f2d479ef665ef7c1022ab (diff)
parent    | 0e8a89d49d45197770f2e57fb15f1bc9ded96eb0 (diff)
Merge tag 'locking-core-2021-06-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
- Core locking & atomics:
   - Convert all architectures to ARCH_ATOMIC: move every architecture
     to ARCH_ATOMIC, then get rid of ARCH_ATOMIC and all the
     transitory facilities and #ifdefs.
     Much reduction in complexity from that series:
       63 files changed, 756 insertions(+), 4094 deletions(-)
   - Self-test enhancements
- Futexes:
   - Add the new FUTEX_LOCK_PI2 ABI, which is a variant that doesn't
     set FLAGS_CLOCKRT (i.e. it uses CLOCK_MONOTONIC); see the usage
     sketch after this list.
     [ The temptation to repurpose FUTEX_LOCK_PI's implicit setting of
       FLAGS_CLOCKRT & invert the flag's meaning to avoid having to
       introduce a new variant was resisted successfully. ]
   - Enhance futex self-tests
- Lockdep:
   - Fix dependency path printouts
   - Optimize trace saving
   - Broaden & fix wait-context checks
- Misc cleanups and fixes.
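A hedged userspace sketch of the new futex op described above. It assumes the documented semantics (FUTEX_LOCK_PI2 takes an absolute timeout measured against CLOCK_MONOTONIC unless FUTEX_CLOCK_REALTIME is also passed); the fallback #define, the futex_lock_pi2() helper name and the 2-second timeout are local to this example, not part of the kernel series:

/* Sketch: acquire a PI futex with an absolute CLOCK_MONOTONIC timeout. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>

#ifndef FUTEX_LOCK_PI2
#define FUTEX_LOCK_PI2	13	/* may be missing from older uapi headers */
#endif

static long futex_lock_pi2(uint32_t *uaddr, const struct timespec *abs_timeout)
{
	/* val, uaddr2 and val3 are unused for FUTEX_LOCK_PI2 */
	return syscall(SYS_futex, uaddr, FUTEX_LOCK_PI2, 0, abs_timeout, NULL, 0);
}

int main(void)
{
	static uint32_t futex_word;	/* 0 == unlocked */
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	ts.tv_sec += 2;			/* give up after ~2 seconds */

	if (futex_lock_pi2(&futex_word, &ts) == -1)
		perror("FUTEX_LOCK_PI2");
	else
		printf("acquired PI futex, word now holds our TID\n");
	return 0;
}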
* tag 'locking-core-2021-06-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (52 commits)
locking/lockdep: Correct the description error for check_redundant()
futex: Provide FUTEX_LOCK_PI2 to support clock selection
futex: Prepare futex_lock_pi() for runtime clock selection
lockdep/selftest: Remove wait-type RCU_CALLBACK tests
lockdep/selftests: Fix selftests vs PROVE_RAW_LOCK_NESTING
lockdep: Fix wait-type for empty stack
locking/selftests: Add a selftest for check_irq_usage()
lockding/lockdep: Avoid to find wrong lock dep path in check_irq_usage()
locking/lockdep: Remove the unnecessary trace saving
locking/lockdep: Fix the dep path printing for backwards BFS
selftests: futex: Add futex compare requeue test
selftests: futex: Add futex wait test
seqlock: Remove trailing semicolon in macros
locking/lockdep: Reduce LOCKDEP dependency list
locking/lockdep,doc: Improve readability of the block matrix
locking/atomics: atomic-instrumented: simplify ifdeffery
locking/atomic: delete !ARCH_ATOMIC remnants
locking/atomic: xtensa: move to ARCH_ATOMIC
locking/atomic: sparc: move to ARCH_ATOMIC
locking/atomic: sh: move to ARCH_ATOMIC
...
Diffstat (limited to 'arch/openrisc')
-rw-r--r-- | arch/openrisc/include/asm/atomic.h  | 42
-rw-r--r-- | arch/openrisc/include/asm/cmpxchg.h |  4
2 files changed, 28 insertions(+), 18 deletions(-)
diff --git a/arch/openrisc/include/asm/atomic.h b/arch/openrisc/include/asm/atomic.h
index b589fac39b92..326167e4783a 100644
--- a/arch/openrisc/include/asm/atomic.h
+++ b/arch/openrisc/include/asm/atomic.h
@@ -13,7 +13,7 @@
 /* Atomically perform op with v->counter and i */
 #define ATOMIC_OP(op)						\
-static inline void atomic_##op(int i, atomic_t *v)		\
+static inline void arch_atomic_##op(int i, atomic_t *v)	\
 {								\
 	int tmp;						\
 								\
@@ -30,7 +30,7 @@ static inline void atomic_##op(int i, atomic_t *v)	\
 
 /* Atomically perform op with v->counter and i, return the result */
 #define ATOMIC_OP_RETURN(op)					\
-static inline int atomic_##op##_return(int i, atomic_t *v)	\
+static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
 {								\
 	int tmp;						\
 								\
@@ -49,7 +49,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v)	\
 
 /* Atomically perform op with v->counter and i, return orig v->counter */
 #define ATOMIC_FETCH_OP(op)					\
-static inline int atomic_fetch_##op(int i, atomic_t *v)	\
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v)	\
 {								\
 	int tmp, old;						\
 								\
@@ -75,6 +75,8 @@ ATOMIC_FETCH_OP(and)
 ATOMIC_FETCH_OP(or)
 ATOMIC_FETCH_OP(xor)
 
+ATOMIC_OP(add)
+ATOMIC_OP(sub)
 ATOMIC_OP(and)
 ATOMIC_OP(or)
 ATOMIC_OP(xor)
@@ -83,16 +85,18 @@ ATOMIC_OP(xor)
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-#define atomic_add_return	atomic_add_return
-#define atomic_sub_return	atomic_sub_return
-#define atomic_fetch_add	atomic_fetch_add
-#define atomic_fetch_sub	atomic_fetch_sub
-#define atomic_fetch_and	atomic_fetch_and
-#define atomic_fetch_or		atomic_fetch_or
-#define atomic_fetch_xor	atomic_fetch_xor
-#define atomic_and		atomic_and
-#define atomic_or		atomic_or
-#define atomic_xor		atomic_xor
+#define arch_atomic_add_return		arch_atomic_add_return
+#define arch_atomic_sub_return		arch_atomic_sub_return
+#define arch_atomic_fetch_add		arch_atomic_fetch_add
+#define arch_atomic_fetch_sub		arch_atomic_fetch_sub
+#define arch_atomic_fetch_and		arch_atomic_fetch_and
+#define arch_atomic_fetch_or		arch_atomic_fetch_or
+#define arch_atomic_fetch_xor		arch_atomic_fetch_xor
+#define arch_atomic_add			arch_atomic_add
+#define arch_atomic_sub			arch_atomic_sub
+#define arch_atomic_and			arch_atomic_and
+#define arch_atomic_or			arch_atomic_or
+#define arch_atomic_xor			arch_atomic_xor
 
 /*
  * Atomically add a to v->counter as long as v is not already u.
@@ -100,7 +104,7 @@ ATOMIC_OP(xor)
  *
  * This is often used through atomic_inc_not_zero()
  */
-static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int old, tmp;
 
@@ -119,8 +123,14 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 	return old;
 }
-#define atomic_fetch_add_unless	atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless	arch_atomic_fetch_add_unless
 
-#include <asm-generic/atomic.h>
+#define arch_atomic_read(v)		READ_ONCE((v)->counter)
+#define arch_atomic_set(v,i)		WRITE_ONCE((v)->counter, (i))
+
+#include <asm/cmpxchg.h>
+
+#define arch_atomic_xchg(ptr, v)		(arch_xchg(&(ptr)->counter, (v)))
+#define arch_atomic_cmpxchg(v, old, new)	(arch_cmpxchg(&((v)->counter), (old), (new)))
 
 #endif /* __ASM_OPENRISC_ATOMIC_H */
diff --git a/arch/openrisc/include/asm/cmpxchg.h b/arch/openrisc/include/asm/cmpxchg.h
index f9cd43a39d72..79fd16162ccb 100644
--- a/arch/openrisc/include/asm/cmpxchg.h
+++ b/arch/openrisc/include/asm/cmpxchg.h
@@ -132,7 +132,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	}
 }
 
-#define cmpxchg(ptr, o, n)						\
+#define arch_cmpxchg(ptr, o, n)						\
 	({								\
 		(__typeof__(*(ptr))) __cmpxchg((ptr),			\
 					       (unsigned long)(o),	\
@@ -161,7 +161,7 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long with,
 	}
 }
 
-#define xchg(ptr, with)							\
+#define arch_xchg(ptr, with)						\
 	({								\
 		(__typeof__(*(ptr))) __xchg((ptr),			\
 					    (unsigned long)(with),	\
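For context on the renames in the diff above: under ARCH_ATOMIC the architecture only supplies the arch_atomic_*() / arch_cmpxchg() primitives, and the generated instrumented wrappers provide the unprefixed atomic_*() names with sanitizer hooks layered on top. A minimal hand-written sketch of that layering (kernel-internal code, not the actual generated header, which is produced by the scripts/atomic/ machinery):

/* Illustrative sketch of an instrumented wrapper around the arch primitive. */
static __always_inline void
atomic_add(int i, atomic_t *v)
{
	instrument_atomic_read_write(v, sizeof(*v));	/* KASAN/KCSAN hook */
	arch_atomic_add(i, v);				/* openrisc ATOMIC_OP(add) above */
}

static __always_inline int
atomic_cmpxchg(atomic_t *v, int old, int new)
{
	instrument_atomic_read_write(v, sizeof(*v));
	return arch_atomic_cmpxchg(v, old, new);	/* built on arch_cmpxchg() from cmpxchg.h */
}

With every architecture providing the arch_ prefixed names, the transitional ARCH_ATOMIC #ifdef layer could be deleted, which is where the bulk of the 4094 removed lines in the series comes from.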