author     Max Filippov  2021-07-23 23:17:04 -0700
committer  Max Filippov  2021-10-18 22:19:34 -0700
commit     eda8dd1224d6c1c89eb6b687264da9ccfbffb0fd (patch)
tree       7298c07e5bd0956395a87e9c3cee209ce4bdb434 /arch/xtensa
parent     e369953a5ba3295379095060f4ac72958da7c125 (diff)
xtensa: use a14 instead of a15 in inline assembly
a15 is the frame pointer in the call0 xtensa ABI, so don't use it explicitly
in inline assembly. Use a14 instead, as it has the same properties as a15
w.r.t. window overflow.
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
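
For context, the interrupt-locking pattern the message refers to looks roughly like the sketch below. This is a minimal illustration rather than code from the patch: it assumes an xtensa target running at a privilege level where rsil is permitted, uses a literal interrupt level 1 in place of the kernel's TOPLEVEL constant, and the function name is made up.

/*
 * Minimal sketch (not from this patch) of the rsil/wsr critical-section
 * pattern with a14 as the scratch register. Assumptions: xtensa target,
 * privileged code, interrupt level 1 standing in for TOPLEVEL,
 * hypothetical function name.
 */
static inline unsigned int atomic_add_sketch(volatile unsigned int *p,
					     unsigned int i)
{
	unsigned int val;

	__asm__ __volatile__(
		"	rsil	a14, 1\n"	/* raise PS.INTLEVEL, old PS saved in a14 */
		"	l32i	%[val], %[mem]\n"
		"	add	%[val], %[val], %[i]\n"
		"	s32i	%[val], %[mem]\n"
		"	wsr	a14, ps\n"	/* restore the PS value saved by rsil */
		"	rsync\n"
		: [val] "=&a" (val), [mem] "+m" (*p)
		: [i] "a" (i)
		: "a14", "memory");

	return val;
}

Listing "a14" in the clobber list keeps the compiler from holding a live value in that register across the sequence; the diff below applies exactly this substitution of a14 for a15 throughout atomic.h and cmpxchg.h.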
Diffstat (limited to 'arch/xtensa')
-rw-r--r--  arch/xtensa/include/asm/atomic.h   | 26
-rw-r--r--  arch/xtensa/include/asm/cmpxchg.h  | 16
2 files changed, 21 insertions, 21 deletions
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index 4361fe4247e3..52da614f953c 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -25,15 +25,15 @@
  *
  * Locking interrupts looks like this:
  *
- *    rsil a15, TOPLEVEL
+ *    rsil a14, TOPLEVEL
  *    <code>
- *    wsr a15, PS
+ *    wsr a14, PS
  *    rsync
  *
- * Note that a15 is used here because the register allocation
+ * Note that a14 is used here because the register allocation
  * done by the compiler is not guaranteed and a window overflow
  * may not occur between the rsil and wsr instructions. By using
- * a15 in the rsil, the machine is guaranteed to be in a state
+ * a14 in the rsil, the machine is guaranteed to be in a state
  * where no register reference will cause an overflow.
  */
@@ -185,15 +185,15 @@ static inline void arch_atomic_##op(int i, atomic_t * v) \
 	unsigned int vval; \
 	\
 	__asm__ __volatile__( \
-	" rsil a15, "__stringify(TOPLEVEL)"\n" \
+	" rsil a14, "__stringify(TOPLEVEL)"\n" \
 	" l32i %[result], %[mem]\n" \
 	" " #op " %[result], %[result], %[i]\n" \
 	" s32i %[result], %[mem]\n" \
-	" wsr a15, ps\n" \
+	" wsr a14, ps\n" \
 	" rsync\n" \
 	: [result] "=&a" (vval), [mem] "+m" (*v) \
 	: [i] "a" (i) \
-	: "a15", "memory" \
+	: "a14", "memory" \
 	); \
 } \
@@ -203,15 +203,15 @@ static inline int arch_atomic_##op##_return(int i, atomic_t * v) \
 	unsigned int vval; \
 	\
 	__asm__ __volatile__( \
-	" rsil a15,"__stringify(TOPLEVEL)"\n" \
+	" rsil a14,"__stringify(TOPLEVEL)"\n" \
 	" l32i %[result], %[mem]\n" \
 	" " #op " %[result], %[result], %[i]\n" \
 	" s32i %[result], %[mem]\n" \
-	" wsr a15, ps\n" \
+	" wsr a14, ps\n" \
 	" rsync\n" \
 	: [result] "=&a" (vval), [mem] "+m" (*v) \
 	: [i] "a" (i) \
-	: "a15", "memory" \
+	: "a14", "memory" \
 	); \
 	\
 	return vval; \
@@ -223,16 +223,16 @@ static inline int arch_atomic_fetch_##op(int i, atomic_t * v) \
 	unsigned int tmp, vval; \
 	\
 	__asm__ __volatile__( \
-	" rsil a15,"__stringify(TOPLEVEL)"\n" \
+	" rsil a14,"__stringify(TOPLEVEL)"\n" \
 	" l32i %[result], %[mem]\n" \
 	" " #op " %[tmp], %[result], %[i]\n" \
 	" s32i %[tmp], %[mem]\n" \
-	" wsr a15, ps\n" \
+	" wsr a14, ps\n" \
 	" rsync\n" \
 	: [result] "=&a" (vval), [tmp] "=&a" (tmp), \
 	  [mem] "+m" (*v) \
 	: [i] "a" (i) \
-	: "a15", "memory" \
+	: "a14", "memory" \
 	); \
 	\
 	return vval; \
diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h
index 3699e2818efb..eb87810357ad 100644
--- a/arch/xtensa/include/asm/cmpxchg.h
+++ b/arch/xtensa/include/asm/cmpxchg.h
@@ -52,16 +52,16 @@ __cmpxchg_u32(volatile int *p, int old, int new)
 	return new;
 #else
 	__asm__ __volatile__(
-	" rsil a15, "__stringify(TOPLEVEL)"\n"
+	" rsil a14, "__stringify(TOPLEVEL)"\n"
 	" l32i %[old], %[mem]\n"
 	" bne %[old], %[cmp], 1f\n"
 	" s32i %[new], %[mem]\n"
 	"1:\n"
-	" wsr a15, ps\n"
+	" wsr a14, ps\n"
 	" rsync\n"
 	: [old] "=&a" (old), [mem] "+m" (*p)
 	: [cmp] "a" (old), [new] "r" (new)
-	: "a15", "memory");
+	: "a14", "memory");
 	return old;
 #endif
 }
@@ -116,10 +116,10 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 /*
  * xchg_u32
  *
- * Note that a15 is used here because the register allocation
+ * Note that a14 is used here because the register allocation
  * done by the compiler is not guaranteed and a window overflow
  * may not occur between the rsil and wsr instructions. By using
- * a15 in the rsil, the machine is guaranteed to be in a state
+ * a14 in the rsil, the machine is guaranteed to be in a state
  * where no register reference will cause an overflow.
  */
@@ -157,14 +157,14 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
 #else
 	unsigned long tmp;
 	__asm__ __volatile__(
-	" rsil a15, "__stringify(TOPLEVEL)"\n"
+	" rsil a14, "__stringify(TOPLEVEL)"\n"
 	" l32i %[tmp], %[mem]\n"
 	" s32i %[val], %[mem]\n"
-	" wsr a15, ps\n"
+	" wsr a14, ps\n"
 	" rsync\n"
 	: [tmp] "=&a" (tmp), [mem] "+m" (*m)
 	: [val] "a" (val)
-	: "a15", "memory");
+	: "a14", "memory");
 	return tmp;
 #endif
 }