author		Guo Ren		2022-07-23 21:32:34 -0400
committer	Guo Ren		2022-07-31 05:24:42 -0400
commit		45e15c1a375ea380d55880be2f8182cb737b60ed (patch)
tree		d877ed3a2314bfe6ebfb2a6aa57ea9ef0ec4ec91 /arch/csky
parent		4e8bb4ba5a558159ffbfa7e60322a1c151c3903c (diff)
csky: Add qspinlock support
Enable qspinlock per the requirements spelled out in commit a8ad07e5240c9
("asm-generic: qspinlock: Indicate the use of mixed-size atomics").
C-SKY only has "ldex/stex" for implementing all atomic operations, and it
gives a strong forward-progress guarantee for "ldex/stex": once ldex has
grabbed the cache line into L1, other cores are blocked from snooping that
address for several cycles. The atomic_fetch_add and xchg16 operations
therefore have the same forward-progress guarantee on C-SKY.
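
For illustration, here is a portable C sketch (not part of the patch) of
how a 16-bit xchg can be emulated on top of word-sized atomics, mirroring
the masking scheme the cmpxchg.h hunk below implements with ldex.w/stex.w.
The function name and the use of GCC __atomic builtins are stand-ins for
the real inline assembly, and the shift calculation assumes a little-endian
layout, as on C-SKY:

#include <stdint.h>

/* Hypothetical helper, not the kernel's: exchange a halfword by
 * operating on the aligned 32-bit word that contains it. */
static inline uint16_t xchg16_sketch(uint16_t *ptr, uint16_t newval)
{
	uintptr_t addr = (uintptr_t)ptr;
	uint32_t shift = (addr & 2) ? 16 : 0;	/* little-endian layout */
	uint32_t mask = 0xffffu << shift;
	uint32_t *word = (uint32_t *)(addr & ~(uintptr_t)2);
	uint32_t old = __atomic_load_n(word, __ATOMIC_RELAXED);
	uint32_t val;

	do {
		/* Keep the other halfword, splice in the new one. */
		val = (old & ~mask) | ((uint32_t)newval << shift);
	} while (!__atomic_compare_exchange_n(word, &old, val, 0,
					      __ATOMIC_ACQ_REL,
					      __ATOMIC_RELAXED));
	return (uint16_t)((old & mask) >> shift);
}

The patch's assembly does the same splice inside a single ldex/stex
retry loop rather than a compare-and-swap loop, which is where the
forward-progress guarantee above comes in.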
Qspinlock also gives better code size and performance on the fast path.
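
That fast path is, schematically, a single 32-bit compare-and-swap on the
lock word. The following is a simplified sketch of what the generic
queued_spin_lock() boils down to, using __atomic builtins; the slow-path
function name is a hypothetical stand-in, not the kernel's symbol:

#include <stdint.h>

/* Hypothetical stand-in for the real qspinlock slow path. */
extern void queued_spin_lock_slowpath_sketch(uint32_t *lock, uint32_t val);

/* Simplified fast path: if the whole 32-bit lock word is 0
 * (unlocked, no pending bit, empty MCS tail), one compare-and-swap
 * with acquire ordering takes the lock. */
static inline void queued_spin_lock_sketch(uint32_t *lock)
{
	uint32_t expected = 0;

	if (__atomic_compare_exchange_n(lock, &expected, 1, 0,
					__ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
		return;
	queued_spin_lock_slowpath_sketch(lock, expected);
}
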
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Guo Ren <guoren@kernel.org>
Diffstat (limited to 'arch/csky')

 arch/csky/Kconfig                      |  1 +
 arch/csky/include/asm/Kbuild           |  4 ++--
 arch/csky/include/asm/cmpxchg.h        | 20 ++++++++++++++++++++
 arch/csky/include/asm/spinlock.h       | 12 ++++++++++++
 arch/csky/include/asm/spinlock_types.h |  9 +++++++++
 5 files changed, 44 insertions(+), 2 deletions(-)
diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
index 41d7d614f7a2..333def12faef 100644
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -8,6 +8,7 @@ config CSKY
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
 	select ARCH_USE_BUILTIN_BSWAP
 	select ARCH_USE_QUEUED_RWLOCKS
+	select ARCH_USE_QUEUED_SPINLOCKS
 	select ARCH_WANT_FRAME_POINTERS if !CPU_CK610 && $(cc-option,-mbacktrace)
 	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
 	select COMMON_CLK
diff --git a/arch/csky/include/asm/Kbuild b/arch/csky/include/asm/Kbuild
index 103207a58f97..1117c28cb7e8 100644
--- a/arch/csky/include/asm/Kbuild
+++ b/arch/csky/include/asm/Kbuild
@@ -3,10 +3,10 @@ generic-y += asm-offsets.h
 generic-y += extable.h
 generic-y += gpio.h
 generic-y += kvm_para.h
-generic-y += spinlock.h
-generic-y += spinlock_types.h
+generic-y += mcs_spinlock.h
 generic-y += qrwlock.h
 generic-y += qrwlock_types.h
+generic-y += qspinlock.h
 generic-y += parport.h
 generic-y += user.h
 generic-y += vmlinux.lds.h
diff --git a/arch/csky/include/asm/cmpxchg.h b/arch/csky/include/asm/cmpxchg.h
index 5b8faccd65e4..5f693fadb56c 100644
--- a/arch/csky/include/asm/cmpxchg.h
+++ b/arch/csky/include/asm/cmpxchg.h
@@ -15,6 +15,26 @@ extern void __bad_xchg(void);
 	__typeof__(*(ptr)) __ret;				\
 	unsigned long tmp;					\
 	switch (size) {						\
+	case 2: {						\
+		u32 ret;					\
+		u32 shif = ((ulong)__ptr & 2) ? 16 : 0;		\
+		u32 mask = 0xffff << shif;			\
+		__ptr = (__typeof__(ptr))((ulong)__ptr & ~2);	\
+		__asm__ __volatile__ (				\
+			"1:	ldex.w	%0, (%4)\n"		\
+			"	and	%1, %0, %2\n"		\
+			"	or	%1, %1, %3\n"		\
+			"	stex.w	%1, (%4)\n"		\
+			"	bez	%1, 1b\n"		\
+			: "=&r" (ret), "=&r" (tmp)		\
+			: "r" (~mask),				\
+			  "r" ((u32)__new << shif),		\
+			  "r" (__ptr)				\
+			: "memory");				\
+		__ret = (__typeof__(*(ptr)))			\
+			((ret & mask) >> shif);			\
+		break;						\
+	}							\
 	case 4:							\
 		asm volatile (					\
 		"1:	ldex.w		%0, (%3) \n"		\
diff --git a/arch/csky/include/asm/spinlock.h b/arch/csky/include/asm/spinlock.h
new file mode 100644
index 000000000000..83a2005341f5
--- /dev/null
+++ b/arch/csky/include/asm/spinlock.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_SPINLOCK_H
+#define __ASM_CSKY_SPINLOCK_H
+
+#include <asm/qspinlock.h>
+#include <asm/qrwlock.h>
+
+/* See include/linux/spinlock.h */
+#define smp_mb__after_spinlock()	smp_mb()
+
+#endif /* __ASM_CSKY_SPINLOCK_H */
diff --git a/arch/csky/include/asm/spinlock_types.h b/arch/csky/include/asm/spinlock_types.h
new file mode 100644
index 000000000000..75bdf3af80ba
--- /dev/null
+++ b/arch/csky/include/asm/spinlock_types.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_SPINLOCK_TYPES_H
+#define __ASM_CSKY_SPINLOCK_TYPES_H
+
+#include <asm-generic/qspinlock_types.h>
+#include <asm-generic/qrwlock_types.h>
+
+#endif /* __ASM_CSKY_SPINLOCK_TYPES_H */