author     Linus Torvalds  2022-08-04 15:27:20 -0700
committer  Linus Torvalds  2022-08-04 15:27:20 -0700
commit     7df9075e232e09d99cf23b657b6cb04c9506e618 (patch)
tree       2f1853fc2310fee78a91067315b78fdd733578d4 /arch
parent     25e6bed5a64836621e41bc9f9d97eb79f1bdfa1e (diff)
parent     45fef4c4b9c94e86d9c13f0b2e7e71bb32254509 (diff)
Merge tag 'csky-for-linus-6.0-rc1' of https://github.com/c-sky/csky-linux
Pull csky updates from Guo Ren:

 - Add jump-label implementation

 - Add qspinlock support

 - Enable ARCH_INLINE_READ*/WRITE*/SPIN*

 - Some fixups and a coding convention

* tag 'csky-for-linus-6.0-rc1' of https://github.com/c-sky/csky-linux:
  csky: abiv1: Fixup compile error
  csky: cmpxchg: Coding convention for BUILD_BUG()
  csky: Enable ARCH_INLINE_READ*/WRITE*/SPIN*
  csky: Add qspinlock support
  csky: Add jump-label implementation
  csky: Move HEAD_TEXT_SECTION out of __init_begin-end
  csky: Correct position of _stext
  csky: Use the bitmap API to allocate bitmaps
  csky/kprobe: reclaim insn_slot on kprobe unregistration
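The headline item is the jump-label implementation. As a rough illustration of what it enables (the key and functions below are hypothetical and not part of this series), a static key on csky now compiles to a single nop32 on the disabled fast path and is live-patched to a bsr32 by arch_jump_label_transform() when the key is flipped:

  #include <linux/jump_label.h>
  #include <linux/printk.h>

  /* Hypothetical example; the names here are made up for illustration. */
  static DEFINE_STATIC_KEY_FALSE(csky_demo_key);

  void csky_demo_hot_path(void)
  {
          /* Emitted as a nop32 while the key is disabled. */
          if (static_branch_unlikely(&csky_demo_key))
                  pr_info("slow path taken\n");
  }

  void csky_demo_enable(void)
  {
          /* Patches the nop32 above to a bsr32 at runtime. */
          static_branch_enable(&csky_demo_key);
  }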
Diffstat (limited to 'arch')
-rw-r--r--  arch/csky/Kconfig                        29
-rw-r--r--  arch/csky/abiv1/inc/abi/string.h          6
-rw-r--r--  arch/csky/include/asm/Kbuild              4
-rw-r--r--  arch/csky/include/asm/cmpxchg.h          31
-rw-r--r--  arch/csky/include/asm/jump_label.h       47
-rw-r--r--  arch/csky/include/asm/sections.h         10
-rw-r--r--  arch/csky/include/asm/spinlock.h         12
-rw-r--r--  arch/csky/include/asm/spinlock_types.h    9
-rw-r--r--  arch/csky/kernel/Makefile                 1
-rw-r--r--  arch/csky/kernel/jump_label.c            54
-rw-r--r--  arch/csky/kernel/probes/kprobes.c         4
-rw-r--r--  arch/csky/kernel/setup.c                  4
-rw-r--r--  arch/csky/kernel/vmlinux.lds.S           15
-rw-r--r--  arch/csky/mm/asid.c                       5
14 files changed, 211 insertions, 20 deletions
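One change worth calling out before the patch body: the new case-2 path in arch/csky/include/asm/cmpxchg.h emulates a 16-bit xchg (needed by qspinlock) with a 32-bit ldex/stex sequence on the containing aligned word. A rough portable C sketch of the same idea (illustration only; it uses a GCC __atomic compare-and-swap loop in place of LL/SC and assumes the little-endian halfword layout csky uses):

  #include <stdint.h>

  static uint16_t xchg16_via_word(uint16_t *ptr, uint16_t newval)
  {
          uintptr_t addr = (uintptr_t)ptr;
          /* Align down to the containing 32-bit word. */
          uint32_t *word = (uint32_t *)(addr & ~(uintptr_t)2);
          /* Pick the halfword within the word (little-endian layout). */
          unsigned int shift = (addr & 2) ? 16 : 0;
          uint32_t mask = 0xffffu << shift;
          uint32_t old, val;

          old = __atomic_load_n(word, __ATOMIC_RELAXED);
          do {
                  /* Splice in the new halfword, leaving the other half untouched. */
                  val = (old & ~mask) | ((uint32_t)newval << shift);
          } while (!__atomic_compare_exchange_n(word, &old, val, 1,
                                                __ATOMIC_RELAXED, __ATOMIC_RELAXED));

          /* Return the previous halfword, matching __xchg_relaxed(). */
          return (uint16_t)((old & mask) >> shift);
  }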
diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
index f55ba1745f7b..3cbc2dc62baf 100644
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -8,6 +8,33 @@ config CSKY
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_QUEUED_RWLOCKS
+ select ARCH_USE_QUEUED_SPINLOCKS
+ select ARCH_INLINE_READ_LOCK if !PREEMPTION
+ select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION
+ select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPTION
+ select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPTION
+ select ARCH_INLINE_READ_UNLOCK if !PREEMPTION
+ select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPTION
+ select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPTION
+ select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPTION
+ select ARCH_INLINE_WRITE_LOCK if !PREEMPTION
+ select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPTION
+ select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPTION
+ select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPTION
+ select ARCH_INLINE_WRITE_UNLOCK if !PREEMPTION
+ select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPTION
+ select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPTION
+ select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPTION
+ select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPTION
+ select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPTION
+ select ARCH_INLINE_SPIN_LOCK if !PREEMPTION
+ select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPTION
+ select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPTION
+ select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPTION
+ select ARCH_INLINE_SPIN_UNLOCK if !PREEMPTION
+ select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPTION
+ select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPTION
+ select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION
select ARCH_WANT_FRAME_POINTERS if !CPU_CK610 && $(cc-option,-mbacktrace)
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
select COMMON_CLK
@@ -40,6 +67,8 @@ config CSKY
select GX6605S_TIMER if CPU_CK610
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_ARCH_JUMP_LABEL if !CPU_CK610
+ select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_CONTEXT_TRACKING_USER
diff --git a/arch/csky/abiv1/inc/abi/string.h b/arch/csky/abiv1/inc/abi/string.h
index 9d95594b0feb..de50117b904d 100644
--- a/arch/csky/abiv1/inc/abi/string.h
+++ b/arch/csky/abiv1/inc/abi/string.h
@@ -6,4 +6,10 @@
#define __HAVE_ARCH_MEMCPY
extern void *memcpy(void *, const void *, __kernel_size_t);
+#define __HAVE_ARCH_MEMMOVE
+extern void *memmove(void *, const void *, __kernel_size_t);
+
+#define __HAVE_ARCH_MEMSET
+extern void *memset(void *, int, __kernel_size_t);
+
#endif /* __ABI_CSKY_STRING_H */
diff --git a/arch/csky/include/asm/Kbuild b/arch/csky/include/asm/Kbuild
index 103207a58f97..1117c28cb7e8 100644
--- a/arch/csky/include/asm/Kbuild
+++ b/arch/csky/include/asm/Kbuild
@@ -3,10 +3,10 @@ generic-y += asm-offsets.h
generic-y += extable.h
generic-y += gpio.h
generic-y += kvm_para.h
-generic-y += spinlock.h
-generic-y += spinlock_types.h
+generic-y += mcs_spinlock.h
generic-y += qrwlock.h
generic-y += qrwlock_types.h
+generic-y += qspinlock.h
generic-y += parport.h
generic-y += user.h
generic-y += vmlinux.lds.h
diff --git a/arch/csky/include/asm/cmpxchg.h b/arch/csky/include/asm/cmpxchg.h
index 5b8faccd65e4..916043b845f1 100644
--- a/arch/csky/include/asm/cmpxchg.h
+++ b/arch/csky/include/asm/cmpxchg.h
@@ -4,10 +4,9 @@
#define __ASM_CSKY_CMPXCHG_H
#ifdef CONFIG_SMP
+#include <linux/bug.h>
#include <asm/barrier.h>
-extern void __bad_xchg(void);
-
#define __xchg_relaxed(new, ptr, size) \
({ \
__typeof__(ptr) __ptr = (ptr); \
@@ -15,6 +14,26 @@ extern void __bad_xchg(void);
__typeof__(*(ptr)) __ret; \
unsigned long tmp; \
switch (size) { \
+ case 2: { \
+ u32 ret; \
+ u32 shif = ((ulong)__ptr & 2) ? 16 : 0; \
+ u32 mask = 0xffff << shif; \
+ __ptr = (__typeof__(ptr))((ulong)__ptr & ~2); \
+ __asm__ __volatile__ ( \
+ "1: ldex.w %0, (%4)\n" \
+ " and %1, %0, %2\n" \
+ " or %1, %1, %3\n" \
+ " stex.w %1, (%4)\n" \
+ " bez %1, 1b\n" \
+ : "=&r" (ret), "=&r" (tmp) \
+ : "r" (~mask), \
+ "r" ((u32)__new << shif), \
+ "r" (__ptr) \
+ : "memory"); \
+ __ret = (__typeof__(*(ptr))) \
+ ((ret & mask) >> shif); \
+ break; \
+ } \
case 4: \
asm volatile ( \
"1: ldex.w %0, (%3) \n" \
@@ -26,7 +45,7 @@ extern void __bad_xchg(void);
:); \
break; \
default: \
- __bad_xchg(); \
+ BUILD_BUG(); \
} \
__ret; \
})
@@ -56,7 +75,7 @@ extern void __bad_xchg(void);
:); \
break; \
default: \
- __bad_xchg(); \
+ BUILD_BUG(); \
} \
__ret; \
})
@@ -87,7 +106,7 @@ extern void __bad_xchg(void);
:); \
break; \
default: \
- __bad_xchg(); \
+ BUILD_BUG(); \
} \
__ret; \
})
@@ -119,7 +138,7 @@ extern void __bad_xchg(void);
:); \
break; \
default: \
- __bad_xchg(); \
+ BUILD_BUG(); \
} \
__ret; \
})
diff --git a/arch/csky/include/asm/jump_label.h b/arch/csky/include/asm/jump_label.h
new file mode 100644
index 000000000000..d488ba6084bc
--- /dev/null
+++ b/arch/csky/include/asm/jump_label.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_CSKY_JUMP_LABEL_H
+#define __ASM_CSKY_JUMP_LABEL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+#define JUMP_LABEL_NOP_SIZE 4
+
+static __always_inline bool arch_static_branch(struct static_key *key,
+ bool branch)
+{
+ asm_volatile_goto(
+ "1: nop32 \n"
+ " .pushsection __jump_table, \"aw\" \n"
+ " .align 2 \n"
+ " .long 1b - ., %l[label] - . \n"
+ " .long %0 - . \n"
+ " .popsection \n"
+ : : "i"(&((char *)key)[branch]) : : label);
+
+ return false;
+label:
+ return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key,
+ bool branch)
+{
+ asm_volatile_goto(
+ "1: bsr32 %l[label] \n"
+ " .pushsection __jump_table, \"aw\" \n"
+ " .align 2 \n"
+ " .long 1b - ., %l[label] - . \n"
+ " .long %0 - . \n"
+ " .popsection \n"
+ : : "i"(&((char *)key)[branch]) : : label);
+
+ return false;
+label:
+ return true;
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_CSKY_JUMP_LABEL_H */
diff --git a/arch/csky/include/asm/sections.h b/arch/csky/include/asm/sections.h
new file mode 100644
index 000000000000..4192cba8445d
--- /dev/null
+++ b/arch/csky/include/asm/sections.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_SECTIONS_H
+#define __ASM_SECTIONS_H
+
+#include <asm-generic/sections.h>
+
+extern char _start[];
+
+#endif /* __ASM_SECTIONS_H */
diff --git a/arch/csky/include/asm/spinlock.h b/arch/csky/include/asm/spinlock.h
new file mode 100644
index 000000000000..83a2005341f5
--- /dev/null
+++ b/arch/csky/include/asm/spinlock.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_SPINLOCK_H
+#define __ASM_CSKY_SPINLOCK_H
+
+#include <asm/qspinlock.h>
+#include <asm/qrwlock.h>
+
+/* See include/linux/spinlock.h */
+#define smp_mb__after_spinlock() smp_mb()
+
+#endif /* __ASM_CSKY_SPINLOCK_H */
diff --git a/arch/csky/include/asm/spinlock_types.h b/arch/csky/include/asm/spinlock_types.h
new file mode 100644
index 000000000000..75bdf3af80ba
--- /dev/null
+++ b/arch/csky/include/asm/spinlock_types.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_SPINLOCK_TYPES_H
+#define __ASM_CSKY_SPINLOCK_TYPES_H
+
+#include <asm-generic/qspinlock_types.h>
+#include <asm-generic/qrwlock_types.h>
+
+#endif /* __ASM_CSKY_SPINLOCK_TYPES_H */
diff --git a/arch/csky/kernel/Makefile b/arch/csky/kernel/Makefile
index 4eb41421ca5b..6f14c924b20d 100644
--- a/arch/csky/kernel/Makefile
+++ b/arch/csky/kernel/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_CSKY_PMU_V1) += perf_event.o
obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o
obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o
+obj-$(CONFIG_JUMP_LABEL) += jump_label.o
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
diff --git a/arch/csky/kernel/jump_label.c b/arch/csky/kernel/jump_label.c
new file mode 100644
index 000000000000..d0e8b21447e1
--- /dev/null
+++ b/arch/csky/kernel/jump_label.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/jump_label.h>
+#include <linux/kernel.h>
+#include <linux/memory.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+
+#define NOP32_HI 0xc400
+#define NOP32_LO 0x4820
+#define BSR_LINK 0xe000
+
+void arch_jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ unsigned long addr = jump_entry_code(entry);
+ u16 insn[2];
+ int ret = 0;
+
+ if (type == JUMP_LABEL_JMP) {
+ long offset = jump_entry_target(entry) - jump_entry_code(entry);
+
+ if (WARN_ON(offset & 1 || offset < -67108864 || offset >= 67108864))
+ return;
+
+ offset = offset >> 1;
+
+ insn[0] = BSR_LINK |
+ ((uint16_t)((unsigned long) offset >> 16) & 0x3ff);
+ insn[1] = (uint16_t)((unsigned long) offset & 0xffff);
+ } else {
+ insn[0] = NOP32_HI;
+ insn[1] = NOP32_LO;
+ }
+
+ ret = copy_to_kernel_nofault((void *)addr, insn, 4);
+ WARN_ON(ret);
+
+ flush_icache_range(addr, addr + 4);
+}
+
+void arch_jump_label_transform_static(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ /*
+ * We use the same instructions in the arch_static_branch and
+ * arch_static_branch_jump inline functions, so there's no
+ * need to patch them up here.
+ * The core will call arch_jump_label_transform when those
+ * instructions need to be replaced.
+ */
+ arch_jump_label_transform(entry, type);
+}
diff --git a/arch/csky/kernel/probes/kprobes.c b/arch/csky/kernel/probes/kprobes.c
index 34ba684d5962..3c6e5c725d81 100644
--- a/arch/csky/kernel/probes/kprobes.c
+++ b/arch/csky/kernel/probes/kprobes.c
@@ -124,6 +124,10 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
+ if (p->ainsn.api.insn) {
+ free_insn_slot(p->ainsn.api.insn, 0);
+ p->ainsn.api.insn = NULL;
+ }
}
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
diff --git a/arch/csky/kernel/setup.c b/arch/csky/kernel/setup.c
index c64e7be2045b..106fbf0b6f3b 100644
--- a/arch/csky/kernel/setup.c
+++ b/arch/csky/kernel/setup.c
@@ -31,7 +31,7 @@ static void __init csky_memblock_init(void)
unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
signed long size;
- memblock_reserve(__pa(_stext), _end - _stext);
+ memblock_reserve(__pa(_start), _end - _start);
early_init_fdt_reserve_self();
early_init_fdt_scan_reserved_mem();
@@ -78,7 +78,7 @@ void __init setup_arch(char **cmdline_p)
pr_info("Phys. mem: %ldMB\n",
(unsigned long) memblock_phys_mem_size()/1024/1024);
- setup_initial_init_mm(_stext, _etext, _edata, _end);
+ setup_initial_init_mm(_start, _etext, _edata, _end);
parse_early_param();
diff --git a/arch/csky/kernel/vmlinux.lds.S b/arch/csky/kernel/vmlinux.lds.S
index e8b1a4a49798..68c980d08482 100644
--- a/arch/csky/kernel/vmlinux.lds.S
+++ b/arch/csky/kernel/vmlinux.lds.S
@@ -22,17 +22,13 @@ SECTIONS
{
. = PAGE_OFFSET + PHYS_OFFSET_OFFSET;
- _stext = .;
- __init_begin = .;
+ _start = .;
HEAD_TEXT_SECTION
- INIT_TEXT_SECTION(PAGE_SIZE)
- INIT_DATA_SECTION(PAGE_SIZE)
- PERCPU_SECTION(L1_CACHE_BYTES)
. = ALIGN(PAGE_SIZE);
- __init_end = .;
.text : AT(ADDR(.text) - LOAD_OFFSET) {
_text = .;
+ _stext = .;
VBR_BASE
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
@@ -48,7 +44,12 @@ SECTIONS
/* __init_begin __init_end must be page aligned for free_initmem */
. = ALIGN(PAGE_SIZE);
-
+ __init_begin = .;
+ INIT_TEXT_SECTION(PAGE_SIZE)
+ INIT_DATA_SECTION(PAGE_SIZE)
+ PERCPU_SECTION(L1_CACHE_BYTES)
+ . = ALIGN(PAGE_SIZE);
+ __init_end = .;
_sdata = .;
RO_DATA(PAGE_SIZE)
diff --git a/arch/csky/mm/asid.c b/arch/csky/mm/asid.c
index b2e914745c1d..7fb6c417bbac 100644
--- a/arch/csky/mm/asid.c
+++ b/arch/csky/mm/asid.c
@@ -27,7 +27,7 @@ static void flush_context(struct asid_info *info)
u64 asid;
/* Update the list of reserved ASIDs and the ASID bitmap. */
- bitmap_clear(info->map, 0, NUM_CTXT_ASIDS(info));
+ bitmap_zero(info->map, NUM_CTXT_ASIDS(info));
for_each_possible_cpu(i) {
asid = atomic64_xchg_relaxed(&active_asid(info, i), 0);
@@ -178,8 +178,7 @@ int asid_allocator_init(struct asid_info *info,
*/
WARN_ON(NUM_CTXT_ASIDS(info) - 1 <= num_possible_cpus());
atomic64_set(&info->generation, ASID_FIRST_VERSION(info));
- info->map = kcalloc(BITS_TO_LONGS(NUM_CTXT_ASIDS(info)),
- sizeof(*info->map), GFP_KERNEL);
+ info->map = bitmap_zalloc(NUM_CTXT_ASIDS(info), GFP_KERNEL);
if (!info->map)
return -ENOMEM;