author		Linus Torvalds	2018-10-23 13:08:53 +0100
committer	Linus Torvalds	2018-10-23 13:08:53 +0100
commit		0200fbdd431519d730b5d399a12840ec832b27cc (patch)
tree		2b58f9e24b61b00e0550f106c95bfabc3b52cfdd /arch/s390
parent		de3fbb2aa802a267dee2213ae7d5a1e19eb4294a (diff)
parent		01a14bda11add9dcd4a59200f13834d634559935 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking and misc x86 updates from Ingo Molnar:
"Lots of changes in this cycle - in part because locking/core attracted
a number of related x86 low level work which was easier to handle in a
single tree:
- Linux Kernel Memory Consistency Model updates (Alan Stern, Paul E.
McKenney, Andrea Parri)
- lockdep scalability improvements and micro-optimizations (Waiman
Long)
- rwsem improvements (Waiman Long)
- spinlock micro-optimization (Matthew Wilcox)
- qspinlocks: Provide a liveness guarantee (more fairness) on x86.
(Peter Zijlstra)
- Add support for relative references in jump tables on arm64, x86
and s390 to optimize jump labels (Ard Biesheuvel, Heiko Carstens);
see the sketch just after this list
- Be a lot less permissive on weird (kernel address) uaccess faults
on x86: BUG() when uaccess helpers fault on kernel addresses (Jann
Horn)
- macrofy x86 asm statements to un-confuse the GCC inliner. (Nadav
Amit)
- ... and a handful of other smaller changes as well"
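For context on the jump-table item above: with relative references, each
__jump_table entry stores signed offsets from the entry's own address
instead of absolute pointers, so the table needs no relocation fixups
under KASLR and shrinks from 24 to 16 bytes per entry on 64-bit. Below is
a minimal C sketch of the layout and the generic accessors that the s390
diff further down uses; the stdint types are stand-ins chosen to keep the
example self-contained, and the accessor bodies mirror the generic
HAVE_ARCH_JUMP_LABEL_RELATIVE implementation as introduced by this series.

    #include <stdint.h>

    /*
     * Sketch of a relative __jump_table entry. Each field holds an
     * offset relative to the field's own address.
     */
    struct jump_entry {
            int32_t code;   /* branch site, relative to &entry->code */
            int32_t target; /* jump target, relative to &entry->target */
            int64_t key;    /* struct static_key, relative to &entry->key */
    };

    static inline unsigned long jump_entry_code(const struct jump_entry *entry)
    {
            return (unsigned long)&entry->code + entry->code;
    }

    static inline unsigned long jump_entry_target(const struct jump_entry *entry)
    {
            return (unsigned long)&entry->target + entry->target;
    }

The s390 inline asm in the diff below emits exactly this shape: a
".long 0b-.,%l[label]-." pair for code and target, then ".quad %0-."
for the key.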
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (57 commits)
locking/lockdep: Make global debug_locks* variables read-mostly
locking/lockdep: Fix debug_locks off performance problem
locking/pvqspinlock: Extend node size when pvqspinlock is configured
locking/qspinlock_stat: Count instances of nested lock slowpaths
locking/qspinlock, x86: Provide liveness guarantee
x86/asm: 'Simplify' GEN_*_RMWcc() macros
locking/qspinlock: Rework some comments
locking/qspinlock: Re-order code
locking/lockdep: Remove duplicated 'lock_class_ops' percpu array
x86/defconfig: Enable CONFIG_USB_XHCI_HCD=y
futex: Replace spin_is_locked() with lockdep
locking/lockdep: Make class->ops a percpu counter and move it under CONFIG_DEBUG_LOCKDEP=y
x86/jump-labels: Macrofy inline assembly code to work around GCC inlining bugs
x86/cpufeature: Macrofy inline assembly code to work around GCC inlining bugs
x86/extable: Macrofy inline assembly code to work around GCC inlining bugs
x86/paravirt: Work around GCC inlining bugs when compiling paravirt ops
x86/bug: Macrofy the BUG table section handling, to work around GCC inlining bugs
x86/alternatives: Macrofy lock prefixes to work around GCC inlining bugs
x86/refcount: Work around GCC inlining bug
x86/objtool: Use asm macros to work around GCC inlining bugs
...
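Several of the x86 commits above ("Macrofy inline assembly ...") share one
workaround pattern: GCC estimates the size of a function partly from the
textual length of its inline asm, so large asm bodies (bug-table entries,
exception-table entries, lock prefixes) made the inliner refuse to inline
otherwise tiny functions. The fix moves the asm body into an assembler
macro - in the actual series those live in arch/x86/kernel/macros.S and
are fed to the assembler for every translation unit - leaving only a short
mnemonic visible to GCC. A minimal sketch of the idea; ASM_BUG matches
the macro name used by the series, but the body here is abbreviated and
illustrative:

    /* Before: the full asm body inflates GCC's size estimate at every use. */
    #define BUG_BEFORE()                                            \
            asm volatile("ud2\n\t"                                  \
                         ".pushsection __bug_table, \"aw\"\n\t"     \
                         /* ...several more directives... */        \
                         ".popsection")

    /* After: define an assembler macro once at the assembler level... */
    asm(".macro ASM_BUG\n\t"
        "ud2\n\t"
        ".pushsection __bug_table, \"aw\"\n\t"
        /* ...same directives as above... */
        ".popsection\n\t"
        ".endm");

    /* ...so each use site expands to a single short asm statement. */
    #define BUG_AFTER()     asm volatile("ASM_BUG")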
Diffstat (limited to 'arch/s390')
-rw-r--r--	arch/s390/Kconfig                  |  1
-rw-r--r--	arch/s390/include/asm/jump_label.h | 40
-rw-r--r--	arch/s390/kernel/jump_label.c      | 11
-rw-r--r--	arch/s390/kernel/vmlinux.lds.S     |  1
4 files changed, 24 insertions, 29 deletions
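One detail worth noting before the diff: the vmlinux.lds.S hunk at the end
places the jump table in .data..ro_after_init, since the table is only ever
written by the patching code (via s390_kernel_write() here) and can be
mapped read-only after init. The generic JUMP_TABLE_DATA macro it uses is
defined by this series in include/asm-generic/vmlinux.lds.h, roughly as
follows (quoted from memory, so treat it as a sketch):

    #define JUMP_TABLE_DATA                                 \
            . = ALIGN(8);                                   \
            __start___jump_table = .;                       \
            KEEP(*(__jump_table))                           \
            __stop___jump_table = .;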
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index cc8313550493..039a3417dfc4 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -126,6 +126,7 @@ config S390
 	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_JUMP_LABEL
+	select HAVE_ARCH_JUMP_LABEL_RELATIVE
 	select HAVE_ARCH_KASAN
 	select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES
 	select HAVE_ARCH_SECCOMP_FILTER
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index 40f651292aa7..e2d3e6c43395 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -14,41 +14,33 @@
  * We use a brcl 0,2 instruction for jump labels at compile time so it
  * can be easily distinguished from a hotpatch generated instruction.
  */
-static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+static inline bool arch_static_branch(struct static_key *key, bool branch)
 {
-	asm_volatile_goto("0:	brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n"
-		".pushsection __jump_table, \"aw\"\n"
-		".balign 8\n"
-		".quad 0b, %l[label], %0\n"
-		".popsection\n"
-		: : "X" (&((char *)key)[branch]) : : label);
-
+	asm_volatile_goto("0:	brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n"
+			  ".pushsection __jump_table,\"aw\"\n"
+			  ".balign 8\n"
+			  ".long 0b-.,%l[label]-.\n"
+			  ".quad %0-.\n"
+			  ".popsection\n"
+			  : : "X" (&((char *)key)[branch]) : : label);
 	return false;
 label:
 	return true;
 }
 
-static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+static inline bool arch_static_branch_jump(struct static_key *key, bool branch)
 {
-	asm_volatile_goto("0:	brcl 15, %l[label]\n"
-		".pushsection __jump_table, \"aw\"\n"
-		".balign 8\n"
-		".quad 0b, %l[label], %0\n"
-		".popsection\n"
-		: : "X" (&((char *)key)[branch]) : : label);
-
+	asm_volatile_goto("0:	brcl 15,%l[label]\n"
+			  ".pushsection __jump_table,\"aw\"\n"
+			  ".balign 8\n"
+			  ".long 0b-.,%l[label]-.\n"
+			  ".quad %0-.\n"
+			  ".popsection\n"
+			  : : "X" (&((char *)key)[branch]) : : label);
 	return false;
 label:
 	return true;
 }
 
-typedef unsigned long jump_label_t;
-
-struct jump_entry {
-	jump_label_t code;
-	jump_label_t target;
-	jump_label_t key;
-};
-
 #endif	/* __ASSEMBLY__ */
 #endif
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
index 43f8430fb67d..50a1798604a8 100644
--- a/arch/s390/kernel/jump_label.c
+++ b/arch/s390/kernel/jump_label.c
@@ -33,13 +33,13 @@ static void jump_label_make_branch(struct jump_entry *entry, struct insn *insn)
 {
 	/* brcl 15,offset */
 	insn->opcode = 0xc0f4;
-	insn->offset = (entry->target - entry->code) >> 1;
+	insn->offset = (jump_entry_target(entry) - jump_entry_code(entry)) >> 1;
 }
 
 static void jump_label_bug(struct jump_entry *entry, struct insn *expected,
 			   struct insn *new)
 {
-	unsigned char *ipc = (unsigned char *)entry->code;
+	unsigned char *ipc = (unsigned char *)jump_entry_code(entry);
 	unsigned char *ipe = (unsigned char *)expected;
 	unsigned char *ipn = (unsigned char *)new;
 
@@ -59,6 +59,7 @@ static void __jump_label_transform(struct jump_entry *entry,
 				   enum jump_label_type type,
 				   int init)
 {
+	void *code = (void *)jump_entry_code(entry);
 	struct insn old, new;
 
 	if (type == JUMP_LABEL_JMP) {
@@ -69,13 +70,13 @@ static void __jump_label_transform(struct jump_entry *entry,
 		jump_label_make_nop(entry, &new);
 	}
 	if (init) {
-		if (memcmp((void *)entry->code, &orignop, sizeof(orignop)))
+		if (memcmp(code, &orignop, sizeof(orignop)))
 			jump_label_bug(entry, &orignop, &new);
 	} else {
-		if (memcmp((void *)entry->code, &old, sizeof(old)))
+		if (memcmp(code, &old, sizeof(old)))
 			jump_label_bug(entry, &old, &new);
 	}
-	s390_kernel_write((void *)entry->code, &new, sizeof(new));
+	s390_kernel_write(code, &new, sizeof(new));
 }
 
 static int __sm_arch_jump_label_transform(void *data)
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index cc3cbdc93d35..21eb7407d51b 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -65,6 +65,7 @@ SECTIONS
 	__start_ro_after_init = .;
 	.data..ro_after_init : {
 		*(.data..ro_after_init)
+		JUMP_TABLE_DATA
 	}
 	EXCEPTION_TABLE(16)
 	. = ALIGN(PAGE_SIZE);
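A note on the offset arithmetic in jump_label_make_branch() above: s390
relative branches such as brcl encode their RI2 operand in halfwords
(2-byte units), so the byte distance between branch site and target is
shifted right by one before being stored. A self-contained C sketch;
make_branch() is a hypothetical stand-in for the function in the patch:

    #include <stdint.h>

    struct insn {
            uint16_t opcode;        /* 0xc0f4 == brcl 15,... (branch always) */
            int32_t  offset;        /* RI2: signed offset in halfwords */
    } __attribute__((packed));

    static void make_branch(struct insn *insn, unsigned long code,
                            unsigned long target)
    {
            insn->opcode = 0xc0f4;
            /* s390 branch offsets count halfwords, hence the >> 1 */
            insn->offset = (int64_t)(target - code) >> 1;
    }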