From bd49e16e3339f052fae05fb3e955c5db0c9c6445 Mon Sep 17 00:00:00 2001
From: Andy Lutomirski
Date: Wed, 26 Jun 2019 21:45:03 -0700
Subject: x86/vsyscall: Add a new vsyscall=xonly mode

With vsyscall emulation on, a readable vsyscall page is still exposed that
contains syscall instructions that validly implement the vsyscalls.

This is required because certain dynamic binary instrumentation tools
attempt to read the call targets of call instructions in the instrumented
code.  If the instrumented code uses vsyscalls, then the vsyscall page
needs to contain readable code.

Unfortunately, leaving readable memory at a deterministic address can be
used to help various ASLR bypasses, so some hardening value can be gained
by disallowing vsyscall reads.

Given how rarely the vsyscall page needs to be readable, add a mechanism
to make the vsyscall page be execute only.

Signed-off-by: Andy Lutomirski
Signed-off-by: Thomas Gleixner
Reviewed-by: Kees Cook
Cc: Florian Weimer
Cc: Jann Horn
Cc: Borislav Petkov
Cc: Kernel Hardening
Cc: Peter Zijlstra
Link: https://lkml.kernel.org/r/d17655777c21bc09a7af1bbcf74e6f2b69a51152.1561610354.git.luto@kernel.org
---
 arch/x86/Kconfig                      | 33 ++++++++++++++++++++++++---------
 arch/x86/entry/vsyscall/vsyscall_64.c | 16 ++++++++++++++--
 2 files changed, 38 insertions(+), 11 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 2bbbd4d1ba31..0182d2c67590 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2293,23 +2293,38 @@ choice
 	  it can be used to assist security vulnerability exploitation.
 
 	  This setting can be changed at boot time via the kernel command
-	  line parameter vsyscall=[emulate|none].
+	  line parameter vsyscall=[emulate|xonly|none].
 
 	  On a system with recent enough glibc (2.14 or newer) and no
 	  static binaries, you can say None without a performance penalty
 	  to improve security.
 
-	  If unsure, select "Emulate".
+	  If unsure, select "Emulate execution only".
 
 config LEGACY_VSYSCALL_EMULATE
-	bool "Emulate"
+	bool "Full emulation"
 	help
-	  The kernel traps and emulates calls into the fixed
-	  vsyscall address mapping. This makes the mapping
-	  non-executable, but it still contains known contents,
-	  which could be used in certain rare security vulnerability
-	  exploits. This configuration is recommended when userspace
-	  still uses the vsyscall area.
+	  The kernel traps and emulates calls into the fixed vsyscall
+	  address mapping. This makes the mapping non-executable, but
+	  it still contains readable known contents, which could be
+	  used in certain rare security vulnerability exploits. This
+	  configuration is recommended when using legacy userspace
+	  that still uses vsyscalls along with legacy binary
+	  instrumentation tools that require code to be readable.
+
+	  An example of this type of legacy userspace is running
+	  Pin on an old binary that still uses vsyscalls.
+
+config LEGACY_VSYSCALL_XONLY
+	bool "Emulate execution only"
+	help
+	  The kernel traps and emulates calls into the fixed vsyscall
+	  address mapping and does not allow reads.  This
+	  configuration is recommended when userspace might use the
+	  legacy vsyscall area but support for legacy binary
+	  instrumentation of legacy code is not needed.  It mitigates
+	  certain uses of the vsyscall area as an ASLR-bypassing
+	  buffer.
 
 config LEGACY_VSYSCALL_NONE
 	bool "None"

diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
index d9d81ad7a400..fedd7628f3a6 100644
--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -42,9 +42,11 @@
 #define CREATE_TRACE_POINTS
 #include "vsyscall_trace.h"
 
-static enum { EMULATE, NONE } vsyscall_mode =
+static enum { EMULATE, XONLY, NONE } vsyscall_mode =
 #ifdef CONFIG_LEGACY_VSYSCALL_NONE
 	NONE;
+#elif defined(CONFIG_LEGACY_VSYSCALL_XONLY)
+	XONLY;
 #else
 	EMULATE;
 #endif
@@ -54,6 +56,8 @@ static int __init vsyscall_setup(char *str)
 	if (str) {
 		if (!strcmp("emulate", str))
 			vsyscall_mode = EMULATE;
+		else if (!strcmp("xonly", str))
+			vsyscall_mode = XONLY;
 		else if (!strcmp("none", str))
 			vsyscall_mode = NONE;
 		else
@@ -357,12 +361,20 @@ void __init map_vsyscall(void)
 	extern char __vsyscall_page;
 	unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
 
-	if (vsyscall_mode != NONE) {
+	/*
+	 * For full emulation, the page needs to exist for real.  In
+	 * execute-only mode, there is no PTE at all backing the vsyscall
+	 * page.
+	 */
+	if (vsyscall_mode == EMULATE) {
 		__set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
 			     PAGE_KERNEL_VVAR);
 		set_vsyscall_pgtable_user_bits(swapper_pg_dir);
 	}
 
+	if (vsyscall_mode == XONLY)
+		gate_vma.vm_flags = VM_EXEC;
+
 	BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
 		     (unsigned long)VSYSCALL_ADDR);
 }
--
cgit v1.2.3


From 918ce325098a4eef99daad7b6796da33cebaf03a Mon Sep 17 00:00:00 2001
From: Andy Lutomirski
Date: Wed, 26 Jun 2019 21:45:04 -0700
Subject: x86/vsyscall: Show something useful on a read fault

Just segfaulting the application when it tries to read the vsyscall page
in xonly mode is not helpful for those who need to debug it.

Emit a hint.

Signed-off-by: Andy Lutomirski
Signed-off-by: Thomas Gleixner
Reviewed-by: Kees Cook
Cc: Florian Weimer
Cc: Jann Horn
Link: https://lkml.kernel.org/r/8016afffe0eab497be32017ad7f6f7030dc3ba66.1561610354.git.luto@kernel.org
---
 arch/x86/entry/vsyscall/vsyscall_64.c | 19 ++++++++++++++++++-
 arch/x86/include/asm/vsyscall.h       |  6 ++++--
 arch/x86/mm/fault.c                   | 11 +++++------
 3 files changed, 27 insertions(+), 9 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
index fedd7628f3a6..9c58ab807aeb 100644
--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -117,7 +117,8 @@ static bool write_ok_or_segv(unsigned long ptr, size_t size)
 	}
 }
 
-bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
+bool emulate_vsyscall(unsigned long error_code,
+		      struct pt_regs *regs, unsigned long address)
 {
 	struct task_struct *tsk;
 	unsigned long caller;
@@ -126,6 +127,22 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 	long ret;
 	unsigned long orig_dx;
 
+	/* Write faults or kernel-privilege faults never get fixed up. */
+	if ((error_code & (X86_PF_WRITE | X86_PF_USER)) != X86_PF_USER)
+		return false;
+
+	if (!(error_code & X86_PF_INSTR)) {
+		/* Failed vsyscall read */
+		if (vsyscall_mode == EMULATE)
+			return false;
+
+		/*
+		 * User code tried and failed to read the vsyscall page.
+		 */
+		warn_bad_vsyscall(KERN_INFO, regs, "vsyscall read attempt denied -- look up the vsyscall kernel parameter if you need a workaround");
+		return false;
+	}
+
 	/*
 	 * No point in checking CS -- the only way to get here is a user mode
 	 * trap to a high address, which means that we're in 64-bit user code.
diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
index b986b2ca688a..ab60a71a8dcb 100644
--- a/arch/x86/include/asm/vsyscall.h
+++ b/arch/x86/include/asm/vsyscall.h
@@ -13,10 +13,12 @@ extern void set_vsyscall_pgtable_user_bits(pgd_t *root);
  * Called on instruction fetch fault in vsyscall page.
  * Returns true if handled.
  */
-extern bool emulate_vsyscall(struct pt_regs *regs, unsigned long address);
+extern bool emulate_vsyscall(unsigned long error_code,
+			     struct pt_regs *regs, unsigned long address);
 #else
 static inline void map_vsyscall(void) {}
-static inline bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
+static inline bool emulate_vsyscall(unsigned long error_code,
+				    struct pt_regs *regs, unsigned long address)
 {
 	return false;
 }

diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 46df4c6aae46..288a5462076f 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1369,16 +1369,15 @@ void do_user_addr_fault(struct pt_regs *regs,
 
 #ifdef CONFIG_X86_64
 	/*
-	 * Instruction fetch faults in the vsyscall page might need
-	 * emulation.  The vsyscall page is at a high address
-	 * (>PAGE_OFFSET), but is considered to be part of the user
-	 * address space.
+	 * Faults in the vsyscall page might need emulation.  The
+	 * vsyscall page is at a high address (>PAGE_OFFSET), but is
+	 * considered to be part of the user address space.
 	 *
 	 * The vsyscall page does not have a "real" VMA, so do this
 	 * emulation before we go searching for VMAs.
 	 */
-	if ((hw_error_code & X86_PF_INSTR) && is_vsyscall_vaddr(address)) {
-		if (emulate_vsyscall(regs, address))
+	if (is_vsyscall_vaddr(address)) {
+		if (emulate_vsyscall(hw_error_code, regs, address))
 			return;
 	}
 #endif
--
cgit v1.2.3


From e0a446ce394a7915f2ffc03f9bb610c5ac4dbbf1 Mon Sep 17 00:00:00 2001
From: Andy Lutomirski
Date: Wed, 26 Jun 2019 21:45:05 -0700
Subject: x86/vsyscall: Document odd SIGSEGV error code for vsyscalls

Even if vsyscall=none, user page faults on the vsyscall page are reported
as though the PROT bit in the error code was set.  Add a comment
explaining why this is probably okay and display the value in the test
case.

While at it, explain why the behavior is correct with respect to PKRU.

Also modify the selftest to print the odd error code so that there is a
way to demonstrate the odd behaviour.

If anyone really cares about more accurate emulation, the behaviour could
be changed, but that needs a really good justification.

Signed-off-by: Andy Lutomirski
Signed-off-by: Thomas Gleixner
Reviewed-by: Kees Cook
Cc: Florian Weimer
Cc: Jann Horn
Cc: Borislav Petkov
Cc: Kernel Hardening
Cc: Peter Zijlstra
Link: https://lkml.kernel.org/r/75c91855fd850649ace162eec5495a1354221aaa.1561610354.git.luto@kernel.org
---
 arch/x86/mm/fault.c                         | 7 +++++++
 tools/testing/selftests/x86/test_vsyscall.c | 9 ++++++++-
 2 files changed, 15 insertions(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 288a5462076f..58e4f1f00bbc 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -710,6 +710,10 @@ static void set_signal_archinfo(unsigned long address,
 	 * To avoid leaking information about the kernel page
 	 * table layout, pretend that user-mode accesses to
 	 * kernel addresses are always protection faults.
+	 *
+	 * NB: This means that failed vsyscalls with vsyscall=none
+	 * will have the PROT bit.  This doesn't leak any
+	 * information and does not appear to cause any problems.
 	 */
 	if (address >= TASK_SIZE_MAX)
 		error_code |= X86_PF_PROT;
@@ -1375,6 +1379,9 @@ void do_user_addr_fault(struct pt_regs *regs,
 	 *
 	 * The vsyscall page does not have a "real" VMA, so do this
 	 * emulation before we go searching for VMAs.
+	 *
+	 * PKRU never rejects instruction fetches, so we don't need
+	 * to consider the PF_PK bit.
 	 */
 	if (is_vsyscall_vaddr(address)) {
 		if (emulate_vsyscall(hw_error_code, regs, address))

diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c
index 0b4f1cc2291c..4c9a8d76dba0 100644
--- a/tools/testing/selftests/x86/test_vsyscall.c
+++ b/tools/testing/selftests/x86/test_vsyscall.c
@@ -183,9 +183,13 @@ static inline long sys_getcpu(unsigned * cpu, unsigned * node,
 }
 
 static jmp_buf jmpbuf;
+static volatile unsigned long segv_err;
 
 static void sigsegv(int sig, siginfo_t *info, void *ctx_void)
 {
+	ucontext_t *ctx = (ucontext_t *)ctx_void;
+
+	segv_err = ctx->uc_mcontext.gregs[REG_ERR];
 	siglongjmp(jmpbuf, 1);
 }
 
@@ -416,8 +420,11 @@ static int test_vsys_r(void)
 	} else if (!can_read && should_read_vsyscall) {
 		printf("[FAIL]\tWe don't have read access, but we should\n");
 		return 1;
+	} else if (can_read) {
+		printf("[OK]\tWe have read access\n");
 	} else {
-		printf("[OK]\tgot expected result\n");
+		printf("[OK]\tWe do not have read access: #PF(0x%lx)\n",
+		       segv_err);
 	}
 #endif
 
--
cgit v1.2.3


From 625b7b7f79c66626fb2b7687fc1a58309a57edd5 Mon Sep 17 00:00:00 2001
From: Andy Lutomirski
Date: Wed, 26 Jun 2019 21:45:07 -0700
Subject: x86/vsyscall: Change the default vsyscall mode to xonly

The use case for full emulation over xonly is very esoteric, e.g. magic
instrumentation tools.

Change the default to the safer xonly mode.

Signed-off-by: Andy Lutomirski
Signed-off-by: Thomas Gleixner
Reviewed-by: Kees Cook
Cc: Florian Weimer
Cc: Jann Horn
Cc: Borislav Petkov
Cc: Kernel Hardening
Cc: Peter Zijlstra
Link: https://lkml.kernel.org/r/30539f8072d2376b9c9efcc07e6ed0d6bf20e882.1561610354.git.luto@kernel.org
---
 arch/x86/Kconfig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 0182d2c67590..32028edc1b0e 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2285,7 +2285,7 @@ config COMPAT_VDSO
 choice
 	prompt "vsyscall table for legacy applications"
 	depends on X86_64
-	default LEGACY_VSYSCALL_EMULATE
+	default LEGACY_VSYSCALL_XONLY
 	help
 	  Legacy user code that does not know how to find the vDSO expects
 	  to be able to issue three syscalls by calling fixed addresses in
--
cgit v1.2.3


From 441cedab2dfca18fe4983cbc795de04536ed421e Mon Sep 17 00:00:00 2001
From: Andy Lutomirski
Date: Wed, 26 Jun 2019 21:45:08 -0700
Subject: x86/vsyscall: Add __ro_after_init to global variables

The vDSO is only configurable by command-line options, so make its global
variables __ro_after_init.

This seems highly unlikely to ever stop an exploit, but it's nicer anyway.
Signed-off-by: Andy Lutomirski
Signed-off-by: Thomas Gleixner
Reviewed-by: Kees Cook
Cc: Florian Weimer
Cc: Jann Horn
Cc: Borislav Petkov
Cc: Kernel Hardening
Cc: Peter Zijlstra
Link: https://lkml.kernel.org/r/a386925835e49d319e70c4d7404b1f6c3c2e3702.1561610354.git.luto@kernel.org
---
 arch/x86/entry/vsyscall/vsyscall_64.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
index 9c58ab807aeb..07003f3f1bfc 100644
--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -42,7 +42,7 @@
 #define CREATE_TRACE_POINTS
 #include "vsyscall_trace.h"
 
-static enum { EMULATE, XONLY, NONE } vsyscall_mode =
+static enum { EMULATE, XONLY, NONE } vsyscall_mode __ro_after_init =
 #ifdef CONFIG_LEGACY_VSYSCALL_NONE
 	NONE;
 #elif defined(CONFIG_LEGACY_VSYSCALL_XONLY)
@@ -305,7 +305,7 @@ static const char *gate_vma_name(struct vm_area_struct *vma)
 static const struct vm_operations_struct gate_vma_ops = {
 	.name = gate_vma_name,
 };
-static struct vm_area_struct gate_vma = {
+static struct vm_area_struct gate_vma __ro_after_init = {
 	.vm_start	= VSYSCALL_ADDR,
 	.vm_end		= VSYSCALL_ADDR + PAGE_SIZE,
 	.vm_page_prot	= PAGE_READONLY_EXEC,
--
cgit v1.2.3
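
Not part of the series: for readers who want to observe the mode differences from
userspace, here is a minimal, self-contained sketch in the spirit of the selftest
patched above (tools/testing/selftests/x86/test_vsyscall.c).  It assumes an x86-64
Linux host and uses the legacy fixed gettimeofday vsyscall address 0xffffffffff600000;
the file name and messages are illustrative only.  Calling the vsyscall still works
under both emulate and xonly, reading the page works only under full emulation, and
under vsyscall=none both accesses fault.

/* vsyscall_mode_probe.c - illustrative sketch, not part of the series */
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <sys/time.h>

/* Legacy fixed-address gettimeofday vsyscall. */
#define VSYS_GTOD 0xffffffffff600000UL

typedef int (*gtod_t)(struct timeval *, struct timezone *);

static sigjmp_buf jb;

static void segv(int sig)
{
	(void)sig;
	siglongjmp(jb, 1);	/* sigsetjmp(..., 1) restores the signal mask */
}

int main(void)
{
	struct timeval tv;

	signal(SIGSEGV, segv);

	/* Executing the vsyscall is emulated in both emulate and xonly modes. */
	if (sigsetjmp(jb, 1) == 0) {
		((gtod_t)VSYS_GTOD)(&tv, NULL);
		printf("call: OK (vsyscall=emulate or xonly)\n");
	} else {
		printf("call: SIGSEGV (vsyscall=none?)\n");
	}

	/* Reading the page only works when it is actually mapped (emulate). */
	if (sigsetjmp(jb, 1) == 0) {
		unsigned char byte = *(volatile unsigned char *)VSYS_GTOD;

		printf("read: OK, first byte 0x%02x (vsyscall=emulate)\n", byte);
	} else {
		printf("read: SIGSEGV (vsyscall=xonly or none)\n");
	}

	return 0;
}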