| author | Andy Lutomirski | 2016-01-31 09:33:26 -0800 |
|---|---|---|
| committer | Ingo Molnar | 2016-02-01 08:53:25 +0100 |
| commit | b7765086b7c5a5be029a739c2caa161da51c2076 (patch) | |
| tree | 6dcdee4ed261c942de058732c818f843701ed63b /arch | |
| parent | 8c725306993198f845038dc9e45a1267099867a6 (diff) | |
x86/entry/64: Fix an IRQ state error on ptregs-using syscalls
I messed up the IRQ state when jumping off the fast path due to
invocation of a ptregs-using syscall. This bug shouldn't have
had any impact yet, but it would have caused problems with
subsequent context tracking cleanups.
Reported-and-tested-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: 1e423bff959e ("x86/entry/64: Migrate the 64-bit syscall slow path to C")
Link: http://lkml.kernel.org/r/ab92cd365fb7b0a56869e920017790d96610fdca.1454261517.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
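
To make the bug described in the message above concrete, here is a minimal userspace C sketch of the IRQ-state invariant the patch restores. It is not kernel code: `irqs_enabled`, `slow_path_model()` and the other names are illustrative stand-ins, and the assumption that the slow path expects interrupts off on entry is inferred from the `DISABLE_INTERRUPTS`/`TRACE_IRQS_OFF` added in the diff further down.

```c
/*
 * Toy model (NOT kernel code) of the IRQ-state invariant this patch
 * restores.  All names are illustrative stand-ins for the real entry code.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool irqs_enabled;	/* models the tracked IRQ state */

static void irq_enable_model(void)  { irqs_enabled = true;  }
static void irq_disable_model(void) { irqs_enabled = false; }

/* Stand-in for the C slow path: assumed to expect IRQs off on entry. */
static void slow_path_model(void)
{
	assert(!irqs_enabled);	/* the invariant the patch preserves */
	irq_enable_model();	/* the slow path turns IRQs back on itself */
	printf("slow path entered with a consistent IRQ state\n");
}

/* Stand-in for the fast path dispatching a ptregs-using syscall. */
static void fast_path_dispatch_model(void)
{
	irq_enable_model();	/* the fast path runs the syscall with IRQs on */

	/*
	 * The stub decides to retry on the slow path.  Before the fix it
	 * jumped with IRQs still on; the fix adds the equivalent of
	 * DISABLE_INTERRUPTS(CLBR_NONE) + TRACE_IRQS_OFF here.  Removing
	 * the next line models the pre-fix jump and trips the assertion.
	 */
	irq_disable_model();

	slow_path_model();
}

int main(void)
{
	fast_path_dispatch_model();
	return 0;
}
```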
Diffstat (limited to 'arch')
-rw-r--r-- | arch/x86/entry/entry_64.S | 19 |
1 file changed, 13 insertions(+), 6 deletions(-)
```diff
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 567aa522ac0a..9f7bb808035e 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -191,8 +191,8 @@ entry_SYSCALL_64_fastpath:
 
 	/*
 	 * This call instruction is handled specially in stub_ptregs_64.
-	 * It might end up jumping to the slow path. If it jumps, RAX is
-	 * clobbered.
+	 * It might end up jumping to the slow path. If it jumps, RAX
+	 * and all argument registers are clobbered.
 	 */
 	call	*sys_call_table(, %rax, 8)
 .Lentry_SYSCALL_64_after_fastpath_call:
@@ -315,17 +315,24 @@ END(entry_SYSCALL_64)
 ENTRY(stub_ptregs_64)
 	/*
 	 * Syscalls marked as needing ptregs land here.
-	 * If we are on the fast path, we need to save the extra regs.
-	 * If we are on the slow path, the extra regs are already saved.
+	 * If we are on the fast path, we need to save the extra regs,
+	 * which we achieve by trying again on the slow path. If we are on
+	 * the slow path, the extra regs are already saved.
 	 *
 	 * RAX stores a pointer to the C function implementing the syscall.
+	 * IRQs are on.
 	 */
 	cmpq	$.Lentry_SYSCALL_64_after_fastpath_call, (%rsp)
 	jne	1f
 
-	/* Called from fast path -- pop return address and jump to slow path */
+	/*
+	 * Called from fast path -- disable IRQs again, pop return address
+	 * and jump to slow path
+	 */
+	DISABLE_INTERRUPTS(CLBR_NONE)
+	TRACE_IRQS_OFF
 	popq	%rax
-	jmp	entry_SYSCALL64_slow_path	/* called from fast path */
+	jmp	entry_SYSCALL64_slow_path
 
 1:
 	/* Called from C */
```
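
For readers who prefer C to assembly, below is a hedged, self-contained paraphrase of what stub_ptregs_64 does after this patch. It is a sketch, not the kernel's implementation: the real code is assembly, detects the fast-path caller by comparing the return address on the stack rather than taking a boolean, and reaches the slow path with a jmp instead of a function call; every name ending in `_model` is invented for illustration.

```c
#include <stdbool.h>
#include <stdio.h>

/* Stands in for DISABLE_INTERRUPTS(CLBR_NONE) + TRACE_IRQS_OFF. */
static void irqs_off_model(void)
{
	printf("IRQs disabled before entering the slow path\n");
}

/* Stands in for entry_SYSCALL64_slow_path: build full pt_regs, retry in C. */
static void slow_path_retry_model(void)
{
	printf("slow path: extra regs saved, syscall retried\n");
}

/* Stands in for a ptregs-using syscall implementation (e.g. fork). */
static void ptregs_syscall_model(void)
{
	printf("syscall runs with complete pt_regs\n");
}

/* C-flavored paraphrase of the post-patch stub_ptregs_64 branch. */
static void stub_ptregs_64_model(bool called_from_fast_path,
				 void (*sys_call_ptr)(void))
{
	if (called_from_fast_path) {
		/* Extra regs not saved yet: IRQs back off, retry slowly. */
		irqs_off_model();
		slow_path_retry_model();
	} else {
		/* Slow-path caller: pt_regs is complete, dispatch directly. */
		sys_call_ptr();
	}
}

int main(void)
{
	stub_ptregs_64_model(true, ptregs_syscall_model);	/* fast-path bail-out */
	stub_ptregs_64_model(false, ptregs_syscall_model);	/* slow-path dispatch */
	return 0;
}
```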