author     Andy Lutomirski        2015-07-15 10:29:41 -0700
committer  Ingo Molnar            2015-07-17 12:50:14 +0200
commit     a97439aa1aec10387797b4abae3cf117de1c90d7
tree       6350d830fed0e1a6908bc53eaa9aefff2425902b  /arch/x86/entry/entry_64.S
parent     36f1a77b3aa57c5c2eb1ae2d67d07c4350a78345
x86/entry/64, x86/nmi/64: Add CONFIG_DEBUG_ENTRY NMI testing code
It turns out to be rather tedious to test the NMI nesting code. Make it easier: add a new CONFIG_DEBUG_ENTRY option that causes the NMI handler to pre-emptively unmask NMIs. With this option set, errors in the repeat_nmi logic or failures to detect that we're in a nested NMI will result in quick panics under perf (especially if multiple counters are running at high frequency) instead of requiring an unusual workload that generates page faults or breakpoints inside NMIs.

I called it CONFIG_DEBUG_ENTRY instead of CONFIG_DEBUG_NMI_ENTRY because I want to add new non-NMI checks elsewhere in the entry code in the future, and I'd rather not add too many new config options or add this option and then immediately rename it.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Borislav Petkov <bp@suse.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
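Note that the CONFIG_DEBUG_ENTRY option itself is declared outside this file (this page shows only the arch/x86/entry/entry_64.S part of the patch). As a rough sketch only, not text taken from this commit, such an x86 debug option would typically be declared in arch/x86/Kconfig.debug along these lines; the file location, prompt, and help text below are assumptions for illustration:

    config DEBUG_ENTRY
            bool "Debug low-level entry code"
            depends on DEBUG_KERNEL
            help
              Enable sanity checks in the kernel's low-level entry code,
              for example the 64-bit NMI handler's early NMI unmasking
              shown below. The extra IRET makes every NMI noticeably
              more expensive, so say N unless you are debugging the
              entry code.

To exercise the NMI nesting paths once this is enabled, one option (not part of this patch) is to run several high-frequency perf counters system-wide, for example: perf record -a -F 99999 -e cycles -e instructions -- sleep 10. With the early unmasking in place, bugs in the repeat_nmi logic should then show up as quick panics rather than requiring page faults or breakpoints inside NMIs.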
Diffstat (limited to 'arch/x86/entry/entry_64.S')
-rw-r--r--   arch/x86/entry/entry_64.S   15
1 file changed, 15 insertions(+), 0 deletions(-)
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 5422bd20bdf4..8cb3e438f21e 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1454,6 +1454,21 @@ first_nmi:
 	/* Everything up to here is safe from nested NMIs */
+#ifdef CONFIG_DEBUG_ENTRY
+	/*
+	 * For ease of testing, unmask NMIs right away. Disabled by
+	 * default because IRET is very expensive.
+	 */
+	pushq	$0			/* SS */
+	pushq	%rsp			/* RSP (minus 8 because of the previous push) */
+	addq	$8, (%rsp)		/* Fix up RSP */
+	pushfq				/* RFLAGS */
+	pushq	$__KERNEL_CS		/* CS */
+	pushq	$1f			/* RIP */
+	INTERRUPT_RETURN		/* continues at repeat_nmi below */
+1:
+#endif
+
 repeat_nmi:
 	/*
 	 * If there was a nested NMI, the first NMI's iret will return