author    Sean Christopherson 2023-08-11 08:52:55 -0700
committer Borislav Petkov (AMD) 2023-08-14 10:47:55 +0200
commit    ba5ca5e5e6a1d55923e88b4a83da452166f5560e
tree      63d2e359a7efe8e7ef7c76adb0dd14b4bc35ca70 /arch/x86
parent    2ccdd1b13c591d306f0401d98dedc4bdcd02b421
x86/retpoline: Don't clobber RFLAGS during srso_safe_ret()
Use LEA instead of ADD when adjusting %rsp in srso_safe_ret{,_alias}()
so as to avoid clobbering flags. Drop one of the INT3 instructions to
account for the LEA consuming one more byte than the ADD.

KVM's emulator makes indirect calls into a jump table of sorts, where
the destination of each call is a small blob of code that performs fast
emulation by executing the target instruction with fixed operands.

E.g. to emulate ADC, fastop() invokes adcb_al_dl():

  adcb_al_dl:
     <+0>: adc    %dl,%al
     <+2>: jmp    <__x86_return_thunk>

A major motivation for doing fast emulation is to leverage the CPU to
handle consumption and manipulation of arithmetic flags, i.e. RFLAGS is
both an input and output to the target of the call. fastop() collects
the RFLAGS result by pushing RFLAGS onto the stack and popping them back
into a variable (held in %rdi in this case):

  asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"

  <+71>: mov    0xc0(%r8),%rdx
  <+78>: mov    0x100(%r8),%rcx
  <+85>: push   %rdi
  <+86>: popf
  <+87>: call   *%rsi
  <+89>: nop
  <+90>: nop
  <+91>: nop
  <+92>: pushf
  <+93>: pop    %rdi

and then propagating the arithmetic flags into the vCPU's emulator
state:

  ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);

  <+64>:  and    $0xfffffffffffff72a,%r9
  <+94>:  and    $0x8d5,%edi
  <+109>: or     %rdi,%r9
  <+122>: mov    %r9,0x10(%r8)

The failures can be most easily reproduced by running the "emulator"
test in KVM-Unit-Tests.

If you're feeling a bit of deja vu, see commit b63f20a778c8
("x86/retpoline: Don't clobber RFLAGS during CALL_NOSPEC on i386").

In addition, this breaks booting of a clang-compiled guest on a
gcc-compiled host where the host contains the %rsp-modifying SRSO
mitigations.

[ bp: Massage commit message, extend, remove addresses. ]

Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation")
Closes: https://lore.kernel.org/all/de474347-122d-54cd-eabf-9dcc95ab9eae@amd.com
Reported-by: Srikanth Aithal <sraithal@amd.com>
Reported-by: Nathan Chancellor <nathan@kernel.org>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Tested-by: Nathan Chancellor <nathan@kernel.org>
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/20230810013334.GA5354@dev-arch.thelio-3990X/
Link: https://lore.kernel.org/r/20230811155255.250835-1-seanjc@google.com
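As background to the one-line change below: LEA computes an effective
address without touching RFLAGS, whereas ADD updates the arithmetic
flags on every execution, so an ADD inside the return thunk silently
destroys the flags fastop() is trying to carry across the CALL. The
difference is easy to demonstrate with a minimal userspace C sketch
(illustrative only, not part of the patch; the helper names are made
up):

  /*
   * Illustrative userspace sketch (not from the patch): both asm blobs
   * advance a scratch register by 8, but ADD rewrites RFLAGS while LEA
   * does not. STC forces CF=1 beforehand, SETC reads it back after.
   */
  #include <stdio.h>

  static int cf_after_add(void)
  {
          unsigned long scratch = 0;
          unsigned char cf;

          asm volatile("stc\n\t"             /* force CF = 1 */
                       "add $8, %[reg]\n\t"  /* ADD updates RFLAGS: 0 + 8 has no carry out, CF = 0 */
                       "setc %[cf]"          /* capture CF */
                       : [reg] "+r" (scratch), [cf] "=q" (cf)
                       : : "cc");
          return cf;
  }

  static int cf_after_lea(void)
  {
          unsigned long scratch = 0;
          unsigned char cf;

          asm volatile("stc\n\t"                     /* force CF = 1 */
                       "lea 8(%[reg]), %[reg]\n\t"   /* LEA leaves RFLAGS untouched */
                       "setc %[cf]"                  /* capture CF */
                       : [reg] "+r" (scratch), [cf] "=q" (cf)
                       : : "cc");
          return cf;
  }

  int main(void)
  {
          printf("CF survives ADD: %d\n", cf_after_add());  /* prints 0 */
          printf("CF survives LEA: %d\n", cf_after_lea());  /* prints 1 */
          return 0;
  }

Built with gcc or clang on x86-64, the ADD variant reports 0 (the carry
set by STC is gone) while the LEA variant reports 1, which is precisely
the property the thunk needs while sitting between fastop()'s popf and
pushf.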
Diffstat (limited to 'arch/x86')
 arch/x86/lib/retpoline.S | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index 2cff585f22f2..132cedbf9e57 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -164,7 +164,7 @@ __EXPORT_THUNK(srso_untrain_ret_alias)
/* Needs a definition for the __x86_return_thunk alternative below. */
SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
#ifdef CONFIG_CPU_SRSO
- add $8, %_ASM_SP
+ lea 8(%_ASM_SP), %_ASM_SP
UNWIND_HINT_FUNC
#endif
ANNOTATE_UNRET_SAFE
@@ -239,7 +239,7 @@ __EXPORT_THUNK(zen_untrain_ret)
* SRSO untraining sequence for Zen1/2, similar to zen_untrain_ret()
* above. On kernel entry, srso_untrain_ret() is executed which is a
*
- * movabs $0xccccccc308c48348,%rax
+ * movabs $0xccccc30824648d48,%rax
*
* and when the return thunk executes the inner label srso_safe_ret()
* later, it is a stack manipulation and a RET which is mispredicted and
@@ -252,11 +252,10 @@ SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
.byte 0x48, 0xb8

SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
- add $8, %_ASM_SP
+ lea 8(%_ASM_SP), %_ASM_SP
ret
int3
int3
- int3
lfence
call srso_safe_ret
int3
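For reference, the two movabs immediates in the comment hunk above
decode, little-endian, to exactly the old and new srso_safe_ret()
bodies, which also shows why one INT3 had to be dropped: the blob must
stay eight bytes total, and LEA's five-byte encoding is one byte longer
than ADD's four.

  old: 0xccccccc308c48348
     48 83 c4 08        add    $0x8,%rsp
     c3                 ret
     cc cc cc           int3; int3; int3

  new: 0xccccc30824648d48
     48 8d 64 24 08     lea    0x8(%rsp),%rsp
     c3                 ret
     cc cc              int3; int3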