Diffstat (limited to 'debian/patches/bugfix/x86/retbleed/0046-KVM-VMX-Prevent-RSB-underflow-before-vmenter.patch')
-rw-r--r-- | debian/patches/bugfix/x86/retbleed/0046-KVM-VMX-Prevent-RSB-underflow-before-vmenter.patch | 182
1 file changed, 182 insertions, 0 deletions
diff --git a/debian/patches/bugfix/x86/retbleed/0046-KVM-VMX-Prevent-RSB-underflow-before-vmenter.patch b/debian/patches/bugfix/x86/retbleed/0046-KVM-VMX-Prevent-RSB-underflow-before-vmenter.patch
new file mode 100644
index 000000000..860b76f25
--- /dev/null
+++ b/debian/patches/bugfix/x86/retbleed/0046-KVM-VMX-Prevent-RSB-underflow-before-vmenter.patch
@@ -0,0 +1,182 @@
+From: Josh Poimboeuf <jpoimboe@kernel.org>
+Date: Tue, 14 Jun 2022 23:16:16 +0200
+Subject: KVM: VMX: Prevent RSB underflow before vmenter
+Origin: https://git.kernel.org/linus/07853adc29a058c5fd143c14e5ac528448a72ed9
+
+On VMX, there are some balanced returns between the time the guest's
+SPEC_CTRL value is written, and the vmenter.
+
+Balanced returns (matched by a preceding call) are usually ok, but it's
+at least theoretically possible an NMI with a deep call stack could
+empty the RSB before one of the returns.
+
+For maximum paranoia, don't allow *any* returns (balanced or otherwise)
+between the SPEC_CTRL write and the vmenter.
+
+  [ bp: Fix 32-bit build. ]
+
+Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kernel/asm-offsets.c   |  6 ++++++
+ arch/x86/kernel/cpu/bugs.c      |  4 ++--
+ arch/x86/kvm/vmx/capabilities.h |  4 ++--
+ arch/x86/kvm/vmx/vmenter.S      | 29 +++++++++++++++++++++++++++++
+ arch/x86/kvm/vmx/vmx.c          |  8 --------
+ arch/x86/kvm/vmx/vmx.h          |  4 ++--
+ arch/x86/kvm/vmx/vmx_ops.h      |  2 +-
+ 7 files changed, 42 insertions(+), 15 deletions(-)
+
+diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
+index 437308004ef2..cb50589a7102 100644
+--- a/arch/x86/kernel/asm-offsets.c
++++ b/arch/x86/kernel/asm-offsets.c
+@@ -19,6 +19,7 @@
+ #include <asm/suspend.h>
+ #include <asm/tlbflush.h>
+ #include <asm/tdx.h>
++#include "../kvm/vmx/vmx.h"
+
+ #ifdef CONFIG_XEN
+ #include <xen/interface/xen.h>
+@@ -107,4 +108,9 @@ static void __used common(void)
+ 	OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
+ 	OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
+ 	OFFSET(TSS_sp2, tss_struct, x86_tss.sp2);
++
++	if (IS_ENABLED(CONFIG_KVM_INTEL)) {
++		BLANK();
++		OFFSET(VMX_spec_ctrl, vcpu_vmx, spec_ctrl);
++	}
+ }
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index fcbd072a5e36..182f8b2e8a3c 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -196,8 +196,8 @@ void __init check_bugs(void)
+ }
+
+ /*
+- * NOTE: For VMX, this function is not called in the vmexit path.
+- * It uses vmx_spec_ctrl_restore_host() instead.
++ * NOTE: This function is *only* called for SVM. VMX spec_ctrl handling is
++ * done in vmenter.S.
+  */
+ void
+ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
+diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
+index 3f430e218375..c0e24826a86f 100644
+--- a/arch/x86/kvm/vmx/capabilities.h
++++ b/arch/x86/kvm/vmx/capabilities.h
+@@ -4,8 +4,8 @@
+
+ #include <asm/vmx.h>
+
+-#include "lapic.h"
+-#include "x86.h"
++#include "../lapic.h"
++#include "../x86.h"
+
+ extern bool __read_mostly enable_vpid;
+ extern bool __read_mostly flexpriority_enabled;
+diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
+index 4c743fa98a1f..4182c7ffc909 100644
+--- a/arch/x86/kvm/vmx/vmenter.S
++++ b/arch/x86/kvm/vmx/vmenter.S
+@@ -1,9 +1,11 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+ #include <linux/linkage.h>
+ #include <asm/asm.h>
++#include <asm/asm-offsets.h>
+ #include <asm/bitsperlong.h>
+ #include <asm/kvm_vcpu_regs.h>
+ #include <asm/nospec-branch.h>
++#include <asm/percpu.h>
+ #include <asm/segment.h>
+ #include "run_flags.h"
+
+@@ -73,6 +75,33 @@ SYM_FUNC_START(__vmx_vcpu_run)
+ 	lea (%_ASM_SP), %_ASM_ARG2
+ 	call vmx_update_host_rsp
+
++	ALTERNATIVE "jmp .Lspec_ctrl_done", "", X86_FEATURE_MSR_SPEC_CTRL
++
++	/*
++	 * SPEC_CTRL handling: if the guest's SPEC_CTRL value differs from the
++	 * host's, write the MSR.
++	 *
++	 * IMPORTANT: To avoid RSB underflow attacks and any other nastiness,
++	 * there must not be any returns or indirect branches between this code
++	 * and vmentry.
++	 */
++	mov 2*WORD_SIZE(%_ASM_SP), %_ASM_DI
++	movl VMX_spec_ctrl(%_ASM_DI), %edi
++	movl PER_CPU_VAR(x86_spec_ctrl_current), %esi
++	cmp %edi, %esi
++	je .Lspec_ctrl_done
++	mov $MSR_IA32_SPEC_CTRL, %ecx
++	xor %edx, %edx
++	mov %edi, %eax
++	wrmsr
++
++.Lspec_ctrl_done:
++
++	/*
++	 * Since vmentry is serializing on affected CPUs, there's no need for
++	 * an LFENCE to stop speculation from skipping the wrmsr.
++	 */
++
+ 	/* Load @regs to RAX. */
+ 	mov (%_ASM_SP), %_ASM_AX
+
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 421db354b1ab..be7c19374fdd 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -6988,14 +6988,6 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
+
+ 	kvm_wait_lapic_expire(vcpu);
+
+-	/*
+-	 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
+-	 * it's non-zero. Since vmentry is serialising on affected CPUs, there
+-	 * is no need to worry about the conditional branch over the wrmsr
+-	 * being speculatively taken.
+-	 */
+-	x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
+-
+ 	/* The actual VMENTER/EXIT is in the .noinstr.text section. */
+ 	vmx_vcpu_enter_exit(vcpu, vmx, __vmx_vcpu_run_flags(vmx));
+
+diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
+index da654af12ccb..1e7f9453894b 100644
+--- a/arch/x86/kvm/vmx/vmx.h
++++ b/arch/x86/kvm/vmx/vmx.h
+@@ -8,11 +8,11 @@
+ #include <asm/intel_pt.h>
+
+ #include "capabilities.h"
+-#include "kvm_cache_regs.h"
++#include "../kvm_cache_regs.h"
+ #include "posted_intr.h"
+ #include "vmcs.h"
+ #include "vmx_ops.h"
+-#include "cpuid.h"
++#include "../cpuid.h"
+ #include "run_flags.h"
+
+ #define MSR_TYPE_R	1
+diff --git a/arch/x86/kvm/vmx/vmx_ops.h b/arch/x86/kvm/vmx/vmx_ops.h
+index 5e7f41225780..5cfc49ddb1b4 100644
+--- a/arch/x86/kvm/vmx/vmx_ops.h
++++ b/arch/x86/kvm/vmx/vmx_ops.h
+@@ -8,7 +8,7 @@
+
+ #include "evmcs.h"
+ #include "vmcs.h"
+-#include "x86.h"
++#include "../x86.h"
+
+ asmlinkage void vmread_error(unsigned long field, bool fault);
+ __attribute__((regparm(0))) void vmread_error_trampoline(unsigned long field,
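
For reference, the new stub in vmenter.S boils down to the following C sketch. This is illustrative only: spec_ctrl_before_vmenter() is a made-up name, the sketch assumes the kernel's this_cpu_read() and wrmsr() helpers, and the real code must stay in assembly precisely so that no return or indirect branch can occur between the MSR write and the vmenter.

    /*
     * Hypothetical C rendering of the .Lspec_ctrl_done stub above;
     * not actual kernel code.
     */
    static void spec_ctrl_before_vmenter(struct vcpu_vmx *vmx)
    {
            /* movl VMX_spec_ctrl(%_ASM_DI), %edi */
            u64 guest = vmx->spec_ctrl;

            /* movl PER_CPU_VAR(x86_spec_ctrl_current), %esi */
            u64 host = this_cpu_read(x86_spec_ctrl_current);

            /* cmp %edi, %esi; je .Lspec_ctrl_done */
            if (guest != host)
                    /* mov $MSR_IA32_SPEC_CTRL, %ecx; xor %edx, %edx;
                     * mov %edi, %eax; wrmsr  (edx:eax = 0:guest) */
                    wrmsr(MSR_IA32_SPEC_CTRL, (u32)guest, 0);

            /*
             * No trailing LFENCE: vmentry is serializing on affected
             * CPUs, so speculation cannot skip past the wrmsr.
             */
    }

The reason this logic moved out of vmx_vcpu_run() (the deleted x86_spec_ctrl_set_guest() call above) is that any function return between the wrmsr and the vmenter could underflow the RSB once the guest's SPEC_CTRL value is live; keeping the compare-and-write inside __vmx_vcpu_run itself leaves no returns on that path.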