Diffstat (limited to 'debian/patches/bugfix/x86/retbleed/0045-x86-speculation-Fill-RSB-on-vmexit-for-IBRS.patch')
-rw-r--r--  debian/patches/bugfix/x86/retbleed/0045-x86-speculation-Fill-RSB-on-vmexit-for-IBRS.patch  133
1 file changed, 133 insertions(+), 0 deletions(-)
diff --git a/debian/patches/bugfix/x86/retbleed/0045-x86-speculation-Fill-RSB-on-vmexit-for-IBRS.patch b/debian/patches/bugfix/x86/retbleed/0045-x86-speculation-Fill-RSB-on-vmexit-for-IBRS.patch
new file mode 100644
index 000000000..a5395236a
--- /dev/null
+++ b/debian/patches/bugfix/x86/retbleed/0045-x86-speculation-Fill-RSB-on-vmexit-for-IBRS.patch
@@ -0,0 +1,133 @@
+From: Josh Poimboeuf <jpoimboe@kernel.org>
+Date: Tue, 14 Jun 2022 23:16:15 +0200
+Subject: x86/speculation: Fill RSB on vmexit for IBRS
+Origin: https://git.kernel.org/linus/9756bba28470722dacb79ffce554336dd1f6a6cd
+
+Prevent RSB underflow/poisoning attacks with RSB filling. While at it, add a
+bunch of comments to attempt to document the current state of tribal
+knowledge about RSB attacks and what exactly is being mitigated.
+
+Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/include/asm/cpufeatures.h | 2 +-
+ arch/x86/kernel/cpu/bugs.c | 63 +++++++++++++++++++++++++++---
+ arch/x86/kvm/vmx/vmenter.S | 6 +--
+ 3 files changed, 62 insertions(+), 9 deletions(-)
+
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 09dce77f4848..d143f018eda1 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -204,7 +204,7 @@
+ #define X86_FEATURE_XCOMPACTED ( 7*32+10) /* "" Use compacted XSTATE (XSAVES or XSAVEC) */
+ #define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */
+ #define X86_FEATURE_KERNEL_IBRS ( 7*32+12) /* "" Set/clear IBRS on kernel entry/exit */
+-/* FREE! ( 7*32+13) */
++#define X86_FEATURE_RSB_VMEXIT ( 7*32+13) /* "" Fill RSB on VM-Exit */
+ #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
+ #define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */
+ #define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
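The bit-position arithmetic in the new define follows the cpufeatures layout:
capability flags are stored as an array of 32-bit words, so (7*32+13) means
word 7, bit 13. A minimal sketch of that decomposition (the helper macros
below are illustrative, not kernel API):

    /* Illustrative decomposition of an x86 cpufeatures bit number,
     * where nr = word*32 + bit. */
    #define FEATURE_WORD(nr)  ((nr) / 32)   /* capability word: 7 for RSB_VMEXIT  */
    #define FEATURE_BIT(nr)   ((nr) % 32)   /* bit within word: 13 for RSB_VMEXIT */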
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 31ccb7852afd..fcbd072a5e36 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1401,16 +1401,69 @@ static void __init spectre_v2_select_mitigation(void)
+ pr_info("%s\n", spectre_v2_strings[mode]);
+
+ /*
+- * If spectre v2 protection has been enabled, unconditionally fill
+- * RSB during a context switch; this protects against two independent
+- * issues:
++ * If Spectre v2 protection has been enabled, fill the RSB during a
++ * context switch. In general there are two types of RSB attacks
++ * across context switches, for which the CALLs/RETs may be unbalanced.
+ *
+- * - RSB underflow (and switch to BTB) on Skylake+
+- * - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
++ * 1) RSB underflow
++ *
++ * Some Intel parts have "bottomless RSB". When the RSB is empty,
++ * speculated return targets may come from the branch predictor,
++ * which could have a user-poisoned BTB or BHB entry.
++ *
++ * AMD has it even worse: *all* returns are speculated from the BTB,
++ * regardless of the state of the RSB.
++ *
++ * When IBRS or eIBRS is enabled, the "user -> kernel" attack
++ * scenario is mitigated by the IBRS branch prediction isolation
++ * properties, so the RSB buffer filling wouldn't be necessary to
++ * protect against this type of attack.
++ *
++ * The "user -> user" attack scenario is mitigated by RSB filling.
++ *
++ * 2) Poisoned RSB entry
++ *
++ * If the 'next' in-kernel return stack is shorter than 'prev',
++ * 'next' could be tricked into speculating with a user-poisoned RSB
++ * entry.
++ *
++ * The "user -> kernel" attack scenario is mitigated by SMEP and
++ * eIBRS.
++ *
++ * The "user -> user" scenario, also known as SpectreBHB, requires
++ * RSB clearing.
++ *
++ * So to mitigate all cases, unconditionally fill RSB on context
++ * switches.
++ *
++ * FIXME: Is this pointless for retbleed-affected AMD?
+ */
+ setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
+ pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
+
++ /*
++ * Similar to context switches, there are two types of RSB attacks
++ * after vmexit:
++ *
++ * 1) RSB underflow
++ *
++ * 2) Poisoned RSB entry
++ *
++ * When retpoline is enabled, both are mitigated by filling/clearing
++ * the RSB.
++ *
++ * When IBRS is enabled, while #1 would be mitigated by the IBRS branch
++ * prediction isolation protections, RSB still needs to be cleared
++ * because of #2. Note that SMEP provides no protection here, unlike
++ * user-space-poisoned RSB entries.
++ *
++ * eIBRS, on the other hand, has RSB-poisoning protections, so it
++ * doesn't need RSB clearing after vmexit.
++ */
++ if (boot_cpu_has(X86_FEATURE_RETPOLINE) ||
++ boot_cpu_has(X86_FEATURE_KERNEL_IBRS))
++ setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
++
+ /*
+ * Retpoline protects the kernel, but doesn't protect firmware. IBRS
+ * and Enhanced IBRS protect firmware too, so enable IBRS around
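The mitigation matrix spelled out in the two comments above reduces to a
simple decision, sketched here as standalone C (the enum and function names
are illustrative, not kernel identifiers):

    /* Sketch: when does a vmexit need explicit RSB filling? */
    enum spec_v2_mode { MODE_RETPOLINE, MODE_IBRS, MODE_EIBRS };

    static bool rsb_fill_needed_on_vmexit(enum spec_v2_mode mode)
    {
        switch (mode) {
        case MODE_RETPOLINE:
            /* Filling mitigates both underflow (#1) and poisoned entries (#2). */
            return true;
        case MODE_IBRS:
            /* IBRS isolation covers #1, but guest-poisoned entries (#2) remain. */
            return true;
        case MODE_EIBRS:
            /* eIBRS has its own RSB-poisoning protection. */
            return false;
        }
        return true; /* unknown mode: fail safe */
    }

This is why the hunk forces X86_FEATURE_RSB_VMEXIT for both retpoline and
legacy IBRS (X86_FEATURE_KERNEL_IBRS), but not for eIBRS.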
+diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
+index 8641ea74a307..4c743fa98a1f 100644
+--- a/arch/x86/kvm/vmx/vmenter.S
++++ b/arch/x86/kvm/vmx/vmenter.S
+@@ -194,15 +194,15 @@ SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL)
+ * IMPORTANT: RSB filling and SPEC_CTRL handling must be done before
+ * the first unbalanced RET after vmexit!
+ *
+- * For retpoline, RSB filling is needed to prevent poisoned RSB entries
+- * and (in some cases) RSB underflow.
++ * For retpoline or IBRS, RSB filling is needed to prevent poisoned RSB
++ * entries and (in some cases) RSB underflow.
+ *
+ * eIBRS has its own protection against poisoned RSB, so it doesn't
+ * need the RSB filling sequence. But it does need to be enabled
+ * before the first unbalanced RET.
+ */
+
+- FILL_RETURN_BUFFER %_ASM_CX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
++ FILL_RETURN_BUFFER %_ASM_CX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT
+
+ pop %_ASM_ARG2 /* @flags */
+ pop %_ASM_ARG1 /* @vmx */
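For reference, FILL_RETURN_BUFFER in kernels of this era is an ALTERNATIVE,
gated on the feature bit passed as its third argument (X86_FEATURE_RSB_VMEXIT
after this patch), around __FILL_RETURN_BUFFER in
arch/x86/include/asm/nospec-branch.h. A simplified sketch of what that
sequence does (label names and exact shape here are illustrative):

        /* Push RSB_CLEAR_LOOPS (32) benign return targets onto the RSB:
         * two CALLs per loop iteration, each followed by a speculation
         * trap so a RET that consumes one of these entries only spins. */
        mov     $(RSB_CLEAR_LOOPS / 2), %ecx
    .Lfill_loop:
        call    .Lsecond_call
    .Ltrap1:
        pause
        lfence
        jmp     .Ltrap1
    .Lsecond_call:
        call    .Lnext_iter
    .Ltrap2:
        pause
        lfence
        jmp     .Ltrap2
    .Lnext_iter:
        dec     %ecx
        jnz     .Lfill_loop
        /* Drop the 32 accumulated return addresses (64-bit stack). */
        add     $(RSB_CLEAR_LOOPS * 8), %rsp

Re-keying this sequence from X86_FEATURE_RETPOLINE to the new
X86_FEATURE_RSB_VMEXIT is what lets the IBRS case opt in to RSB filling on
vmexit without enabling retpolines.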