Diffstat (limited to 'debian/patches/bugfix/x86/retbleed/0011-x86-kvm-Fix-SETcc-emulation-for-return-thunks.patch')
-rw-r--r--  debian/patches/bugfix/x86/retbleed/0011-x86-kvm-Fix-SETcc-emulation-for-return-thunks.patch  89
1 file changed, 89 insertions, 0 deletions
diff --git a/debian/patches/bugfix/x86/retbleed/0011-x86-kvm-Fix-SETcc-emulation-for-return-thunks.patch b/debian/patches/bugfix/x86/retbleed/0011-x86-kvm-Fix-SETcc-emulation-for-return-thunks.patch
new file mode 100644
index 000000000..bf89ec7ce
--- /dev/null
+++ b/debian/patches/bugfix/x86/retbleed/0011-x86-kvm-Fix-SETcc-emulation-for-return-thunks.patch
@@ -0,0 +1,89 @@
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 14 Jun 2022 23:15:42 +0200
+Subject: x86/kvm: Fix SETcc emulation for return thunks
+Origin: https://git.kernel.org/linus/af2e140f34208a5dfb6b7a8ad2d56bda88f0524d
+
+Prepare the SETcc fastop stuff for when RET can be larger still.
+
+The tricky bit here is that the expressions should not only be
+constant C expressions, but also absolute GAS expressions. This means
+no ?: and 'true' is ~0.
+
+Also ensure em_setcc() has the same alignment as the actual FOP_SETCC()
+ops; this ensures there cannot be an alignment hole between em_setcc()
+and the first op.
+
+Additionally, add a .skip directive to the FOP_SETCC() macro to fill
+any remaining space with INT3 traps; however the primary purpose of
+this directive is to generate AS warnings when the remaining space
+goes negative, which is a very good indication that the alignment
+magic went sideways.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kvm/emulate.c | 28 +++++++++++++++-------------
+ 1 file changed, 15 insertions(+), 13 deletions(-)
+
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 89b11e7dca8a..b01437015f99 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -325,13 +325,15 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
+ #define FOP_RET(name) \
+ __FOP_RET(#name)
+
+-#define FOP_START(op) \
++#define __FOP_START(op, align) \
+ extern void em_##op(struct fastop *fake); \
+ asm(".pushsection .text, \"ax\" \n\t" \
+ ".global em_" #op " \n\t" \
+- ".align " __stringify(FASTOP_SIZE) " \n\t" \
++ ".align " __stringify(align) " \n\t" \
+ "em_" #op ":\n\t"
+
++#define FOP_START(op) __FOP_START(op, FASTOP_SIZE)
++
+ #define FOP_END \
+ ".popsection")
+
+@@ -435,16 +437,15 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
+ /*
+ * Depending on .config the SETcc functions look like:
+ *
+- * ENDBR [4 bytes; CONFIG_X86_KERNEL_IBT]
+- * SETcc %al [3 bytes]
+- * RET [1 byte]
+- * INT3 [1 byte; CONFIG_SLS]
+- *
+- * Which gives possible sizes 4, 5, 8 or 9. When rounded up to the
+- * next power-of-two alignment they become 4, 8 or 16 resp.
++ * ENDBR [4 bytes; CONFIG_X86_KERNEL_IBT]
++ * SETcc %al [3 bytes]
++ * RET | JMP __x86_return_thunk [1,5 bytes; CONFIG_RETPOLINE]
++ * INT3 [1 byte; CONFIG_SLS]
+ */
+-#define SETCC_LENGTH (ENDBR_INSN_SIZE + 4 + IS_ENABLED(CONFIG_SLS))
+-#define SETCC_ALIGN (4 << IS_ENABLED(CONFIG_SLS) << HAS_KERNEL_IBT)
++#define RET_LENGTH (1 + (4 * IS_ENABLED(CONFIG_RETPOLINE)) + \
++ IS_ENABLED(CONFIG_SLS))
++#define SETCC_LENGTH (ENDBR_INSN_SIZE + 3 + RET_LENGTH)
++#define SETCC_ALIGN (4 << ((SETCC_LENGTH > 4) & 1) << ((SETCC_LENGTH > 8) & 1))
+ static_assert(SETCC_LENGTH <= SETCC_ALIGN);
+
+ #define FOP_SETCC(op) \
+@@ -453,9 +454,10 @@ static_assert(SETCC_LENGTH <= SETCC_ALIGN);
+ #op ": \n\t" \
+ ASM_ENDBR \
+ #op " %al \n\t" \
+- __FOP_RET(#op)
++ __FOP_RET(#op) \
++ ".skip " __stringify(SETCC_ALIGN) " - (.-" #op "), 0xcc \n\t"
+
+-FOP_START(setcc)
++__FOP_START(setcc, SETCC_ALIGN)
+ FOP_SETCC(seto)
+ FOP_SETCC(setno)
+ FOP_SETCC(setc)
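
As a side note for readers working through the size arithmetic in the hunk above, the following is a small, self-contained sketch (not part of the patch) that mirrors the RET_LENGTH / SETCC_LENGTH / SETCC_ALIGN formulas for each relevant .config combination. The function and parameter names (setcc_align, ibt, retpoline, sls) are made up for illustration; only the expressions themselves are taken from the patch, including the shift trick that stands in for the ?: operator GAS absolute expressions cannot use.

/*
 * Sketch of the SETcc size/alignment arithmetic from the patch above.
 * Standalone user-space C; the names below are illustrative only.
 */
#include <assert.h>
#include <stdio.h>

static unsigned int setcc_align(int ibt, int retpoline, int sls)
{
	unsigned int endbr = ibt ? 4 : 0;              /* ENDBR_INSN_SIZE                                     */
	unsigned int ret   = 1 + 4 * retpoline + sls;  /* RET_LENGTH: RET or JMP __x86_return_thunk, + INT3   */
	unsigned int len   = endbr + 3 + ret;          /* SETCC_LENGTH: ENDBR + SETcc %al + return sequence   */

	/*
	 * Same power-of-two rounding as SETCC_ALIGN; written with shifts
	 * rather than ?: because the real macro must also be usable as a
	 * GAS absolute expression.
	 */
	unsigned int align = 4 << ((len > 4) & 1) << ((len > 8) & 1);

	assert(len <= align);   /* mirrors the static_assert() in the patch */
	printf("ibt=%d retpoline=%d sls=%d -> length=%2u align=%2u, .skip pads %u byte(s) of INT3\n",
	       ibt, retpoline, sls, len, align, align - len);
	return align;
}

int main(void)
{
	/* Walk the possible CONFIG_X86_KERNEL_IBT / CONFIG_RETPOLINE / CONFIG_SLS combinations. */
	for (int ibt = 0; ibt <= 1; ibt++)
		for (int retpoline = 0; retpoline <= 1; retpoline++)
			for (int sls = 0; sls <= 1; sls++)
				setcc_align(ibt, retpoline, sls);
	return 0;
}

Running the sketch shows the possible lengths of 4, 5, 8, 9, 12 and 13 bytes rounding up to alignments of 4, 8 or 16; the gap between the two is what the .skip directive fills with 0xcc (INT3), and the static_assert() catches the case where the length would exceed the alignment.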