Diffstat (limited to 'debian/patches/bugfix/x86/retbleed/0013-x86-sev-Avoid-using-__x86_return_thunk.patch')
-rw-r--r--  debian/patches/bugfix/x86/retbleed/0013-x86-sev-Avoid-using-__x86_return_thunk.patch  |  43
1 file changed, 43 insertions(+), 0 deletions(-)
diff --git a/debian/patches/bugfix/x86/retbleed/0013-x86-sev-Avoid-using-__x86_return_thunk.patch b/debian/patches/bugfix/x86/retbleed/0013-x86-sev-Avoid-using-__x86_return_thunk.patch
new file mode 100644
index 000000000..508441811
--- /dev/null
+++ b/debian/patches/bugfix/x86/retbleed/0013-x86-sev-Avoid-using-__x86_return_thunk.patch
@@ -0,0 +1,43 @@
+From: Kim Phillips <kim.phillips@amd.com>
+Date: Tue, 14 Jun 2022 23:15:44 +0200
+Subject: x86/sev: Avoid using __x86_return_thunk
+Origin: https://git.kernel.org/linus/0ee9073000e8791f8b134a8ded31bcc767f7f232
+
+Specifically, it's because __enc_copy() encrypts the kernel after
+being relocated outside the kernel in sme_encrypt_execute(), and the
+RET macro's jmp offset isn't amended prior to execution.
+
+Signed-off-by: Kim Phillips <kim.phillips@amd.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/mm/mem_encrypt_boot.S | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/mm/mem_encrypt_boot.S b/arch/x86/mm/mem_encrypt_boot.S
+index 3d1dba05fce4..d94dea450fa6 100644
+--- a/arch/x86/mm/mem_encrypt_boot.S
++++ b/arch/x86/mm/mem_encrypt_boot.S
+@@ -65,7 +65,9 @@ SYM_FUNC_START(sme_encrypt_execute)
+ movq %rbp, %rsp /* Restore original stack pointer */
+ pop %rbp
+
+- RET
++ /* Offset to __x86_return_thunk would be wrong here */
++ ret
++ int3
+ SYM_FUNC_END(sme_encrypt_execute)
+
+ SYM_FUNC_START(__enc_copy)
+@@ -151,6 +153,8 @@ SYM_FUNC_START(__enc_copy)
+ pop %r12
+ pop %r15
+
+- RET
++ /* Offset to __x86_return_thunk would be wrong here */
++ ret
++ int3
+ .L__enc_copy_end:
+ SYM_FUNC_END(__enc_copy)
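
Background on the fix (not part of the patch itself): with return thunks in effect, the kernel's RET macro assembles to a relative jmp to __x86_return_thunk rather than a bare ret. A rel32 jump encodes a fixed displacement from the next instruction, so it only reaches the thunk when the code runs at its link-time address. sme_encrypt_execute() copies __enc_copy() into a scratch workarea and executes it there, so the stale displacement would send execution into arbitrary memory. A plain ret takes its target from the stack and is position-independent; the int3 behind it blocks straight-line speculation past the return. A minimal sketch of the failure mode, with hypothetical labels that are not taken from the patch:

	func:				/* linked and patched at its home address */
		jmp __x86_return_thunk	/* E9 rel32: target = next-RIP + rel32 */

	/*
	 * After the routine is copied to a workarea and called there,
	 * next-RIP changes but rel32 does not, so the jmp lands at
	 * (workarea RIP + old displacement) -- not at the thunk.
	 *
	 * The position-independent form the patch switches to:
	 */
		ret			/* C3: return address comes off the stack */
		int3			/* guard against straight-line speculation */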