diff options
Diffstat (limited to 'debian/patches-rt/arm64-KVM-compute_layout-before-altenates-are-applie.patch')
-rw-r--r-- | debian/patches-rt/arm64-KVM-compute_layout-before-altenates-are-applie.patch | 80 |
1 file changed, 49 insertions, 31 deletions
diff --git a/debian/patches-rt/arm64-KVM-compute_layout-before-altenates-are-applie.patch b/debian/patches-rt/arm64-KVM-compute_layout-before-altenates-are-applie.patch index 2f50fa1f3..e896f4cad 100644 --- a/debian/patches-rt/arm64-KVM-compute_layout-before-altenates-are-applie.patch +++ b/debian/patches-rt/arm64-KVM-compute_layout-before-altenates-are-applie.patch @@ -1,45 +1,62 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Date: Thu, 26 Jul 2018 09:13:42 +0200 -Subject: [PATCH] arm64: KVM: compute_layout before altenates are applied -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2.17-rt9.tar.xz +Subject: [PATCH] arm64: KVM: Invoke compute_layout() before alternatives are + applied +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.4/older/patches-5.4.3-rt1.tar.xz compute_layout() is invoked as part of an alternative fixup under -stop_machine() and needs a sleeping lock as part of get_random_long(). +stop_machine(). This function invokes get_random_long() which acquires a +sleeping lock on -RT which can not be acquired in this context. -Invoke compute_layout() before the alternatives are applied. +Rename compute_layout() to kvm_compute_layout() and invoke it before +stop_machine() applies the alternatives. Add a __init prefix to +kvm_compute_layout() because the caller has it, too (and so the code can be +discarded after boot). 
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- - arch/arm64/include/asm/alternative.h | 6 ++++++ - arch/arm64/kernel/alternative.c | 1 + - arch/arm64/kvm/va_layout.c | 7 +------ - 3 files changed, 8 insertions(+), 6 deletions(-) + arch/arm64/include/asm/kvm_mmu.h | 1 + + arch/arm64/kernel/smp.c | 4 ++++ + arch/arm64/kvm/va_layout.c | 8 +------- + 3 files changed, 6 insertions(+), 7 deletions(-) ---- a/arch/arm64/include/asm/alternative.h -+++ b/arch/arm64/include/asm/alternative.h -@@ -35,6 +35,12 @@ void apply_alternatives_module(void *sta - static inline void apply_alternatives_module(void *start, size_t length) { } - #endif +--- a/arch/arm64/include/asm/kvm_mmu.h ++++ b/arch/arm64/include/asm/kvm_mmu.h +@@ -91,6 +91,7 @@ alternative_cb_end -+#ifdef CONFIG_KVM_ARM_HOST + void kvm_update_va_mask(struct alt_instr *alt, + __le32 *origptr, __le32 *updptr, int nr_inst); +void kvm_compute_layout(void); -+#else -+static inline void kvm_compute_layout(void) { } -+#endif -+ - #define ALTINSTR_ENTRY(feature,cb) \ - " .word 661b - .\n" /* label */ \ - " .if " __stringify(cb) " == 0\n" \ ---- a/arch/arm64/kernel/alternative.c -+++ b/arch/arm64/kernel/alternative.c -@@ -238,6 +238,7 @@ static int __apply_alternatives_multi_st - void __init apply_alternatives_all(void) + + static inline unsigned long __kern_hyp_va(unsigned long v) { - /* better not try code patching on a live SMP system */ -+ kvm_compute_layout(); - stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask); +--- a/arch/arm64/kernel/smp.c ++++ b/arch/arm64/kernel/smp.c +@@ -31,6 +31,7 @@ + #include <linux/of.h> + #include <linux/irq_work.h> + #include <linux/kexec.h> ++#include <linux/kvm_host.h> + + #include <asm/alternative.h> + #include <asm/atomic.h> +@@ -39,6 +40,7 @@ + #include <asm/cputype.h> + #include <asm/cpu_ops.h> + #include <asm/daifflags.h> ++#include <asm/kvm_mmu.h> + #include <asm/mmu_context.h> + #include <asm/numa.h> + #include <asm/pgtable.h> +@@ -408,6 +410,8 
@@ static void __init hyp_mode_check(void) + "CPU: CPUs started in inconsistent modes"); + else + pr_info("CPU: All CPU(s) started at EL1\n"); ++ if (IS_ENABLED(CONFIG_KVM_ARM_HOST)) ++ kvm_compute_layout(); } + void __init smp_cpus_done(unsigned int max_cpus) --- a/arch/arm64/kvm/va_layout.c +++ b/arch/arm64/kvm/va_layout.c @@ -22,7 +22,7 @@ static u8 tag_lsb; @@ -51,16 +68,17 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> { phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start); u64 hyp_va_msb; -@@ -110,8 +110,6 @@ void __init kvm_update_va_mask(struct al +@@ -110,9 +110,6 @@ void __init kvm_update_va_mask(struct al BUG_ON(nr_inst != 5); - if (!has_vhe() && !va_mask) - compute_layout(); - +- for (i = 0; i < nr_inst; i++) { u32 rd, rn, insn, oinsn; -@@ -156,9 +154,6 @@ void kvm_patch_vector_branch(struct alt_ + +@@ -156,9 +153,6 @@ void kvm_patch_vector_branch(struct alt_ return; } |