author     Salvatore Bonaccorso <carnil@debian.org>    2019-12-18 18:06:31 +0100
committer  Salvatore Bonaccorso <carnil@debian.org>    2019-12-18 22:50:21 +0100
commit     102987a83771aa994821d4d982d06faa64c789e4 (patch)
tree       3e5ab5206886ec297a1a97ce80f504d75468ed2f /debian/patches-rt/powerpc-preempt-lazy-support.patch
parent     479cb120ecb2b3f2c4d929a7b57860248d6f79bd (diff)
download   linux-debian-102987a83771aa994821d4d982d06faa64c789e4.tar.gz
[rt] Update to 5.4.3-rt1 and re-enable
Adjust for context changes due to backport of e66b39af00f4 ("workqueue: Fix pwq ref leak in rescuer_thread()") and def98c84b6cd ("workqueue: Fix spurious sanity check failures in destroy_workqueue()") in 5.4.4.
Diffstat (limited to 'debian/patches-rt/powerpc-preempt-lazy-support.patch')
-rw-r--r--  debian/patches-rt/powerpc-preempt-lazy-support.patch  |  24
1 file changed, 12 insertions(+), 12 deletions(-)
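For orientation, the hunks below refresh the RT patch that reworks the powerpc interrupt-return path so that, besides _TIF_NEED_RESCHED, it also honours the lazy-preempt counter (TI_PREEMPT_LAZY) and the _TIF_NEED_RESCHED_LAZY flag added by the RT series. A rough C-level sketch of the test the updated 32-bit return path performs is shown here; the helper name and struct field names are illustrative only, not the kernel's actual symbols.

/*
 * Rough C equivalent of the return-from-interrupt check added to
 * entry_32.S in the patch below. should_resched_on_irq_return() and
 * the field names are illustrative, not real kernel helpers.
 */
static bool should_resched_on_irq_return(struct thread_info *ti)
{
	if (ti->preempt_count)			/* TI_PREEMPT non-zero: just restore regs */
		return false;
	if (ti->flags & _TIF_NEED_RESCHED)	/* ordinary resched request */
		return true;
	if (ti->preempt_lazy_count)		/* TI_PREEMPT_LAZY non-zero: lazy preempt off */
		return false;
	return ti->flags & _TIF_NEED_RESCHED_LAZY;	/* lazy resched request */
}

In the actual assembly this is followed by the existing MSR_EE test, so the scheduler is only entered if interrupts were enabled on entry; otherwise the code falls through to restore_kuap.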
diff --git a/debian/patches-rt/powerpc-preempt-lazy-support.patch b/debian/patches-rt/powerpc-preempt-lazy-support.patch
index 774244975..11aab2576 100644
--- a/debian/patches-rt/powerpc-preempt-lazy-support.patch
+++ b/debian/patches-rt/powerpc-preempt-lazy-support.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 1 Nov 2012 10:14:11 +0100
Subject: powerpc: Add support for lazy preemption
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2.17-rt9.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.4/older/patches-5.4.3-rt1.tar.xz
Implement the powerpc pieces for lazy preempt.
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
-@@ -214,6 +214,7 @@ config PPC
+@@ -221,6 +221,7 @@ config PPC
select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
@@ -134,22 +134,22 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
addi r12,r2,TI_FLAGS
3: lwarx r8,0,r12
andc r8,r8,r11
-@@ -890,7 +892,14 @@ user_exc_return: /* r10 contains MSR_KE
+@@ -903,7 +905,14 @@ user_exc_return: /* r10 contains MSR_KE
cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
bne restore_kuap
andi. r8,r8,_TIF_NEED_RESCHED
+ bne+ 1f
-+ lwz r0,TI_PREEMPT_LAZY(r9)
++ lwz r0,TI_PREEMPT_LAZY(r2)
+ cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
+ bne restore_kuap
-+ lwz r0,TI_FLAGS(r9)
++ lwz r0,TI_FLAGS(r2)
+ andi. r0,r0,_TIF_NEED_RESCHED_LAZY
beq+ restore_kuap
+1:
lwz r3,_MSR(r1)
andi. r0,r3,MSR_EE /* interrupts off? */
beq restore_kuap /* don't schedule if so */
-@@ -1211,7 +1220,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE
+@@ -1224,7 +1233,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
do_work: /* r10 contains MSR_KERNEL here */
@@ -158,7 +158,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
beq do_user_signal
do_resched: /* r10 contains MSR_KERNEL here */
-@@ -1232,7 +1241,7 @@ do_resched: /* r10 contains MSR_KERNEL
+@@ -1245,7 +1254,7 @@ do_resched: /* r10 contains MSR_KERNEL
SYNC
MTMSRD(r10) /* disable interrupts */
lwz r9,TI_FLAGS(r2)
@@ -169,7 +169,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
beq restore_user
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
-@@ -249,7 +249,9 @@ system_call: /* label this so stack tr
+@@ -240,7 +240,9 @@ system_call: /* label this so stack tr
ld r9,TI_FLAGS(r12)
li r11,-MAX_ERRNO
@@ -180,7 +180,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
bne- .Lsyscall_exit_work
andi. r0,r8,MSR_FP
-@@ -372,25 +374,25 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+@@ -363,25 +365,25 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
If TIF_NOERROR is set, just save r3 as it is. */
@@ -210,7 +210,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
addi r12,r12,TI_FLAGS
3: ldarx r10,0,r12
andc r10,r10,r11
-@@ -784,7 +786,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+@@ -786,7 +788,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
bl restore_math
b restore
#endif
@@ -219,9 +219,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
beq 2f
bl restore_interrupts
SCHEDULE_USER
-@@ -846,10 +848,18 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+@@ -848,10 +850,18 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
- #ifdef CONFIG_PREEMPT
+ #ifdef CONFIG_PREEMPTION
/* Check if we need to preempt */
+ lwz r8,TI_PREEMPT(r9)
+ cmpwi 0,r8,0 /* if non-zero, just restore regs and return */