Diffstat (limited to 'debian/patches-rt/powerpc-preempt-lazy-support.patch')
 debian/patches-rt/powerpc-preempt-lazy-support.patch | 100
 1 file changed, 38 insertions(+), 62 deletions(-)
diff --git a/debian/patches-rt/powerpc-preempt-lazy-support.patch b/debian/patches-rt/powerpc-preempt-lazy-support.patch
index 9c92643a3..0890cb3dd 100644
--- a/debian/patches-rt/powerpc-preempt-lazy-support.patch
+++ b/debian/patches-rt/powerpc-preempt-lazy-support.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 1 Nov 2012 10:14:11 +0100
Subject: powerpc: Add support for lazy preemption
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Implement the powerpc pieces for lazy preempt.
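
For orientation, the decision the entry_32.S/entry_64.S hunks below encode
on the interrupt-return path is roughly the following C sketch. This is not
code from the patch; the helper name and the bit values are illustrative
only (the real bit assignments are made in
arch/powerpc/include/asm/thread_info.h by this patch).

  #include <stdbool.h>

  /* Illustrative bit values, not the real assignments. */
  #define _TIF_NEED_RESCHED       (1UL << 2)
  #define _TIF_NEED_RESCHED_LAZY  (1UL << 11)
  #define _TIF_NEED_RESCHED_MASK  (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)

  /* Hypothetical helper mirroring the assembly checks. */
  static bool should_preempt(unsigned long ti_flags,
                             int preempt_count, int preempt_lazy_count)
  {
      if (preempt_count != 0)            /* preemption disabled */
          return false;
      if (ti_flags & _TIF_NEED_RESCHED)  /* hard request: preempt now */
          return true;
      if (preempt_lazy_count != 0)       /* lazy preemption disabled */
          return false;
      return ti_flags & _TIF_NEED_RESCHED_LAZY;  /* lazy request */
  }

A hard NEED_RESCHED is honoured immediately; a lazy request only once both
counters have dropped to zero, which is the point of lazy preemption:
SCHED_OTHER wakeups do not preempt a task that is about to leave a
preempt-disabled region anyway.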
@@ -10,32 +10,32 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arch/powerpc/Kconfig | 1 +
arch/powerpc/include/asm/thread_info.h | 16 ++++++++++++----
arch/powerpc/kernel/asm-offsets.c | 1 +
- arch/powerpc/kernel/entry_32.S | 29 +++++++++++++++++++----------
- arch/powerpc/kernel/entry_64.S | 26 ++++++++++++++++++--------
- 5 files changed, 51 insertions(+), 22 deletions(-)
+ arch/powerpc/kernel/entry_32.S | 23 ++++++++++++++++-------
+ arch/powerpc/kernel/entry_64.S | 24 +++++++++++++++++-------
+ 5 files changed, 47 insertions(+), 18 deletions(-)
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
-@@ -219,6 +219,7 @@ config PPC
+@@ -213,6 +213,7 @@ config PPC
select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_PREEMPT_LAZY
select HAVE_RCU_TABLE_FREE if SMP
- select HAVE_REGS_AND_STACK_ACCESS_API
- select HAVE_RELIABLE_STACKTRACE if PPC64 && CPU_LITTLE_ENDIAN
+ select HAVE_RCU_TABLE_NO_INVALIDATE if HAVE_RCU_TABLE_FREE
+ select HAVE_MMU_GATHER_PAGE_SIZE
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
-@@ -38,6 +38,8 @@ struct thread_info {
- int cpu; /* cpu we're on */
+@@ -30,6 +30,8 @@
+ struct thread_info {
int preempt_count; /* 0 => preemptable,
<0 => BUG */
-+ int preempt_lazy_count; /* 0 => preemptable,
++ int preempt_lazy_count; /* 0 => preemptable,
+ <0 => BUG */
unsigned long local_flags; /* private flags for thread */
#ifdef CONFIG_LIVEPATCH
unsigned long *livepatch_sp;
-@@ -99,11 +101,12 @@ void arch_setup_new_exec(void);
+@@ -80,11 +82,12 @@ void arch_setup_new_exec(void);
#define TIF_SINGLESTEP 8 /* singlestepping active */
#define TIF_NOHZ 9 /* in adaptive nohz mode */
#define TIF_SECCOMP 10 /* secure computing */
@@ -51,7 +51,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
for stack store? */
#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
-@@ -112,6 +115,9 @@ void arch_setup_new_exec(void);
+@@ -93,6 +96,9 @@ void arch_setup_new_exec(void);
#endif
#define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_32BIT 20 /* 32 bit binary */
@@ -61,7 +61,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* as above, but as bit values */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
-@@ -131,6 +137,7 @@ void arch_setup_new_exec(void);
+@@ -112,6 +118,7 @@ void arch_setup_new_exec(void);
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
#define _TIF_NOHZ (1<<TIF_NOHZ)
@@ -69,7 +69,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define _TIF_FSCHECK (1<<TIF_FSCHECK)
#define _TIF_SYSCALL_EMU (1<<TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
-@@ -140,8 +147,9 @@ void arch_setup_new_exec(void);
+@@ -121,8 +128,9 @@ void arch_setup_new_exec(void);
#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
_TIF_NOTIFY_RESUME | _TIF_UPROBE | \
_TIF_RESTORE_TM | _TIF_PATCH_PENDING | \
@@ -82,19 +82,19 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
-@@ -161,6 +161,7 @@ int main(void)
+@@ -167,6 +167,7 @@ int main(void)
OFFSET(TI_FLAGS, thread_info, flags);
OFFSET(TI_LOCAL_FLAGS, thread_info, local_flags);
OFFSET(TI_PREEMPT, thread_info, preempt_count);
+ OFFSET(TI_PREEMPT_LAZY, thread_info, preempt_lazy_count);
- OFFSET(TI_TASK, thread_info, task);
- OFFSET(TI_CPU, thread_info, cpu);
+ #ifdef CONFIG_PPC64
+ OFFSET(DCACHEL1BLOCKSIZE, ppc64_caches, l1d.block_size);
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
-@@ -393,7 +393,9 @@ reenable_mmu: /* re-enable mmu so we
+@@ -400,7 +400,9 @@
MTMSRD(r10)
- lwz r9,TI_FLAGS(r12)
+ lwz r9,TI_FLAGS(r2)
li r8,-MAX_ERRNO
- andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
+ lis r0,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)@h
@@ -103,7 +103,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
bne- syscall_exit_work
cmplw 0,r3,r8
blt+ syscall_exit_cont
-@@ -511,13 +513,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE
+@@ -515,13 +517,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE
b syscall_dotrace_cont
syscall_exit_work:
@@ -119,52 +119,37 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
bne- 1f
lwz r11,_CCR(r1) /* Load CR */
neg r3,r3
-@@ -526,12 +528,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE
+@@ -530,12 +532,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE
1: stw r6,RESULT(r1) /* Save result */
stw r3,GPR3(r1) /* Update return value */
-2: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
-+2: andis. r0,r9,(_TIF_PERSYSCALL_MASK)@h
++2: andis. r0,r9,(_TIF_PERSYSCALL_MASK)@h
beq 4f
/* Clear per-syscall TIF flags if any are set. */
- li r11,_TIF_PERSYSCALL_MASK
-+ lis r11,_TIF_PERSYSCALL_MASK@h
- addi r12,r12,TI_FLAGS
++ lis r11,_TIF_PERSYSCALL_MASK@h
+ addi r12,r2,TI_FLAGS
3: lwarx r8,0,r12
andc r8,r8,r11
-@@ -888,7 +890,14 @@ user_exc_return: /* r10 contains MSR_KE
+@@ -890,7 +892,14 @@ user_exc_return: /* r10 contains MSR_KE
cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
- bne restore
+ bne restore_kuap
andi. r8,r8,_TIF_NEED_RESCHED
+ bne+ 1f
+ lwz r0,TI_PREEMPT_LAZY(r9)
-+ cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
-+ bne restore
++ cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
++ bne restore_kuap
+ lwz r0,TI_FLAGS(r9)
+ andi. r0,r0,_TIF_NEED_RESCHED_LAZY
- beq+ restore
+ beq+ restore_kuap
+1:
lwz r3,_MSR(r1)
andi. r0,r3,MSR_EE /* interrupts off? */
- beq restore /* don't schedule if so */
-@@ -899,11 +908,11 @@ user_exc_return: /* r10 contains MSR_KE
- */
- bl trace_hardirqs_off
- #endif
--1: bl preempt_schedule_irq
-+2: bl preempt_schedule_irq
- CURRENT_THREAD_INFO(r9, r1)
- lwz r3,TI_FLAGS(r9)
-- andi. r0,r3,_TIF_NEED_RESCHED
-- bne- 1b
-+ andi. r0,r3,_TIF_NEED_RESCHED_MASK
-+ bne- 2b
- #ifdef CONFIG_TRACE_IRQFLAGS
- /* And now, to properly rebalance the above, we tell lockdep they
- * are being turned back on, which will happen when we return
-@@ -1232,7 +1241,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE
+ beq restore_kuap /* don't schedule if so */
+@@ -1211,7 +1220,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
do_work: /* r10 contains MSR_KERNEL here */
@@ -173,10 +158,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
beq do_user_signal
do_resched: /* r10 contains MSR_KERNEL here */
-@@ -1253,7 +1262,7 @@ do_resched: /* r10 contains MSR_KERNEL
+@@ -1232,7 +1241,7 @@ do_resched: /* r10 contains MSR_KERNEL
+ SYNC
MTMSRD(r10) /* disable interrupts */
- CURRENT_THREAD_INFO(r9, r1)
- lwz r9,TI_FLAGS(r9)
+ lwz r9,TI_FLAGS(r2)
- andi. r0,r9,_TIF_NEED_RESCHED
+ andi. r0,r9,_TIF_NEED_RESCHED_MASK
bne- do_resched
@@ -184,7 +169,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
beq restore_user
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
-@@ -253,7 +253,9 @@ system_call: /* label this so stack tr
+@@ -249,7 +249,9 @@ system_call: /* label this so stack tr
ld r9,TI_FLAGS(r12)
li r11,-MAX_ERRNO
@@ -195,7 +180,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
bne- .Lsyscall_exit_work
andi. r0,r8,MSR_FP
-@@ -370,25 +372,25 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+@@ -372,25 +374,25 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
If TIF_NOERROR is set, just save r3 as it is. */
@@ -225,7 +210,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
addi r12,r12,TI_FLAGS
3: ldarx r10,0,r12
andc r10,r10,r11
-@@ -780,7 +782,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+@@ -784,7 +786,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
bl restore_math
b restore
#endif
@@ -234,7 +219,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
beq 2f
bl restore_interrupts
SCHEDULE_USER
-@@ -842,10 +844,18 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+@@ -846,10 +848,18 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
#ifdef CONFIG_PREEMPT
/* Check if we need to preempt */
@@ -254,12 +239,3 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
cmpwi cr0,r8,0
bne restore
ld r0,SOFTE(r1)
-@@ -862,7 +872,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
- /* Re-test flags and eventually loop */
- CURRENT_THREAD_INFO(r9, r1)
- ld r4,TI_FLAGS(r9)
-- andi. r0,r4,_TIF_NEED_RESCHED
-+ andi. r0,r4,_TIF_NEED_RESCHED_MASK
- bne 1b
-
- /*