Diffstat (limited to 'debian/patches-rt/x86-preempt-lazy.patch')
-rw-r--r--  debian/patches-rt/x86-preempt-lazy.patch | 22
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/debian/patches-rt/x86-preempt-lazy.patch b/debian/patches-rt/x86-preempt-lazy.patch
index 767f577fa..e15fbf9b2 100644
--- a/debian/patches-rt/x86-preempt-lazy.patch
+++ b/debian/patches-rt/x86-preempt-lazy.patch
@@ -1,7 +1,7 @@
Subject: x86: Support for lazy preemption
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 01 Nov 2012 11:03:47 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Implement the x86 pieces for lazy preempt.
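
[Editor's note, not part of the patch: the hunks below gate the call to
preempt_schedule_irq on x86's __preempt_count and, under
CONFIG_PREEMPT_LAZY, additionally on the per-task preempt_lazy_count and
the TIF_NEED_RESCHED_LAZY flag. The standalone C model below restates
that decision in one place. It is a sketch, not kernel code: struct
thread_model and want_preempt_irq are illustrative names, and the
PREEMPT_ENABLED value assumes x86's inverted PREEMPT_NEED_RESCHED bit.]

/* Standalone model of the lazy-preempt decision made by the assembly
 * hunks below. Field names mirror the patch (preempt_lazy_count,
 * TIF_NEED_RESCHED_LAZY); the struct and entry point are illustrative
 * assumptions, not definitions copied from the kernel. */
#include <stdbool.h>
#include <stdio.h>

/* x86 keeps an *inverted* NEED_RESCHED bit folded into the count, so
 * "enabled, nothing pending" reads as this value, not as zero. */
#define PREEMPT_ENABLED 0x80000000u

struct thread_model {
	unsigned int preempt_count;      /* x86 per-CPU __preempt_count  */
	unsigned int preempt_lazy_count; /* per-task lazy-disable depth  */
	bool need_resched_lazy;          /* TIF_NEED_RESCHED_LAZY        */
};

static bool want_preempt_irq(const struct thread_model *t)
{
	/* count == 0 means: preemption enabled AND hard NEED_RESCHED
	 * set (the inverted bit has been cleared). Always schedule. */
	if (t->preempt_count == 0)
		return true;
	/* Lazy path: count must read exactly "enabled, no hard
	 * resched pending"; any nonzero disable depth blocks it. */
	if (t->preempt_count != PREEMPT_ENABLED)
		return false;
	/* Lazy preemption must not itself be disabled ... */
	if (t->preempt_lazy_count)
		return false;
	/* ... and a lazy resched request must actually be pending. */
	return t->need_resched_lazy;
}

int main(void)
{
	struct thread_model t = { PREEMPT_ENABLED, 0, true };
	printf("lazy preempt fires: %d\n", want_preempt_irq(&t)); /* 1 */
	t.preempt_lazy_count = 1;
	printf("lazy disabled:      %d\n", want_preempt_irq(&t)); /* 0 */
	return 0;
}

[The point of the split: a task that merely used up its scheduler slice
sets only the lazy flag and is not preempted while preempt_lazy_count is
held, whereas a hard NEED_RESCHED (count reads 0 here) always schedules.]
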
@@ -18,17 +18,17 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
-@@ -183,6 +183,7 @@ config X86
+@@ -192,6 +192,7 @@ config X86
select HAVE_PCI
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_PREEMPT_LAZY
select HAVE_RCU_TABLE_FREE if PARAVIRT
- select HAVE_RCU_TABLE_INVALIDATE if HAVE_RCU_TABLE_FREE
select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_RELIABLE_STACKTRACE if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
-@@ -134,7 +134,7 @@ static long syscall_trace_enter(struct p
+@@ -135,7 +135,7 @@ static long syscall_trace_enter(struct p
#define EXIT_TO_USERMODE_LOOP_FLAGS \
(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
-@@ -149,7 +149,7 @@ static void exit_to_usermode_loop(struct
+@@ -150,7 +150,7 @@ static void exit_to_usermode_loop(struct
/* We have work to do. */
local_irq_enable();
@@ -48,10 +48,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
-@@ -767,8 +767,25 @@ END(ret_from_exception)
+@@ -768,8 +768,25 @@ END(ret_from_exception)
+ #ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
DISABLE_INTERRUPTS(CLBR_ANY)
- .Lneed_resched:
+ # preempt count == 0 + NEED_RS set?
cmpl $0, PER_CPU_VAR(__preempt_count)
+#ifndef CONFIG_PREEMPT_LAZY
@@ -60,7 +60,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ jz test_int_off
+
+ # atleast preempt count == 0 ?
-+ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
++ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
+ jne restore_all_kernel
+
+ movl PER_CPU_VAR(current_task), %ebp
@@ -79,14 +79,14 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
@@ -646,7 +646,23 @@ GLOBAL(swapgs_restore_regs_and_return_to
btl $9, EFLAGS(%rsp) /* were interrupts off? */
jnc 1f
- 0: cmpl $0, PER_CPU_VAR(__preempt_count)
+ cmpl $0, PER_CPU_VAR(__preempt_count)
+#ifndef CONFIG_PREEMPT_LAZY
jnz 1f
+#else
+ jz do_preempt_schedule_irq
+
+ # atleast preempt count == 0 ?
-+ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
++ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
+ jnz 1f
+
+ movq PER_CPU_VAR(current_task), %rcx
@@ -98,8 +98,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+do_preempt_schedule_irq:
+#endif
call preempt_schedule_irq
- jmp 0b
1:
+ #endif
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -89,17 +89,46 @@ static __always_inline void __preempt_co