Diffstat (limited to 'debian/patches-rt/x86-preempt-lazy.patch')
-rw-r--r--	debian/patches-rt/x86-preempt-lazy.patch | 50 ++++++++++++++++++++++++++++----------------------
1 file changed, 28 insertions(+), 22 deletions(-)
diff --git a/debian/patches-rt/x86-preempt-lazy.patch b/debian/patches-rt/x86-preempt-lazy.patch
index b35d3a523..85fc4e0e2 100644
--- a/debian/patches-rt/x86-preempt-lazy.patch
+++ b/debian/patches-rt/x86-preempt-lazy.patch
@@ -1,7 +1,7 @@
 Subject: x86: Support for lazy preemption
 From: Thomas Gleixner <tglx@linutronix.de>
 Date: Thu, 01 Nov 2012 11:03:47 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2.17-rt9.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.4/older/patches-5.4.3-rt1.tar.xz
 
 Implement the x86 pieces for lazy preempt.
 
@@ -9,16 +9,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 ---
  arch/x86/Kconfig                   |    1 +
  arch/x86/entry/common.c            |    4 ++--
- arch/x86/entry/entry_32.S          |   17 +++++++++++++++++
+ arch/x86/entry/entry_32.S          |   18 ++++++++++++++++++
  arch/x86/entry/entry_64.S          |   16 ++++++++++++++++
- arch/x86/include/asm/preempt.h     |   31 ++++++++++++++++++++++++++++++-
+ arch/x86/include/asm/preempt.h     |   33 ++++++++++++++++++++++++++++++++-
  arch/x86/include/asm/thread_info.h |   11 +++++++++++
- arch/x86/kernel/asm-offsets.c      |    2 ++
- 7 files changed, 79 insertions(+), 3 deletions(-)
+ arch/x86/kernel/asm-offsets.c      |    5 +++++
+ 7 files changed, 85 insertions(+), 3 deletions(-)
 
 --- a/arch/x86/Kconfig
 +++ b/arch/x86/Kconfig
-@@ -192,6 +192,7 @@ config X86
+@@ -200,6 +200,7 @@ config X86
  	select HAVE_PCI
  	select HAVE_PERF_REGS
  	select HAVE_PERF_USER_STACK_DUMP
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  	select HAVE_RELIABLE_STACKTRACE		if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION
 --- a/arch/x86/entry/common.c
 +++ b/arch/x86/entry/common.c
-@@ -135,7 +135,7 @@ static long syscall_trace_enter(struct p
+@@ -130,7 +130,7 @@ static long syscall_trace_enter(struct p
  
  #define EXIT_TO_USERMODE_LOOP_FLAGS				\
  	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |	\
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  
  static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
  {
-@@ -150,7 +150,7 @@ static void exit_to_usermode_loop(struct
+@@ -145,7 +145,7 @@ static void exit_to_usermode_loop(struct
  	/* We have work to do. */
  	local_irq_enable();
  
@@ -48,31 +48,32 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  #ifdef ARCH_RT_DELAYS_SIGNAL_SEND
 --- a/arch/x86/entry/entry_32.S
 +++ b/arch/x86/entry/entry_32.S
-@@ -768,8 +768,25 @@ END(ret_from_exception)
- #ifdef CONFIG_PREEMPT
- ENTRY(resume_kernel)
+@@ -1106,8 +1106,26 @@ ENTRY(entry_INT80_32)
+ restore_all_kernel:
+ #ifdef CONFIG_PREEMPTION
  	DISABLE_INTERRUPTS(CLBR_ANY)
 +	# preempt count == 0 + NEED_RS set?
  	cmpl	$0, PER_CPU_VAR(__preempt_count)
 +#ifndef CONFIG_PREEMPT_LAZY
- 	jnz	restore_all_kernel
+ 	jnz	.Lno_preempt
 +#else
 +	jz	test_int_off
 +
 +	# atleast preempt count == 0 ?
 +	cmpl	$_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
-+	jne	restore_all_kernel
++	jne	.Lno_preempt
 +
 +	movl	PER_CPU_VAR(current_task), %ebp
 +	cmpl	$0,TASK_TI_preempt_lazy_count(%ebp)	# non-zero preempt_lazy_count ?
-+	jnz	restore_all_kernel
++	jnz	.Lno_preempt
 +
 +	testl	$_TIF_NEED_RESCHED_LAZY, TASK_TI_flags(%ebp)
-+	jz	restore_all_kernel
++	jz	.Lno_preempt
++
 +test_int_off:
 +#endif
  	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
- 	jz	restore_all_kernel
+ 	jz	.Lno_preempt
  	call	preempt_schedule_irq
 --- a/arch/x86/entry/entry_64.S
 +++ b/arch/x86/entry/entry_64.S
@@ -102,7 +103,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  #endif
 --- a/arch/x86/include/asm/preempt.h
 +++ b/arch/x86/include/asm/preempt.h
-@@ -89,17 +89,46 @@ static __always_inline void __preempt_co
+@@ -89,17 +89,48 @@ static __always_inline void __preempt_co
   * a decrement which hits zero means we have no preempt_count and should
   * reschedule.
   */
@@ -117,6 +118,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 +	if (____preempt_count_dec_and_test())
 +		return true;
 +#ifdef CONFIG_PREEMPT_LAZY
++	if (preempt_count())
++		return false;
 +	if (current_thread_info()->preempt_lazy_count)
 +		return false;
 +	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
@@ -132,7 +135,6 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  {
 +#ifdef CONFIG_PREEMPT_LAZY
 +	u32 tmp;
-+
 +	tmp = raw_cpu_read_4(__preempt_count);
 +	if (tmp == preempt_offset)
 +		return true;
@@ -141,6 +143,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 +	tmp &= ~PREEMPT_NEED_RESCHED;
 +	if (tmp != preempt_offset)
 +		return false;
++	/* XXX PREEMPT_LOCK_OFFSET */
 +	if (current_thread_info()->preempt_lazy_count)
 +		return false;
 +	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
@@ -149,7 +152,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 +#endif
  }
  
- #ifdef CONFIG_PREEMPT
+ #ifdef CONFIG_PREEMPTION
 --- a/arch/x86/include/asm/thread_info.h
 +++ b/arch/x86/include/asm/thread_info.h
  @@ -56,17 +56,24 @@ struct task_struct;
@@ -204,15 +207,18 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  /*
 --- a/arch/x86/kernel/asm-offsets.c
 +++ b/arch/x86/kernel/asm-offsets.c
-@@ -39,6 +39,7 @@ static void __used common(void)
+@@ -38,6 +38,10 @@ static void __used common(void)
+ #endif
  
  	BLANK();
- 	OFFSET(TASK_TI_flags, task_struct, thread_info.flags);
++#ifdef CONFIG_PREEMPT_LAZY
++	OFFSET(TASK_TI_flags, task_struct, thread_info.flags);
 +	OFFSET(TASK_TI_preempt_lazy_count, task_struct, thread_info.preempt_lazy_count);
++#endif
  	OFFSET(TASK_addr_limit, task_struct, thread.addr_limit);
  
  	BLANK();
-@@ -92,6 +93,7 @@ static void __used common(void)
+@@ -92,6 +96,7 @@ static void __used common(void)
  	BLANK();
  	DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
 +	DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED);
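The lazy-preemption check this patch wires into __preempt_count_dec_and_test() and should_resched(), and mirrors in the entry_32.S/entry_64.S assembly, boils down to: preempt only when the regular preempt count is zero, the lazy count (thread_info.preempt_lazy_count) is zero, and TIF_NEED_RESCHED_LAZY is set. Below is a minimal standalone C sketch of that decision; the struct and its fields are illustrative stand-ins for the per-CPU __preempt_count, the thread_info field, and the thread flag — they are not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

/*
 * Stand-ins for the real kernel state (illustrative only):
 *   preempt_count      - per-CPU __preempt_count after the decrement
 *   preempt_lazy_count - the thread_info.preempt_lazy_count field this patch adds
 *   need_resched_lazy  - the TIF_NEED_RESCHED_LAZY thread flag
 */
struct task_state {
	unsigned int preempt_count;
	unsigned int preempt_lazy_count;
	bool need_resched_lazy;
};

/*
 * Models the CONFIG_PREEMPT_LAZY branch of __preempt_count_dec_and_test():
 * a lazy reschedule is allowed only when neither kind of preemption is
 * disabled and the lazy resched flag is pending.
 */
static bool lazy_preempt_allowed(const struct task_state *t)
{
	if (t->preempt_count)		/* ordinary preemption disabled */
		return false;
	if (t->preempt_lazy_count)	/* lazy preemption disabled */
		return false;
	return t->need_resched_lazy;	/* TIF_NEED_RESCHED_LAZY set? */
}

int main(void)
{
	struct task_state t = {
		.preempt_count = 0,
		.preempt_lazy_count = 1,	/* a lazy-disabled section is held */
		.need_resched_lazy = true,
	};

	printf("%d\n", lazy_preempt_allowed(&t));	/* 0: lazy count blocks it */

	t.preempt_lazy_count = 0;
	printf("%d\n", lazy_preempt_allowed(&t));	/* 1: may reschedule now */
	return 0;
}

In the real code the first test is folded into the percpu decrement (____preempt_count_dec_and_test()); the sketch only preserves the order and meaning of the checks.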