Diffstat (limited to 'debian/patches-rt/preempt-lazy-support.patch')
-rw-r--r--  debian/patches-rt/preempt-lazy-support.patch  136
1 file changed, 54 insertions, 82 deletions
diff --git a/debian/patches-rt/preempt-lazy-support.patch b/debian/patches-rt/preempt-lazy-support.patch
index 086b4ceea..97aeed235 100644
--- a/debian/patches-rt/preempt-lazy-support.patch
+++ b/debian/patches-rt/preempt-lazy-support.patch
@@ -1,7 +1,7 @@
Subject: sched: Add support for lazy preemption
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 26 Oct 2012 18:50:54 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2.17-rt9.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.4/older/patches-5.4.3-rt1.tar.xz
It has become an obsession to mitigate the determinism vs. throughput
loss of RT. Looking at the mainline semantics of preemption points
@@ -58,15 +58,14 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
include/linux/thread_info.h | 12 +++++-
include/linux/trace_events.h | 1
kernel/Kconfig.preempt | 6 +++
- kernel/cpu.c | 2 +
- kernel/sched/core.c | 83 +++++++++++++++++++++++++++++++++++++++++--
+ kernel/sched/core.c | 82 +++++++++++++++++++++++++++++++++++++++++--
kernel/sched/fair.c | 16 ++++----
kernel/sched/features.h | 3 +
kernel/sched/sched.h | 9 ++++
kernel/trace/trace.c | 35 ++++++++++--------
kernel/trace/trace.h | 2 +
kernel/trace/trace_output.c | 14 ++++++-
- 13 files changed, 227 insertions(+), 29 deletions(-)
+ 12 files changed, 224 insertions(+), 29 deletions(-)
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -115,7 +114,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ preempt_check_resched(); \
+} while (0)
+
- #else /* !CONFIG_PREEMPT */
+ #else /* !CONFIG_PREEMPTION */
#define preempt_enable() \
do { \
@@ -254,6 +281,12 @@ do { \
@@ -142,7 +141,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1744,6 +1744,44 @@ static inline int test_tsk_need_resched(
+@@ -1791,6 +1791,44 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
@@ -220,40 +219,21 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define TRACE_EVENT_TYPE_MAX \
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
-@@ -7,6 +7,12 @@ config PREEMPT_RT_BASE
- bool
- select PREEMPT
+@@ -1,5 +1,11 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+config HAVE_PREEMPT_LAZY
+ bool
+
+config PREEMPT_LAZY
-+ def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT_FULL
++ def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT
+
choice
prompt "Preemption Model"
default PREEMPT_NONE
---- a/kernel/cpu.c
-+++ b/kernel/cpu.c
-@@ -305,6 +305,7 @@ void pin_current_cpu(void)
- return;
- }
- cpu = smp_processor_id();
-+ preempt_lazy_enable();
- preempt_enable();
-
- sleeping_lock_inc();
-@@ -312,6 +313,7 @@ void pin_current_cpu(void)
- sleeping_lock_dec();
-
- preempt_disable();
-+ preempt_lazy_disable();
- if (cpu != smp_processor_id()) {
- __read_rt_unlock(cpuhp_pin);
- goto again;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -544,6 +544,48 @@ void resched_curr(struct rq *rq)
+@@ -555,6 +555,48 @@ void resched_curr(struct rq *rq)
trace_sched_wake_idle_without_ipi(cpu);
}
@@ -302,7 +282,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void resched_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
-@@ -2449,6 +2491,9 @@ int sched_fork(unsigned long clone_flags
+@@ -3002,6 +3044,9 @@ int sched_fork(unsigned long clone_flags
p->on_cpu = 0;
#endif
init_task_preempt_count(p);
@@ -312,7 +292,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -3507,6 +3552,7 @@ static void __sched notrace __schedule(b
+@@ -4139,6 +4184,7 @@ static void __sched notrace __schedule(b
next = pick_next_task(rq, prev, &rf);
clear_tsk_need_resched(prev);
@@ -320,7 +300,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_preempt_need_resched();
if (likely(prev != next)) {
-@@ -3690,6 +3736,30 @@ static void __sched notrace preempt_sche
+@@ -4326,6 +4372,30 @@ static void __sched notrace preempt_sche
} while (need_resched());
}
@@ -348,10 +328,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+#endif
+
- #ifdef CONFIG_PREEMPT
+ #ifdef CONFIG_PREEMPTION
/*
- * this is the entry point to schedule() from in-kernel preemption
-@@ -3704,7 +3774,8 @@ asmlinkage __visible void __sched notrac
+ * This is the entry point to schedule() from in-kernel preemption
+@@ -4339,7 +4409,8 @@ asmlinkage __visible void __sched notrac
*/
if (likely(!preemptible()))
return;
@@ -361,7 +341,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_schedule_common();
}
NOKPROBE_SYMBOL(preempt_schedule);
-@@ -3731,6 +3802,9 @@ asmlinkage __visible void __sched notrac
+@@ -4366,6 +4437,9 @@ asmlinkage __visible void __sched notrac
if (likely(!preemptible()))
return;
@@ -371,7 +351,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
/*
* Because the function tracer can trace preempt_count_sub()
-@@ -5496,7 +5570,9 @@ void init_idle(struct task_struct *idle,
+@@ -6156,7 +6230,9 @@ void init_idle(struct task_struct *idle,
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
@@ -382,33 +362,25 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The idle tasks have their own, simple scheduling class:
*/
-@@ -7250,6 +7326,7 @@ void migrate_disable(void)
+@@ -8093,6 +8169,7 @@ void migrate_disable(void)
+
+ if (++current->migrate_disable == 1) {
+ this_rq()->nr_pinned++;
++ preempt_lazy_disable();
+ #ifdef CONFIG_SCHED_DEBUG
+ WARN_ON_ONCE(current->pinned_on_cpu >= 0);
+ current->pinned_on_cpu = smp_processor_id();
+@@ -8169,6 +8246,7 @@ void migrate_enable(void)
}
- preempt_disable();
-+ preempt_lazy_disable();
- pin_current_cpu();
-
- migrate_disable_update_cpus_allowed(p);
-@@ -7317,6 +7394,7 @@ void migrate_enable(void)
- arg.dest_cpu = dest_cpu;
-
- unpin_current_cpu();
-+ preempt_lazy_enable();
- preempt_enable();
-
- sleeping_lock_inc();
-@@ -7326,6 +7404,7 @@ void migrate_enable(void)
- }
- }
- unpin_current_cpu();
+ out:
+ preempt_lazy_enable();
preempt_enable();
}
EXPORT_SYMBOL(migrate_enable);
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
-@@ -4104,7 +4104,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -4122,7 +4122,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
ideal_runtime = sched_slice(cfs_rq, curr);
delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
if (delta_exec > ideal_runtime) {
@@ -417,7 +389,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The current task ran long enough, ensure it doesn't get
* re-elected due to buddy favours.
-@@ -4128,7 +4128,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -4146,7 +4146,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
return;
if (delta > ideal_runtime)
@@ -426,7 +398,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void
-@@ -4270,7 +4270,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
+@@ -4289,7 +4289,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
* validating it and just reschedule.
*/
if (queued) {
@@ -435,7 +407,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
}
/*
-@@ -4456,7 +4456,7 @@ static void __account_cfs_rq_runtime(str
+@@ -4414,7 +4414,7 @@ static void __account_cfs_rq_runtime(str
* hierarchy can be throttled
*/
if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
@@ -444,7 +416,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static __always_inline
-@@ -5169,7 +5169,7 @@ static void hrtick_start_fair(struct rq
+@@ -5127,7 +5127,7 @@ static void hrtick_start_fair(struct rq
if (delta < 0) {
if (rq->curr == p)
@@ -453,7 +425,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
}
hrtick_start(rq, delta);
-@@ -7009,7 +7009,7 @@ static void check_preempt_wakeup(struct
+@@ -6729,7 +6729,7 @@ static void check_preempt_wakeup(struct
return;
preempt:
@@ -462,7 +434,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
-@@ -10281,7 +10281,7 @@ static void task_fork_fair(struct task_s
+@@ -9984,7 +9984,7 @@ static void task_fork_fair(struct task_s
* 'current' within the tree based on its new key value.
*/
swap(curr->vruntime, se->vruntime);
@@ -471,7 +443,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
se->vruntime -= cfs_rq->min_vruntime;
-@@ -10305,7 +10305,7 @@ prio_changed_fair(struct rq *rq, struct
+@@ -10008,7 +10008,7 @@ prio_changed_fair(struct rq *rq, struct
*/
if (rq->curr == p) {
if (p->prio > oldprio)
@@ -482,9 +454,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
-@@ -48,6 +48,9 @@ SCHED_FEAT(NONTASK_CAPACITY, true)
+@@ -47,6 +47,9 @@ SCHED_FEAT(NONTASK_CAPACITY, true)
- #ifdef CONFIG_PREEMPT_RT_FULL
+ #ifdef CONFIG_PREEMPT_RT
SCHED_FEAT(TTWU_QUEUE, false)
+# ifdef CONFIG_PREEMPT_LAZY
+SCHED_FEAT(PREEMPT_LAZY, true)
@@ -494,7 +466,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -1786,6 +1786,15 @@ extern void reweight_task(struct task_st
+@@ -1876,6 +1876,15 @@ extern void reweight_task(struct task_st
extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);
@@ -512,15 +484,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -2318,6 +2318,7 @@ tracing_generic_entry_update(struct trac
+@@ -2335,6 +2335,7 @@ tracing_generic_entry_update(struct trac
struct task_struct *tsk = current;
entry->preempt_count = pc & 0xff;
+ entry->preempt_lazy_count = preempt_lazy_count();
entry->pid = (tsk) ? tsk->pid : 0;
+ entry->type = type;
entry->flags =
- #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
-@@ -2328,7 +2329,8 @@ tracing_generic_entry_update(struct trac
+@@ -2346,7 +2347,8 @@ tracing_generic_entry_update(struct trac
((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
@@ -530,7 +502,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
-@@ -3555,15 +3557,17 @@ unsigned long trace_total_entries(struct
+@@ -3575,15 +3577,17 @@ unsigned long trace_total_entries(struct
static void print_lat_help_header(struct seq_file *m)
{
@@ -543,21 +515,21 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
- "# ||||| / delay \n"
- "# cmd pid |||||| time | caller \n"
- "# \\ / ||||| \\ | / \n");
-+ seq_puts(m, "# _--------=> CPU# \n"
-+ "# / _-------=> irqs-off \n"
-+ "# | / _------=> need-resched \n"
-+ "# || / _-----=> need-resched_lazy \n"
-+ "# ||| / _----=> hardirq/softirq \n"
-+ "# |||| / _---=> preempt-depth \n"
-+ "# ||||| / _--=> preempt-lazy-depth\n"
-+ "# |||||| / _-=> migrate-disable \n"
-+ "# ||||||| / delay \n"
-+ "# cmd pid |||||||| time | caller \n"
-+ "# \\ / |||||||| \\ | / \n");
++ seq_puts(m, "# _--------=> CPU# \n"
++ "# / _-------=> irqs-off \n"
++ "# | / _------=> need-resched \n"
++ "# || / _-----=> need-resched_lazy \n"
++ "# ||| / _----=> hardirq/softirq \n"
++ "# |||| / _---=> preempt-depth \n"
++ "# ||||| / _--=> preempt-lazy-depth\n"
++ "# |||||| / _-=> migrate-disable \n"
++ "# ||||||| / delay \n"
++ "# cmd pid |||||||| time | caller \n"
++ "# \\ / |||||||| \\ | / \n");
}
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
-@@ -3599,11 +3603,12 @@ static void print_func_help_header_irq(s
+@@ -3619,11 +3623,12 @@ static void print_func_help_header_irq(s
seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);