author    Romain Perier <romain.perier@opensource.viveris.fr>  2019-01-11 15:52:15 +0100
committer Romain Perier <romain.perier@opensource.viveris.fr>  2019-01-11 17:44:15 +0100
commit    fe5142170f582165349d8b4e1b4efa83b628b25e (patch)
tree      d6a7e3f96fe695ccf637407f04cd23028260470c /debian/patches-rt/preempt-lazy-support.patch
parent    d31776d28df358119e1c1caec843cc2e6ca1bd8a (diff)
download  linux-debian-fe5142170f582165349d8b4e1b4efa83b628b25e.tar.gz
[rt] Update to 4.19.13-rt10
Diffstat (limited to 'debian/patches-rt/preempt-lazy-support.patch')
-rw-r--r--  debian/patches-rt/preempt-lazy-support.patch  | 32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/debian/patches-rt/preempt-lazy-support.patch b/debian/patches-rt/preempt-lazy-support.patch
index 9b223a71e..c63a21b07 100644
--- a/debian/patches-rt/preempt-lazy-support.patch
+++ b/debian/patches-rt/preempt-lazy-support.patch
@@ -1,7 +1,7 @@
Subject: sched: Add support for lazy preemption
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 26 Oct 2012 18:50:54 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.10-rt8.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.13-rt10.tar.xz
It has become an obsession to mitigate the determinism vs. throughput
loss of RT. Looking at the mainline semantics of preemption points
@@ -251,7 +251,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
goto again;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -492,6 +492,48 @@ void resched_curr(struct rq *rq)
+@@ -491,6 +491,48 @@ void resched_curr(struct rq *rq)
trace_sched_wake_idle_without_ipi(cpu);
}
@@ -300,7 +300,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void resched_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
-@@ -2404,6 +2446,9 @@ int sched_fork(unsigned long clone_flags
+@@ -2403,6 +2445,9 @@ int sched_fork(unsigned long clone_flags
p->on_cpu = 0;
#endif
init_task_preempt_count(p);
@@ -310,7 +310,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -3471,6 +3516,7 @@ static void __sched notrace __schedule(b
+@@ -3470,6 +3515,7 @@ static void __sched notrace __schedule(b
next = pick_next_task(rq, prev, &rf);
clear_tsk_need_resched(prev);
@@ -318,7 +318,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_preempt_need_resched();
if (likely(prev != next)) {
-@@ -3651,6 +3697,30 @@ static void __sched notrace preempt_sche
+@@ -3650,6 +3696,30 @@ static void __sched notrace preempt_sche
} while (need_resched());
}
@@ -349,7 +349,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PREEMPT
/*
* this is the entry point to schedule() from in-kernel preemption
-@@ -3665,7 +3735,8 @@ asmlinkage __visible void __sched notrac
+@@ -3664,7 +3734,8 @@ asmlinkage __visible void __sched notrac
*/
if (likely(!preemptible()))
return;
@@ -359,7 +359,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_schedule_common();
}
NOKPROBE_SYMBOL(preempt_schedule);
-@@ -3692,6 +3763,9 @@ asmlinkage __visible void __sched notrac
+@@ -3691,6 +3762,9 @@ asmlinkage __visible void __sched notrac
if (likely(!preemptible()))
return;
@@ -369,7 +369,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
/*
* Because the function tracer can trace preempt_count_sub()
-@@ -5460,7 +5534,9 @@ void init_idle(struct task_struct *idle,
+@@ -5459,7 +5533,9 @@ void init_idle(struct task_struct *idle,
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
@@ -380,7 +380,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The idle tasks have their own, simple scheduling class:
*/
-@@ -7182,6 +7258,7 @@ void migrate_disable(void)
+@@ -7181,6 +7257,7 @@ void migrate_disable(void)
}
preempt_disable();
@@ -388,7 +388,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pin_current_cpu();
migrate_disable_update_cpus_allowed(p);
-@@ -7249,6 +7326,7 @@ void migrate_enable(void)
+@@ -7248,6 +7325,7 @@ void migrate_enable(void)
arg.dest_cpu = dest_cpu;
unpin_current_cpu();
@@ -396,7 +396,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_enable();
stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
tlb_migrate_finish(p->mm);
-@@ -7257,6 +7335,7 @@ void migrate_enable(void)
+@@ -7256,6 +7334,7 @@ void migrate_enable(void)
}
}
unpin_current_cpu();
@@ -442,7 +442,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static __always_inline
-@@ -5037,7 +5037,7 @@ static void hrtick_start_fair(struct rq
+@@ -5039,7 +5039,7 @@ static void hrtick_start_fair(struct rq
if (delta < 0) {
if (rq->curr == p)
@@ -451,7 +451,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
}
hrtick_start(rq, delta);
-@@ -6612,7 +6612,7 @@ static void check_preempt_wakeup(struct
+@@ -6614,7 +6614,7 @@ static void check_preempt_wakeup(struct
return;
preempt:
@@ -460,7 +460,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
-@@ -9723,7 +9723,7 @@ static void task_fork_fair(struct task_s
+@@ -9725,7 +9725,7 @@ static void task_fork_fair(struct task_s
* 'current' within the tree based on its new key value.
*/
swap(curr->vruntime, se->vruntime);
@@ -469,7 +469,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
se->vruntime -= cfs_rq->min_vruntime;
-@@ -9747,7 +9747,7 @@ prio_changed_fair(struct rq *rq, struct
+@@ -9749,7 +9749,7 @@ prio_changed_fair(struct rq *rq, struct
*/
if (rq->curr == p) {
if (p->prio > oldprio)
@@ -492,7 +492,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -1639,6 +1639,15 @@ extern void reweight_task(struct task_st
+@@ -1638,6 +1638,15 @@ extern void reweight_task(struct task_st
extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);
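For readers landing on this offset refresh without the full patch: the inner patch being updated above, "sched: Add support for lazy preemption", adds a deferrable ("lazy") variant of the need-resched mechanism, so that preemption requests from non-RT work can be postponed across short kernel sections while urgent requests still take effect immediately. The fragment below is a minimal user-space model of that idea only, not the kernel code the patch adds; every identifier in it (lazy_count, need_resched, need_resched_lazy, preemption_point) is invented for illustration.

/*
 * Illustrative user-space sketch -- NOT the code added by the patch.
 * Models a "lazy" resched request that is honoured at a preemption
 * point only when no lazy-preempt-disabled section is active, while
 * an ordinary (urgent) request is honoured right away.
 */
#include <stdbool.h>
#include <stdio.h>

static int  lazy_count;         /* depth of lazy-preempt-disabled sections */
static bool need_resched;       /* urgent request, e.g. an RT wakeup       */
static bool need_resched_lazy;  /* deferrable request, e.g. a fair wakeup  */

static void preempt_lazy_disable(void) { lazy_count++; }
static void preempt_lazy_enable(void)  { lazy_count--; }

/* Would this preemption point call the scheduler? */
static bool preemption_point(void)
{
    if (need_resched)                       /* urgent: always honoured */
        return true;
    if (need_resched_lazy && lazy_count == 0)
        return true;                        /* lazy: only outside sections */
    return false;
}

int main(void)
{
    need_resched_lazy = true;               /* a non-urgent wakeup arrives */

    preempt_lazy_disable();                 /* enter a lazy section        */
    printf("inside lazy section : preempt? %d\n", preemption_point()); /* 0 */

    need_resched = true;                    /* an urgent request arrives   */
    printf("urgent request      : preempt? %d\n", preemption_point()); /* 1 */

    need_resched = false;
    preempt_lazy_enable();                  /* leave the lazy section      */
    printf("after lazy section  : preempt? %d\n", preemption_point()); /* 1 */
    return 0;
}

The sketch builds with any C compiler (e.g. cc -o lazy lazy.c) and prints 0, 1, 1 for the three checks, mirroring the intent described in the patch header quoted in the diff above.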