Diffstat (limited to 'debian/patches-rt/0002-sched-Fix-balance_callback.patch')
 debian/patches-rt/0002-sched-Fix-balance_callback.patch | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/debian/patches-rt/0002-sched-Fix-balance_callback.patch b/debian/patches-rt/0002-sched-Fix-balance_callback.patch
index e440eb621..02cc74a49 100644
--- a/debian/patches-rt/0002-sched-Fix-balance_callback.patch
+++ b/debian/patches-rt/0002-sched-Fix-balance_callback.patch
@@ -1,7 +1,7 @@
From: Peter Zijlstra <peterz@infradead.org>
Date: Fri, 23 Oct 2020 12:12:00 +0200
Subject: [PATCH 02/19] sched: Fix balance_callback()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.17-rt32.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.21-rt34.tar.xz
The intent of balance_callback() has always been to delay executing
balancing operations until the end of the current rq->lock section.
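
[Editor's note: the snippet below is not part of the patch or the kernel sources. It is a minimal, self-contained sketch of the deferred-callback pattern the quoted commit message describes: balancing work is queued while rq->lock is held and only executed once the locked section has ended. All names here (run_balance_callbacks(), push_tasks(), the simplified struct rq) are illustrative stand-ins, not the kernel's actual definitions.]

/* Sketch of the balance_callback() idea: defer work queued under a lock. */
#include <stddef.h>
#include <stdio.h>

struct rq;                                   /* stand-in for the runqueue     */

struct callback_head {
	struct callback_head *next;
	void (*func)(struct rq *rq);
};

struct rq {
	int cpu;
	struct callback_head *balance_callback; /* pending work, queued under rq->lock */
};

/* Called with the (conceptual) rq->lock held: record the work, do not run it. */
static void queue_balance_callback(struct rq *rq, struct callback_head *head,
				   void (*func)(struct rq *rq))
{
	head->func = func;
	head->next = rq->balance_callback;
	rq->balance_callback = head;
}

/* Called after the rq->lock section has ended: drain the list and run the work. */
static void run_balance_callbacks(struct rq *rq)
{
	struct callback_head *head = rq->balance_callback;

	rq->balance_callback = NULL;
	while (head) {
		struct callback_head *next = head->next;
		void (*func)(struct rq *rq) = head->func;

		head->next = NULL;
		func(rq);
		head = next;
	}
}

/* Example balancing operation that must not run inside the locked section. */
static void push_tasks(struct rq *rq)
{
	printf("balancing rq of CPU %d after the lock was dropped\n", rq->cpu);
}

int main(void)
{
	struct rq rq = { .cpu = 0, .balance_callback = NULL };
	struct callback_head work = { 0 };

	/* ... rq->lock held: decide that balancing is needed ... */
	queue_balance_callback(&rq, &work, push_tasks);
	/* ... rq->lock released ... */
	run_balance_callbacks(&rq);
	return 0;
}
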
@@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3490,6 +3490,69 @@ static inline void finish_task(struct ta
+@@ -3487,6 +3487,69 @@ static inline void finish_task(struct ta
#endif
}
@@ -99,7 +99,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline void
prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
{
-@@ -3515,6 +3578,7 @@ static inline void finish_lock_switch(st
+@@ -3512,6 +3575,7 @@ static inline void finish_lock_switch(st
* prev into current:
*/
spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
@@ -107,7 +107,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
raw_spin_unlock_irq(&rq->lock);
}
-@@ -3656,43 +3720,6 @@ static struct rq *finish_task_switch(str
+@@ -3653,43 +3717,6 @@ static struct rq *finish_task_switch(str
return rq;
}
@@ -151,7 +151,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* schedule_tail - first thing a freshly forked thread must call.
* @prev: the thread we just switched away from.
-@@ -3712,7 +3739,6 @@ asmlinkage __visible void schedule_tail(
+@@ -3709,7 +3736,6 @@ asmlinkage __visible void schedule_tail(
*/
rq = finish_task_switch(prev);
@@ -159,7 +159,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
preempt_enable();
if (current->set_child_tid)
-@@ -4528,10 +4554,11 @@ static void __sched notrace __schedule(b
+@@ -4525,10 +4551,11 @@ static void __sched notrace __schedule(b
rq = context_switch(rq, prev, next, &rf);
} else {
rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
@@ -174,7 +174,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
void __noreturn do_task_dead(void)
-@@ -4943,9 +4970,11 @@ void rt_mutex_setprio(struct task_struct
+@@ -4940,9 +4967,11 @@ void rt_mutex_setprio(struct task_struct
out_unlock:
/* Avoid rq from going away on us: */
preempt_disable();
@@ -188,7 +188,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
preempt_enable();
}
#else
-@@ -5219,6 +5248,7 @@ static int __sched_setscheduler(struct t
+@@ -5216,6 +5245,7 @@ static int __sched_setscheduler(struct t
int retval, oldprio, oldpolicy = -1, queued, running;
int new_effective_prio, policy = attr->sched_policy;
const struct sched_class *prev_class;
@@ -196,7 +196,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct rq_flags rf;
int reset_on_fork;
int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
-@@ -5457,6 +5487,7 @@ static int __sched_setscheduler(struct t
+@@ -5454,6 +5484,7 @@ static int __sched_setscheduler(struct t
/* Avoid rq from going away on us: */
preempt_disable();
@@ -204,7 +204,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
task_rq_unlock(rq, p, &rf);
if (pi) {
-@@ -5465,7 +5496,7 @@ static int __sched_setscheduler(struct t
+@@ -5462,7 +5493,7 @@ static int __sched_setscheduler(struct t
}
/* Run balance callbacks after we've adjusted the PI chain: */
@@ -215,7 +215,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -1215,6 +1215,9 @@ static inline void rq_pin_lock(struct rq
+@@ -1216,6 +1216,9 @@ static inline void rq_pin_lock(struct rq
rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
rf->clock_update_flags = 0;
#endif