diff options
Diffstat (limited to 'debian/patches-rt/sched-migrate_disable-fallback-to-preempt_disable-in.patch')
-rw-r--r-- | debian/patches-rt/sched-migrate_disable-fallback-to-preempt_disable-in.patch | 183 |
1 file changed, 0 insertions, 183 deletions
diff --git a/debian/patches-rt/sched-migrate_disable-fallback-to-preempt_disable-in.patch b/debian/patches-rt/sched-migrate_disable-fallback-to-preempt_disable-in.patch deleted file mode 100644 index b18d0effb..000000000 --- a/debian/patches-rt/sched-migrate_disable-fallback-to-preempt_disable-in.patch +++ /dev/null @@ -1,183 +0,0 @@ -From: Sebastian Andrzej Siewior <bigeasy@linutronix.de> -Date: Thu, 5 Jul 2018 14:44:51 +0200 -Subject: [PATCH] sched/migrate_disable: fallback to preempt_disable() instead - barrier() -Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2.17-rt9.tar.xz - -On SMP + !RT migrate_disable() is still around. It is not part of spin_lock() -anymore so it has almost no users. However the futex code has a workaround for -the !in_atomic() part of migrate disable which fails because the matching -migrade_disable() is no longer part of spin_lock(). - -On !SMP + !RT migrate_disable() is reduced to barrier(). This is not optimal -because we few spots where a "preempt_disable()" statement was replaced with -"migrate_disable()". - -We also used the migration_disable counter to figure out if a sleeping lock is -acquired so RCU does not complain about schedule() during rcu_read_lock() while -a sleeping lock is held. This changed, we no longer use it, we have now a -sleeping_lock counter for the RCU purpose. - -This means we can now: -- for SMP + RT_BASE - full migration program, nothing changes here - -- for !SMP + RT_BASE - the migration counting is no longer required. It used to ensure that the task - is not migrated to another CPU and that this CPU remains online. !SMP ensures - that already. - Move it to CONFIG_SCHED_DEBUG so the counting is done for debugging purpose - only. - -- for all other cases including !RT - fallback to preempt_disable(). 
The only remaining users of migrate_disable() - are those which were converted from preempt_disable() and the futex - workaround which is already in the preempt_disable() section due to the - spin_lock that is held. - -Cc: stable-rt@vger.kernel.org -Reported-by: joe.korty@concurrent-rt.com -Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> ---- - include/linux/preempt.h | 6 +++--- - include/linux/sched.h | 4 ++-- - kernel/sched/core.c | 21 ++++++++++----------- - kernel/sched/debug.c | 2 +- - 4 files changed, 16 insertions(+), 17 deletions(-) - ---- a/include/linux/preempt.h -+++ b/include/linux/preempt.h -@@ -201,7 +201,7 @@ do { \ - - #define preemptible() (preempt_count() == 0 && !irqs_disabled()) - --#ifdef CONFIG_SMP -+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) - - extern void migrate_disable(void); - extern void migrate_enable(void); -@@ -218,8 +218,8 @@ static inline int __migrate_disabled(str - } - - #else --#define migrate_disable() barrier() --#define migrate_enable() barrier() -+#define migrate_disable() preempt_disable() -+#define migrate_enable() preempt_enable() - static inline int __migrate_disabled(struct task_struct *p) - { - return 0; ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -658,7 +658,7 @@ struct task_struct { - int nr_cpus_allowed; - const cpumask_t *cpus_ptr; - cpumask_t cpus_mask; --#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP) -+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) - int migrate_disable; - int migrate_disable_update; - # ifdef CONFIG_SCHED_DEBUG -@@ -666,8 +666,8 @@ struct task_struct { - # endif - - #elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) -- int migrate_disable; - # ifdef CONFIG_SCHED_DEBUG -+ int migrate_disable; - int migrate_disable_atomic; - # endif - #endif ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -1087,7 +1087,7 @@ void set_cpus_allowed_common(struct task - p->nr_cpus_allowed = cpumask_weight(new_mask); - } - 
--#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP) -+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) - int __migrate_disabled(struct task_struct *p) - { - return p->migrate_disable; -@@ -1127,7 +1127,7 @@ static void __do_set_cpus_allowed_tail(s - - void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) - { --#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP) -+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) - if (__migrate_disabled(p)) { - lockdep_assert_held(&p->pi_lock); - -@@ -7186,7 +7186,7 @@ const u32 sched_prio_to_wmult[40] = { - - #undef CREATE_TRACE_POINTS - --#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP) -+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) - - static inline void - update_nr_migratory(struct task_struct *p, long delta) -@@ -7330,45 +7330,44 @@ EXPORT_SYMBOL(migrate_enable); - #elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) - void migrate_disable(void) - { -+#ifdef CONFIG_SCHED_DEBUG - struct task_struct *p = current; - - if (in_atomic() || irqs_disabled()) { --#ifdef CONFIG_SCHED_DEBUG - p->migrate_disable_atomic++; --#endif - return; - } --#ifdef CONFIG_SCHED_DEBUG -+ - if (unlikely(p->migrate_disable_atomic)) { - tracing_off(); - WARN_ON_ONCE(1); - } --#endif - - p->migrate_disable++; -+#endif -+ barrier(); - } - EXPORT_SYMBOL(migrate_disable); - - void migrate_enable(void) - { -+#ifdef CONFIG_SCHED_DEBUG - struct task_struct *p = current; - - if (in_atomic() || irqs_disabled()) { --#ifdef CONFIG_SCHED_DEBUG - p->migrate_disable_atomic--; --#endif - return; - } - --#ifdef CONFIG_SCHED_DEBUG - if (unlikely(p->migrate_disable_atomic)) { - tracing_off(); - WARN_ON_ONCE(1); - } --#endif - - WARN_ON_ONCE(p->migrate_disable <= 0); - p->migrate_disable--; -+#endif -+ barrier(); - } - EXPORT_SYMBOL(migrate_enable); - #endif ---- a/kernel/sched/debug.c -+++ b/kernel/sched/debug.c -@@ -979,7 +979,7 @@ void proc_sched_show_task(struct task_st - 
P(dl.runtime); - P(dl.deadline); - } --#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP) -+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) - P(migrate_disable); - #endif - P(nr_cpus_allowed); |