Diffstat (limited to 'debian/patches-rt/futex-workaround-migrate_disable-enable-in-different.patch'):
 -rw-r--r--  debian/patches-rt/futex-workaround-migrate_disable-enable-in-different.patch | 67
 1 file changed, 67 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/futex-workaround-migrate_disable-enable-in-different.patch b/debian/patches-rt/futex-workaround-migrate_disable-enable-in-different.patch
new file mode 100644
index 000000000..7d89fbcbf
--- /dev/null
+++ b/debian/patches-rt/futex-workaround-migrate_disable-enable-in-different.patch
@@ -0,0 +1,67 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 8 Mar 2017 14:23:35 +0100
+Subject: [PATCH] futex: workaround migrate_disable/enable in different context
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.4/older/patches-5.4.3-rt1.tar.xz
+
+migrate_enable() invokes __schedule(), which expects a preempt count of one.
+Holding a raw_spinlock_t with interrupts disabled should not allow scheduling.
+
+These little hacks ensure that we don't schedule while we lock the hb lock with
+interrupts enabled and unlock it with interrupts disabled.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+[XXX: As per PeterZ's suggestion,
+ set_thread_flag(TIF_NEED_RESCHED); preempt_fold_need_resched()
+ would trigger a scheduler invocation on the last preempt_enable(), which in
+ turn would allow us to drop this.
+]
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/futex.c | 18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -2980,6 +2980,14 @@ static int futex_lock_pi(u32 __user *uad
+ * before __rt_mutex_start_proxy_lock() is done.
+ */
+ raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
++ /*
++ * The migrate_disable() here disables migration in the in_atomic() fast
++ * path; migration is enabled again in the following spin_unlock(). We have
++ * one migrate_disable() pending in the slow path, which is reversed
++ * after the raw_spin_unlock_irq() where we leave the atomic context.
++ */
++ migrate_disable();
++
+ spin_unlock(q.lock_ptr);
+ /*
+ * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter
+@@ -2988,6 +2996,7 @@ static int futex_lock_pi(u32 __user *uad
+ */
+ ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
+ raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
++ migrate_enable();
+
+ if (ret) {
+ if (ret == 1)
+@@ -3136,10 +3145,19 @@ static int futex_unlock_pi(u32 __user *u
+ * rt_waiter. Also see the WARN in wake_futex_pi().
+ */
+ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
++ /*
++ * Magic trickery for now to make the RT migrate disable
++ * logic happy. The following spin_unlock() happens with
++ * interrupts disabled so the internal migrate_enable()
++ * won't undo the migrate_disable() which was issued when
++ * locking hb->lock.
++ */
++ migrate_disable();
+ spin_unlock(&hb->lock);
+
+ /* drops pi_state->pi_mutex.wait_lock */
+ ret = wake_futex_pi(uaddr, uval, pi_state);
++ migrate_enable();
+
+ put_pi_state(pi_state);
+
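
The sequence the patch enforces can be modeled in plain C. The sketch below is a hypothetical userspace model, not kernel code: the lock and migrate functions are stubs that only track a disable count and an interrupt flag, and the assertion stands in for "migrate_enable() scheduling with interrupts disabled", the bug the workaround avoids. It assumes the PREEMPT_RT behavior described above, namely that spin_lock() pairs with migrate_disable(), spin_unlock() with migrate_enable(), and that migrate_enable() may invoke __schedule() once the disable count drops to zero.

/*
 * Userspace model of the workaround -- NOT kernel code. All functions
 * below are simplified stand-ins for the real kernel APIs.
 */
#include <assert.h>
#include <stdio.h>

static int migrate_disable_count;	/* models the per-task counter */
static int irqs_disabled;		/* models the local interrupt state */

static void migrate_disable(void)
{
	migrate_disable_count++;
}

static void migrate_enable(void)
{
	assert(migrate_disable_count > 0);
	if (--migrate_disable_count == 0) {
		/*
		 * The real migrate_enable() may end up in __schedule() at
		 * this point; doing so with interrupts off is the bug the
		 * patch works around.
		 */
		assert(!irqs_disabled);
	}
}

/* RT sleeping-spinlock model: lock disables migration, unlock enables it. */
static void spin_lock(void)		{ migrate_disable(); }
static void spin_unlock(void)		{ migrate_enable(); }

static void raw_spin_lock_irq(void)	{ irqs_disabled = 1; }
static void raw_spin_unlock_irq(void)	{ irqs_disabled = 0; }

int main(void)
{
	/* futex_lock_pi()-style sequence with the workaround applied: */
	spin_lock();		/* hb->lock: count 0 -> 1 */
	raw_spin_lock_irq();	/* pi_mutex.wait_lock, interrupts now off */
	migrate_disable();	/* extra disable from the patch: 1 -> 2 */
	spin_unlock();		/* 2 -> 1, count stays nonzero, no schedule */
	raw_spin_unlock_irq();	/* leave the atomic context */
	migrate_enable();	/* 1 -> 0, scheduling is safe here */

	printf("final disable count = %d\n", migrate_disable_count);
	return 0;
}

Removing the extra migrate_disable()/migrate_enable() pair makes the count reach zero inside the interrupts-off region and trips the assertion -- the same imbalance that, in the kernel, would let migrate_enable() call __schedule() from an invalid context.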