Diffstat (limited to 'debian/patches-rt/0170-locking-rtmutex-Provide-rt_mutex_slowlock_locked.patch')
-rw-r--r--  debian/patches-rt/0170-locking-rtmutex-Provide-rt_mutex_slowlock_locked.patch | 145
1 file changed, 0 insertions(+), 145 deletions(-)
diff --git a/debian/patches-rt/0170-locking-rtmutex-Provide-rt_mutex_slowlock_locked.patch b/debian/patches-rt/0170-locking-rtmutex-Provide-rt_mutex_slowlock_locked.patch
deleted file mode 100644
index ec9749cc2..000000000
--- a/debian/patches-rt/0170-locking-rtmutex-Provide-rt_mutex_slowlock_locked.patch
+++ /dev/null
@@ -1,145 +0,0 @@
-From bd9cecae1dc3c66e172989c8a9f0936177e1e2fb Mon Sep 17 00:00:00 2001
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Thu, 12 Oct 2017 16:14:22 +0200
-Subject: [PATCH 170/296] locking/rtmutex: Provide rt_mutex_slowlock_locked()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.35-rt39.tar.xz
-
-This is the inner-part of rt_mutex_slowlock(), required for rwsem-rt.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/locking/rtmutex.c | 67 +++++++++++++++++++--------------
- kernel/locking/rtmutex_common.h | 7 ++++
- 2 files changed, 45 insertions(+), 29 deletions(-)
-
-diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
-index 5148a2b49c55..e7645a09d0fb 100644
---- a/kernel/locking/rtmutex.c
-+++ b/kernel/locking/rtmutex.c
-@@ -1234,35 +1234,16 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
- }
- }
-
--/*
-- * Slow path lock function:
-- */
--static int __sched
--rt_mutex_slowlock(struct rt_mutex *lock, int state,
-- struct hrtimer_sleeper *timeout,
-- enum rtmutex_chainwalk chwalk)
-+int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
-+ struct hrtimer_sleeper *timeout,
-+ enum rtmutex_chainwalk chwalk,
-+ struct rt_mutex_waiter *waiter)
- {
-- struct rt_mutex_waiter waiter;
-- unsigned long flags;
-- int ret = 0;
--
-- rt_mutex_init_waiter(&waiter);
--
-- /*
-- * Technically we could use raw_spin_[un]lock_irq() here, but this can
-- * be called in early boot if the cmpxchg() fast path is disabled
-- * (debug, no architecture support). In this case we will acquire the
-- * rtmutex with lock->wait_lock held. But we cannot unconditionally
-- * enable interrupts in that early boot case. So we need to use the
-- * irqsave/restore variants.
-- */
-- raw_spin_lock_irqsave(&lock->wait_lock, flags);
-+ int ret;
-
- /* Try to acquire the lock again: */
-- if (try_to_take_rt_mutex(lock, current, NULL)) {
-- raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-+ if (try_to_take_rt_mutex(lock, current, NULL))
- return 0;
-- }
-
- set_current_state(state);
-
-@@ -1270,16 +1251,16 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
- if (unlikely(timeout))
- hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
-
-- ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
-+ ret = task_blocks_on_rt_mutex(lock, waiter, current, chwalk);
-
- if (likely(!ret))
- /* sleep on the mutex */
-- ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
-+ ret = __rt_mutex_slowlock(lock, state, timeout, waiter);
-
- if (unlikely(ret)) {
- __set_current_state(TASK_RUNNING);
-- remove_waiter(lock, &waiter);
-- rt_mutex_handle_deadlock(ret, chwalk, &waiter);
-+ remove_waiter(lock, waiter);
-+ rt_mutex_handle_deadlock(ret, chwalk, waiter);
- }
-
- /*
-@@ -1287,6 +1268,34 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
- * unconditionally. We might have to fix that up.
- */
- fixup_rt_mutex_waiters(lock);
-+ return ret;
-+}
-+
-+/*
-+ * Slow path lock function:
-+ */
-+static int __sched
-+rt_mutex_slowlock(struct rt_mutex *lock, int state,
-+ struct hrtimer_sleeper *timeout,
-+ enum rtmutex_chainwalk chwalk)
-+{
-+ struct rt_mutex_waiter waiter;
-+ unsigned long flags;
-+ int ret = 0;
-+
-+ rt_mutex_init_waiter(&waiter);
-+
-+ /*
-+ * Technically we could use raw_spin_[un]lock_irq() here, but this can
-+ * be called in early boot if the cmpxchg() fast path is disabled
-+ * (debug, no architecture support). In this case we will acquire the
-+ * rtmutex with lock->wait_lock held. But we cannot unconditionally
-+ * enable interrupts in that early boot case. So we need to use the
-+ * irqsave/restore variants.
-+ */
-+ raw_spin_lock_irqsave(&lock->wait_lock, flags);
-+
-+ ret = rt_mutex_slowlock_locked(lock, state, timeout, chwalk, &waiter);
-
- raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-
-diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
-index 37cd6b3bf6f4..b5a2affa59d5 100644
---- a/kernel/locking/rtmutex_common.h
-+++ b/kernel/locking/rtmutex_common.h
-@@ -15,6 +15,7 @@
-
- #include <linux/rtmutex.h>
- #include <linux/sched/wake_q.h>
-+#include <linux/sched/debug.h>
-
- /*
- * This is the control structure for tasks blocked on a rt_mutex,
-@@ -153,6 +154,12 @@ extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
- struct wake_q_head *wqh);
-
- extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
-+/* RW semaphore special interface */
-+
-+int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
-+ struct hrtimer_sleeper *timeout,
-+ enum rtmutex_chainwalk chwalk,
-+ struct rt_mutex_waiter *waiter);
-
- #ifdef CONFIG_DEBUG_RT_MUTEXES
- # include "rtmutex-debug.h"
---
-2.30.2
-
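
For context: the point of the split recorded above is that rt_mutex_slowlock_locked() neither allocates the waiter nor takes lock->wait_lock itself, so a caller that already holds wait_lock, such as the rwsem-rt code the changelog mentions, can supply its own waiter. A minimal sketch of such a caller follows; example_down_write_slow() is a hypothetical name for illustration, not the actual rwsem-rt implementation, and the waiter teardown mirrors the tail of rt_mutex_slowlock().

/*
 * Illustrative sketch only, not actual kernel code: a caller that
 * already owns lock->wait_lock invokes the inner slowpath directly.
 */
static int __sched example_down_write_slow(struct rt_mutex *m)
{
	struct rt_mutex_waiter waiter;
	unsigned long flags;
	int ret;

	rt_mutex_init_waiter(&waiter);

	/* Same irqsave rationale as in rt_mutex_slowlock() above. */
	raw_spin_lock_irqsave(&m->wait_lock, flags);

	/*
	 * rt_mutex_slowlock_locked() expects wait_lock held on entry
	 * and returns with it held, so the caller can keep its own
	 * state consistent around the call. No timeout here (NULL),
	 * and the minimal chainwalk as in the non-debug fast path.
	 */
	ret = rt_mutex_slowlock_locked(m, TASK_UNINTERRUPTIBLE, NULL,
				       RT_MUTEX_MIN_CHAINWALK, &waiter);

	raw_spin_unlock_irqrestore(&m->wait_lock, flags);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}

The same holds for any other compound primitive built on the rtmutex slowpath: the outer function owns the waiter's lifetime and the wait_lock critical section, while the inner function only implements the block-and-acquire logic.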