Diffstat (limited to 'debian/patches-rt/0081-rtmutex-Add-rt_mutex_lock_nest_lock-and-rt_mutex_loc.patch')
-rw-r--r--  debian/patches-rt/0081-rtmutex-Add-rt_mutex_lock_nest_lock-and-rt_mutex_loc.patch  123
1 file changed, 123 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0081-rtmutex-Add-rt_mutex_lock_nest_lock-and-rt_mutex_loc.patch b/debian/patches-rt/0081-rtmutex-Add-rt_mutex_lock_nest_lock-and-rt_mutex_loc.patch
new file mode 100644
index 000000000..e85b8c88c
--- /dev/null
+++ b/debian/patches-rt/0081-rtmutex-Add-rt_mutex_lock_nest_lock-and-rt_mutex_loc.patch
@@ -0,0 +1,123 @@
+From db5ef2f66ddae88b6b4e8901f49572251fee5d25 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 13 Aug 2021 13:49:49 +0200
+Subject: [PATCH 081/158] rtmutex: Add rt_mutex_lock_nest_lock() and
+ rt_mutex_lock_killable().
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.15/older/patches-5.15.10-rt24.tar.xz
+
+The locking selftest for ww-mutex expects to operate directly on the
+base-mutex which becomes a rtmutex on PREEMPT_RT.
+
+Add rt_mutex_lock_nest_lock(), follows mutex_lock_nest_lock() for
+rtmutex.
+Add rt_mutex_lock_killable(), follows mutex_lock_killable() for rtmutex.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/rtmutex.h | 9 +++++++++
+ kernel/locking/rtmutex_api.c | 30 ++++++++++++++++++++++++++----
+ 2 files changed, 35 insertions(+), 4 deletions(-)
+
+diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
+index 9deedfeec2b1..7d049883a08a 100644
+--- a/include/linux/rtmutex.h
++++ b/include/linux/rtmutex.h
+@@ -99,13 +99,22 @@ extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock
+
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass);
++extern void _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock);
+ #define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0)
++#define rt_mutex_lock_nest_lock(lock, nest_lock) \
++ do { \
++ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
++ _rt_mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
++ } while (0)
++
+ #else
+ extern void rt_mutex_lock(struct rt_mutex *lock);
+ #define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock)
++#define rt_mutex_lock_nest_lock(lock, nest_lock) rt_mutex_lock(lock)
+ #endif
+
+ extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
++extern int rt_mutex_lock_killable(struct rt_mutex *lock);
+ extern int rt_mutex_trylock(struct rt_mutex *lock);
+
+ extern void rt_mutex_unlock(struct rt_mutex *lock);
+diff --git a/kernel/locking/rtmutex_api.c b/kernel/locking/rtmutex_api.c
+index 5c9299aaabae..900220941caa 100644
+--- a/kernel/locking/rtmutex_api.c
++++ b/kernel/locking/rtmutex_api.c
+@@ -21,12 +21,13 @@ int max_lock_depth = 1024;
+ */
+ static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
+ unsigned int state,
++ struct lockdep_map *nest_lock,
+ unsigned int subclass)
+ {
+ int ret;
+
+ might_sleep();
+- mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
++ mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, _RET_IP_);
+ ret = __rt_mutex_lock(&lock->rtmutex, state);
+ if (ret)
+ mutex_release(&lock->dep_map, _RET_IP_);
+@@ -48,10 +49,16 @@ EXPORT_SYMBOL(rt_mutex_base_init);
+ */
+ void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
+ {
+- __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
++ __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, subclass);
+ }
+ EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
+
++void __sched _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock)
++{
++ __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, nest_lock, 0);
++}
++EXPORT_SYMBOL_GPL(_rt_mutex_lock_nest_lock);
++
+ #else /* !CONFIG_DEBUG_LOCK_ALLOC */
+
+ /**
+@@ -61,7 +68,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
+ */
+ void __sched rt_mutex_lock(struct rt_mutex *lock)
+ {
+- __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
++ __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, 0);
+ }
+ EXPORT_SYMBOL_GPL(rt_mutex_lock);
+ #endif
+@@ -77,10 +84,25 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
+ */
+ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
+ {
+- return __rt_mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
++ return __rt_mutex_lock_common(lock, TASK_INTERRUPTIBLE, NULL, 0);
+ }
+ EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
+
++/**
++ * rt_mutex_lock_killable - lock a rt_mutex killable
++ *
++ * @lock: the rt_mutex to be locked
++ *
++ * Returns:
++ * 0 on success
++ * -EINTR when interrupted by a signal
++ */
++int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
++{
++ return __rt_mutex_lock_common(lock, TASK_KILLABLE, NULL, 0);
++}
++EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
++
+ /**
+ * rt_mutex_trylock - try to lock a rt_mutex
+ *
+--
+2.33.1
+
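For context, a hypothetical kernel-side sketch (not part of the patch) of how the newly added rt_mutex_lock_nest_lock() might be used: an outer rt_mutex serializes a group of inner rt_mutexes that all share one lockdep class, and the outer lock is passed as the nest lock so that acquiring several same-class locks under it does not trigger a false lockdep report. The names outer_lock, inner_locks, demo_init and demo_lock_group are illustrative only.

/* Hypothetical usage sketch, not part of the patch. */
#include <linux/rtmutex.h>
#include <linux/init.h>
#include <linux/kernel.h>

static DEFINE_RT_MUTEX(outer_lock);    /* serializes the whole group   */
static struct rt_mutex inner_locks[4]; /* all share one lockdep class  */

static int __init demo_init(void)
{
	int i;

	/* rt_mutex_init() in a loop gives every element the same class. */
	for (i = 0; i < ARRAY_SIZE(inner_locks); i++)
		rt_mutex_init(&inner_locks[i]);
	return 0;
}

static void demo_lock_group(void)
{
	int i;

	rt_mutex_lock(&outer_lock);
	/* lockdep accepts nested same-class acquisitions under the nest lock. */
	for (i = 0; i < ARRAY_SIZE(inner_locks); i++)
		rt_mutex_lock_nest_lock(&inner_locks[i], &outer_lock);

	/* ... operate on the data protected by the inner locks ... */

	for (i = ARRAY_SIZE(inner_locks) - 1; i >= 0; i--)
		rt_mutex_unlock(&inner_locks[i]);
	rt_mutex_unlock(&outer_lock);
}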
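Similarly, a minimal sketch of a caller of the new rt_mutex_lock_killable(): like mutex_lock_killable(), it sleeps in TASK_KILLABLE, returning 0 on success and -EINTR when the blocked task receives a fatal signal, so callers are expected to check and propagate the error. demo_killable is an illustrative name, not something introduced by the patch.

/* Hypothetical caller of rt_mutex_lock_killable(), not part of the patch. */
static int demo_killable(struct rt_mutex *lock)
{
	int ret;

	ret = rt_mutex_lock_killable(lock);
	if (ret)
		return ret;	/* -EINTR: fatal signal while blocked */

	/* ... critical section ... */

	rt_mutex_unlock(lock);
	return 0;
}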