From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 12 Oct 2017 16:36:39 +0200
Subject: [PATCH 13/22] locking/rtmutex: export lockdep-less version of
 rt_mutex's lock, trylock and unlock
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.21-rt34.tar.xz

Required for lock implementations built on top of rtmutex.
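
As an illustration, a lock layered on rt_mutex does its own lockdep
annotation and then calls the lockdep-less entry points, so the inner
rt_mutex is not annotated a second time. A minimal sketch follows
(my_sleeping_lock and its helpers are hypothetical, not part of this
patch):

    /* Hypothetical wrapper type, for illustration only. */
    struct my_sleeping_lock {
            struct rt_mutex         rtmutex;
            struct lockdep_map      dep_map;        /* CONFIG_DEBUG_LOCK_ALLOC */
    };

    static void my_lock(struct my_sleeping_lock *l)
    {
            /* Annotate once, at this layer ... */
            mutex_acquire(&l->dep_map, 0, 0, _RET_IP_);
            /* ... then acquire without a second lockdep annotation. */
            __rt_mutex_lock_state(&l->rtmutex, TASK_UNINTERRUPTIBLE);
    }

    static void my_unlock(struct my_sleeping_lock *l)
    {
            mutex_release(&l->dep_map, _RET_IP_);
            __rt_mutex_unlock(&l->rtmutex);
    }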

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 kernel/locking/rtmutex.c        |   54 ++++++++++++++++++++++++++++------------
 kernel/locking/rtmutex_common.h |    3 ++
 2 files changed, 41 insertions(+), 16 deletions(-)

--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1469,12 +1469,33 @@ rt_mutex_fastunlock(struct rt_mutex *loc
 		rt_mutex_postunlock(&wake_q);
 }
 
-static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
+int __sched __rt_mutex_lock_state(struct rt_mutex *lock, int state)
 {
 	might_sleep();
+	return rt_mutex_fastlock(lock, state, rt_mutex_slowlock);
+}
+
+/**
+ * rt_mutex_lock_state - lock a rt_mutex with a given state
+ *
+ * @lock:      The rt_mutex to be locked
+ * @state:     The state to set when blocking on the rt_mutex
+ */
+static inline int __sched rt_mutex_lock_state(struct rt_mutex *lock,
+					      unsigned int subclass, int state)
+{
+	int ret;
 
 	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
+	ret = __rt_mutex_lock_state(lock, state);
+	if (ret)
+		mutex_release(&lock->dep_map, _RET_IP_);
+	return ret;
+}
+
+static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
+{
+	rt_mutex_lock_state(lock, subclass, TASK_UNINTERRUPTIBLE);
 }
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -1515,16 +1536,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
  */
 int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
 {
-	int ret;
-
-	might_sleep();
-
-	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
-	if (ret)
-		mutex_release(&lock->dep_map, _RET_IP_);
-
-	return ret;
+	return rt_mutex_lock_state(lock, 0, TASK_INTERRUPTIBLE);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
 
@@ -1541,6 +1553,14 @@ int __sched __rt_mutex_futex_trylock(str
 	return __rt_mutex_slowtrylock(lock);
 }
 
+int __sched __rt_mutex_trylock(struct rt_mutex *lock)
+{
+	if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
+		return 0;
+
+	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
+}
+
 /**
  * rt_mutex_trylock - try to lock a rt_mutex
  *
@@ -1556,10 +1576,7 @@ int __sched rt_mutex_trylock(struct rt_m
 {
 	int ret;
 
-	if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
-		return 0;
-
-	ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
+	ret = __rt_mutex_trylock(lock);
 	if (ret)
 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 
@@ -1567,6 +1584,11 @@ int __sched rt_mutex_trylock(struct rt_m
 }
 EXPORT_SYMBOL_GPL(rt_mutex_trylock);
 
+void __sched __rt_mutex_unlock(struct rt_mutex *lock)
+{
+	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
+}
+
 /**
  * rt_mutex_unlock - unlock a rt_mutex
  *
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -156,6 +156,9 @@ extern bool __rt_mutex_futex_unlock(stru
 extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
 /* RW semaphore special interface */
 
+extern int __rt_mutex_lock_state(struct rt_mutex *lock, int state);
+extern int __rt_mutex_trylock(struct rt_mutex *lock);
+extern void __rt_mutex_unlock(struct rt_mutex *lock);
 int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
 				     struct hrtimer_sleeper *timeout,
 				     enum rtmutex_chainwalk chwalk,