Diffstat (limited to 'debian/patches-rt/futex-Make-the-futex_hash_bucket-lock-raw.patch')
-rw-r--r--  debian/patches-rt/futex-Make-the-futex_hash_bucket-lock-raw.patch | 331
1 file changed, 0 insertions(+), 331 deletions(-)
diff --git a/debian/patches-rt/futex-Make-the-futex_hash_bucket-lock-raw.patch b/debian/patches-rt/futex-Make-the-futex_hash_bucket-lock-raw.patch
deleted file mode 100644
index 9a9dd403a..000000000
--- a/debian/patches-rt/futex-Make-the-futex_hash_bucket-lock-raw.patch
+++ /dev/null
@@ -1,331 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 26 Jun 2019 11:59:44 +0200
-Subject: [PATCH] futex: Make the futex_hash_bucket lock raw
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2.17-rt9.tar.xz
-
-Since commit 1a1fb985f2e2b ("futex: Handle early deadlock return
-correctly") we can deadlock when we re-acquire the HB lock after
-failing to acquire the futex lock.
-The RT waiter (for the futex lock) is still enqueued, and acquiring the
-HB lock may build up a lock chain which leads to a deadlock if the
-owner of the futex lock holds the HB lock.
-
-Make the hash bucket lock raw so it does not participate in the
-lock chain.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/futex.c | 86 ++++++++++++++++++++++++++++-----------------------------
- 1 file changed, 43 insertions(+), 43 deletions(-)
-
---- a/kernel/futex.c
-+++ b/kernel/futex.c
-@@ -231,7 +231,7 @@ struct futex_q {
- struct plist_node list;
-
- struct task_struct *task;
-- spinlock_t *lock_ptr;
-+ raw_spinlock_t *lock_ptr;
- union futex_key key;
- struct futex_pi_state *pi_state;
- struct rt_mutex_waiter *rt_waiter;
-@@ -252,7 +252,7 @@ static const struct futex_q futex_q_init
- */
- struct futex_hash_bucket {
- atomic_t waiters;
-- spinlock_t lock;
-+ raw_spinlock_t lock;
- struct plist_head chain;
- } ____cacheline_aligned_in_smp;
-
-@@ -900,7 +900,7 @@ void exit_pi_state_list(struct task_stru
- }
- raw_spin_unlock_irq(&curr->pi_lock);
-
-- spin_lock(&hb->lock);
-+ raw_spin_lock(&hb->lock);
- raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
- raw_spin_lock(&curr->pi_lock);
- /*
-@@ -910,7 +910,7 @@ void exit_pi_state_list(struct task_stru
- if (head->next != next) {
- /* retain curr->pi_lock for the loop invariant */
- raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
-- spin_unlock(&hb->lock);
-+ raw_spin_unlock(&hb->lock);
- put_pi_state(pi_state);
- continue;
- }
-@@ -922,7 +922,7 @@ void exit_pi_state_list(struct task_stru
-
- raw_spin_unlock(&curr->pi_lock);
- raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
-- spin_unlock(&hb->lock);
-+ raw_spin_unlock(&hb->lock);
-
- rt_mutex_futex_unlock(&pi_state->pi_mutex);
- put_pi_state(pi_state);
-@@ -1542,21 +1542,21 @@ static inline void
- double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
- {
- if (hb1 <= hb2) {
-- spin_lock(&hb1->lock);
-+ raw_spin_lock(&hb1->lock);
- if (hb1 < hb2)
-- spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
-+ raw_spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
- } else { /* hb1 > hb2 */
-- spin_lock(&hb2->lock);
-- spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
-+ raw_spin_lock(&hb2->lock);
-+ raw_spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
- }
- }
-
- static inline void
- double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
- {
-- spin_unlock(&hb1->lock);
-+ raw_spin_unlock(&hb1->lock);
- if (hb1 != hb2)
-- spin_unlock(&hb2->lock);
-+ raw_spin_unlock(&hb2->lock);
- }
-
- /*
-@@ -1584,7 +1584,7 @@ futex_wake(u32 __user *uaddr, unsigned i
- if (!hb_waiters_pending(hb))
- goto out_put_key;
-
-- spin_lock(&hb->lock);
-+ raw_spin_lock(&hb->lock);
-
- plist_for_each_entry_safe(this, next, &hb->chain, list) {
- if (match_futex (&this->key, &key)) {
-@@ -1603,7 +1603,7 @@ futex_wake(u32 __user *uaddr, unsigned i
- }
- }
-
-- spin_unlock(&hb->lock);
-+ raw_spin_unlock(&hb->lock);
- wake_up_q(&wake_q);
- out_put_key:
- put_futex_key(&key);
-@@ -2208,7 +2208,7 @@ static inline struct futex_hash_bucket *
-
- q->lock_ptr = &hb->lock;
-
-- spin_lock(&hb->lock);
-+ raw_spin_lock(&hb->lock);
- return hb;
- }
-
-@@ -2216,7 +2216,7 @@ static inline void
- queue_unlock(struct futex_hash_bucket *hb)
- __releases(&hb->lock)
- {
-- spin_unlock(&hb->lock);
-+ raw_spin_unlock(&hb->lock);
- hb_waiters_dec(hb);
- }
-
-@@ -2255,7 +2255,7 @@ static inline void queue_me(struct futex
- __releases(&hb->lock)
- {
- __queue_me(q, hb);
-- spin_unlock(&hb->lock);
-+ raw_spin_unlock(&hb->lock);
- }
-
- /**
-@@ -2271,41 +2271,41 @@ static inline void queue_me(struct futex
- */
- static int unqueue_me(struct futex_q *q)
- {
-- spinlock_t *lock_ptr;
-+ raw_spinlock_t *lock_ptr;
- int ret = 0;
-
- /* In the common case we don't take the spinlock, which is nice. */
- retry:
- /*
-- * q->lock_ptr can change between this read and the following spin_lock.
-- * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
-- * optimizing lock_ptr out of the logic below.
-+ * q->lock_ptr can change between this read and the following
-+ * raw_spin_lock. Use READ_ONCE to forbid the compiler from reloading
-+ * q->lock_ptr and optimizing lock_ptr out of the logic below.
- */
- lock_ptr = READ_ONCE(q->lock_ptr);
- if (lock_ptr != NULL) {
-- spin_lock(lock_ptr);
-+ raw_spin_lock(lock_ptr);
- /*
- * q->lock_ptr can change between reading it and
-- * spin_lock(), causing us to take the wrong lock. This
-+ * raw_spin_lock(), causing us to take the wrong lock. This
- * corrects the race condition.
- *
- * Reasoning goes like this: if we have the wrong lock,
- * q->lock_ptr must have changed (maybe several times)
-- * between reading it and the spin_lock(). It can
-- * change again after the spin_lock() but only if it was
-- * already changed before the spin_lock(). It cannot,
-+ * between reading it and the raw_spin_lock(). It can
-+ * change again after the raw_spin_lock() but only if it was
-+ * already changed before the raw_spin_lock(). It cannot,
- * however, change back to the original value. Therefore
- * we can detect whether we acquired the correct lock.
- */
- if (unlikely(lock_ptr != q->lock_ptr)) {
-- spin_unlock(lock_ptr);
-+ raw_spin_unlock(lock_ptr);
- goto retry;
- }
- __unqueue_futex(q);
-
- BUG_ON(q->pi_state);
-
-- spin_unlock(lock_ptr);
-+ raw_spin_unlock(lock_ptr);
- ret = 1;
- }
-
-@@ -2327,7 +2327,7 @@ static void unqueue_me_pi(struct futex_q
- put_pi_state(q->pi_state);
- q->pi_state = NULL;
-
-- spin_unlock(q->lock_ptr);
-+ raw_spin_unlock(q->lock_ptr);
- }
-
- static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
-@@ -2460,7 +2460,7 @@ static int fixup_pi_state_owner(u32 __us
- */
- handle_err:
- raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
-- spin_unlock(q->lock_ptr);
-+ raw_spin_unlock(q->lock_ptr);
-
- switch (err) {
- case -EFAULT:
-@@ -2478,7 +2478,7 @@ static int fixup_pi_state_owner(u32 __us
- break;
- }
-
-- spin_lock(q->lock_ptr);
-+ raw_spin_lock(q->lock_ptr);
- raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
-
- /*
-@@ -2574,7 +2574,7 @@ static void futex_wait_queue_me(struct f
- /*
- * The task state is guaranteed to be set before another task can
- * wake it. set_current_state() is implemented using smp_store_mb() and
-- * queue_me() calls spin_unlock() upon completion, both serializing
-+ * queue_me() calls raw_spin_unlock() upon completion, both serializing
- * access to the hash list and forcing another memory barrier.
- */
- set_current_state(TASK_INTERRUPTIBLE);
-@@ -2867,7 +2867,7 @@ static int futex_lock_pi(u32 __user *uad
- * before __rt_mutex_start_proxy_lock() is done.
- */
- raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
-- spin_unlock(q.lock_ptr);
-+ raw_spin_unlock(q.lock_ptr);
- /*
- * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter
- * such that futex_unlock_pi() is guaranteed to observe the waiter when
-@@ -2888,7 +2888,7 @@ static int futex_lock_pi(u32 __user *uad
- ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
-
- cleanup:
-- spin_lock(q.lock_ptr);
-+ raw_spin_lock(q.lock_ptr);
- /*
- * If we failed to acquire the lock (deadlock/signal/timeout), we must
- * first acquire the hb->lock before removing the lock from the
-@@ -2989,7 +2989,7 @@ static int futex_unlock_pi(u32 __user *u
- return ret;
-
- hb = hash_futex(&key);
-- spin_lock(&hb->lock);
-+ raw_spin_lock(&hb->lock);
-
- /*
- * Check waiters first. We do not trust user space values at
-@@ -3023,7 +3023,7 @@ static int futex_unlock_pi(u32 __user *u
- * rt_waiter. Also see the WARN in wake_futex_pi().
- */
- raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
-- spin_unlock(&hb->lock);
-+ raw_spin_unlock(&hb->lock);
-
- /* drops pi_state->pi_mutex.wait_lock */
- ret = wake_futex_pi(uaddr, uval, pi_state);
-@@ -3062,7 +3062,7 @@ static int futex_unlock_pi(u32 __user *u
- * owner.
- */
- if ((ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))) {
-- spin_unlock(&hb->lock);
-+ raw_spin_unlock(&hb->lock);
- switch (ret) {
- case -EFAULT:
- goto pi_faulted;
-@@ -3082,7 +3082,7 @@ static int futex_unlock_pi(u32 __user *u
- ret = (curval == uval) ? 0 : -EAGAIN;
-
- out_unlock:
-- spin_unlock(&hb->lock);
-+ raw_spin_unlock(&hb->lock);
- out_putkey:
- put_futex_key(&key);
- return ret;
-@@ -3257,9 +3257,9 @@ static int futex_wait_requeue_pi(u32 __u
- /* Queue the futex_q, drop the hb lock, wait for wakeup. */
- futex_wait_queue_me(hb, &q, to);
-
-- spin_lock(&hb->lock);
-+ raw_spin_lock(&hb->lock);
- ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
-- spin_unlock(&hb->lock);
-+ raw_spin_unlock(&hb->lock);
- if (ret)
- goto out_put_keys;
-
-@@ -3279,7 +3279,7 @@ static int futex_wait_requeue_pi(u32 __u
- * did a lock-steal - fix up the PI-state in that case.
- */
- if (q.pi_state && (q.pi_state->owner != current)) {
-- spin_lock(q.lock_ptr);
-+ raw_spin_lock(q.lock_ptr);
- ret = fixup_pi_state_owner(uaddr2, &q, current);
- if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
- pi_state = q.pi_state;
-@@ -3290,7 +3290,7 @@ static int futex_wait_requeue_pi(u32 __u
- * the requeue_pi() code acquired for us.
- */
- put_pi_state(q.pi_state);
-- spin_unlock(q.lock_ptr);
-+ raw_spin_unlock(q.lock_ptr);
- }
- } else {
- struct rt_mutex *pi_mutex;
-@@ -3304,7 +3304,7 @@ static int futex_wait_requeue_pi(u32 __u
- pi_mutex = &q.pi_state->pi_mutex;
- ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
-
-- spin_lock(q.lock_ptr);
-+ raw_spin_lock(q.lock_ptr);
- if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
- ret = 0;
-
-@@ -3929,7 +3929,7 @@ static int __init futex_init(void)
- for (i = 0; i < futex_hashsize; i++) {
- atomic_set(&futex_queues[i].waiters, 0);
- plist_head_init(&futex_queues[i].chain);
-- spin_lock_init(&futex_queues[i].lock);
-+ raw_spin_lock_init(&futex_queues[i].lock);
- }
-
- return 0;
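
For orientation, the removed patch above is a mechanical conversion of the futex hash-bucket lock from spinlock_t to raw_spinlock_t. The sketch below is not part of the patch; it is a minimal illustration of that pattern, and the names demo_hash_bucket, demo_bucket_init and demo_bucket_walk are hypothetical. On PREEMPT_RT a spinlock_t is backed by a sleeping rt_mutex and therefore takes part in PI lock chains, while a raw_spinlock_t always spins with preemption disabled, which is why making the bucket lock raw keeps it out of the lock chain described in the changelog.

/* Illustrative sketch only; not taken from kernel/futex.c or this patch. */
#include <linux/atomic.h>
#include <linux/plist.h>
#include <linux/spinlock.h>

/* Hypothetical stand-in for struct futex_hash_bucket. */
struct demo_hash_bucket {
	atomic_t waiters;
	raw_spinlock_t lock;	/* raw lock: never an rt_mutex, even on PREEMPT_RT */
	struct plist_head chain;
};

static void demo_bucket_init(struct demo_hash_bucket *hb)
{
	atomic_set(&hb->waiters, 0);
	plist_head_init(&hb->chain);
	raw_spin_lock_init(&hb->lock);	/* same initializer the patch uses in futex_init() */
}

static void demo_bucket_walk(struct demo_hash_bucket *hb)
{
	/* Spins with preemption disabled; cannot extend an rt_mutex lock chain. */
	raw_spin_lock(&hb->lock);
	/* ... traverse hb->chain while holding the raw lock ... */
	raw_spin_unlock(&hb->lock);
}

The trade-off is that everything done under a raw spinlock must stay short and non-blocking, since even on RT the critical section cannot be preempted.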