Diffstat (limited to 'debian/patches-rt/0066-sched-Make-cond_resched_lock-variants-RT-aware.patch')
-rw-r--r--  debian/patches-rt/0066-sched-Make-cond_resched_lock-variants-RT-aware.patch  100
1 file changed, 100 insertions, 0 deletions
diff --git a/debian/patches-rt/0066-sched-Make-cond_resched_lock-variants-RT-aware.patch b/debian/patches-rt/0066-sched-Make-cond_resched_lock-variants-RT-aware.patch
new file mode 100644
index 000000000..5791c0771
--- /dev/null
+++ b/debian/patches-rt/0066-sched-Make-cond_resched_lock-variants-RT-aware.patch
@@ -0,0 +1,100 @@
+From d2f27a429d92d9e68a4b5e4dc1018c6edd94050a Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 23 Sep 2021 18:54:44 +0200
+Subject: [PATCH 066/158] sched: Make cond_resched_lock() variants RT aware
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.15/older/patches-5.15.10-rt24.tar.xz
+
+The __might_resched() checks in the cond_resched_lock() variants use
+PREEMPT_LOCK_OFFSET for the preempt count offset check, which takes into
+account the preemption disabled by the spin_lock() that is still held at
+that point.
+
+On PREEMPT_RT enabled kernels spin/rw_lock held sections stay preemptible
+which means PREEMPT_LOCK_OFFSET is 0, but that still triggers the
+__might_resched() check because that takes RCU read side nesting into
+account.
+
+On RT enabled kernels spin/read/write_lock() issue rcu_read_lock() to
+resemble the !RT semantics, which means in cond_resched_lock() the might
+resched check will see preempt_count() == 0 and rcu_preempt_depth() == 1.
+
+Introduce PREEMPT_LOCK_RESCHED_OFFSETS for those might resched checks and
+map it depending on CONFIG_PREEMPT_RT.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20210923165358.305969211@linutronix.de
+---
+ include/linux/preempt.h | 5 +++--
+ include/linux/sched.h | 34 +++++++++++++++++++++++++---------
+ 2 files changed, 28 insertions(+), 11 deletions(-)
+
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h
+index 4d244e295e85..031898b38d06 100644
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -122,9 +122,10 @@
+ * The preempt_count offset after spin_lock()
+ */
+ #if !defined(CONFIG_PREEMPT_RT)
+-#define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
++#define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
+ #else
+-#define PREEMPT_LOCK_OFFSET 0
++/* Locks on RT do not disable preemption */
++#define PREEMPT_LOCK_OFFSET 0
+ #endif
+
+ /*
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 4dde5e26de8f..2b8c8150792c 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -2060,19 +2060,35 @@ extern int __cond_resched_rwlock_write(rwlock_t *lock);
+ #define MIGHT_RESCHED_RCU_SHIFT 8
+ #define MIGHT_RESCHED_PREEMPT_MASK ((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)
+
+-#define cond_resched_lock(lock) ({ \
+- __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
+- __cond_resched_lock(lock); \
++#ifndef CONFIG_PREEMPT_RT
++/*
++ * Non RT kernels have an elevated preempt count due to the held lock,
++ * but are not allowed to be inside a RCU read side critical section
++ */
++# define PREEMPT_LOCK_RESCHED_OFFSETS PREEMPT_LOCK_OFFSET
++#else
++/*
++ * spin/rw_lock() on RT implies rcu_read_lock(). The might_sleep() check in
++ * cond_resched*lock() has to take that into account because it checks for
++ * preempt_count() and rcu_preempt_depth().
++ */
++# define PREEMPT_LOCK_RESCHED_OFFSETS \
++ (PREEMPT_LOCK_OFFSET + (1U << MIGHT_RESCHED_RCU_SHIFT))
++#endif
++
++#define cond_resched_lock(lock) ({ \
++ __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
++ __cond_resched_lock(lock); \
+ })
+
+-#define cond_resched_rwlock_read(lock) ({ \
+- __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
+- __cond_resched_rwlock_read(lock); \
++#define cond_resched_rwlock_read(lock) ({ \
++ __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
++ __cond_resched_rwlock_read(lock); \
+ })
+
+-#define cond_resched_rwlock_write(lock) ({ \
+- __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
+- __cond_resched_rwlock_write(lock); \
++#define cond_resched_rwlock_write(lock) ({ \
++ __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
++ __cond_resched_rwlock_write(lock); \
+ })
+
+ static inline void cond_resched_rcu(void)
+--
+2.33.1
+
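
For illustration, a minimal stand-alone C sketch of the offset scheme the patch
relies on: PREEMPT_LOCK_RESCHED_OFFSETS packs the expected preempt count into
the low bits and the expected RCU read side nesting into the bits above
MIGHT_RESCHED_RCU_SHIFT, and a check in the style of resched_offsets_ok()
(introduced earlier in this series) compares that packed value against the live
state. CONFIG_PREEMPT_RT is force-defined here and the fake_*() helpers stand
in for preempt_count()/rcu_preempt_depth(); both are assumptions made for the
sketch, not kernel code.

    #include <stdbool.h>
    #include <stdio.h>

    /* Force the RT configuration to model the case described in the changelog. */
    #define CONFIG_PREEMPT_RT 1

    #define MIGHT_RESCHED_RCU_SHIFT 8

    #ifndef CONFIG_PREEMPT_RT
    /* !RT: spin_lock() disables preemption; really PREEMPT_DISABLE_OFFSET. */
    # define PREEMPT_LOCK_OFFSET          1
    # define PREEMPT_LOCK_RESCHED_OFFSETS PREEMPT_LOCK_OFFSET
    #else
    /* RT: locks stay preemptible, but they imply one level of rcu_read_lock(). */
    # define PREEMPT_LOCK_OFFSET          0
    # define PREEMPT_LOCK_RESCHED_OFFSETS \
            (PREEMPT_LOCK_OFFSET + (1U << MIGHT_RESCHED_RCU_SHIFT))
    #endif

    /* What cond_resched_lock() observes on RT with the spinlock still held. */
    static unsigned int fake_preempt_count(void)     { return 0; }
    static unsigned int fake_rcu_preempt_depth(void) { return 1; }

    /* Modeled on the kernel helper; compares the live state against the offsets. */
    static bool resched_offsets_ok(unsigned int offsets)
    {
            unsigned int nested = fake_preempt_count();

            /* Fold the RCU nesting into the bits above the preempt count. */
            nested += fake_rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;

            return nested == offsets;
    }

    int main(void)
    {
            /* 0 + (1 << 8) matches PREEMPT_LOCK_RESCHED_OFFSETS, so no splat. */
            printf("%s\n", resched_offsets_ok(PREEMPT_LOCK_RESCHED_OFFSETS) ?
                   "offsets match" : "would splat");
            return 0;
    }

Passing the old PREEMPT_LOCK_OFFSET (0 on RT) instead would fail the comparison
because of the RCU term, which is exactly the false positive the patch removes.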