Diffstat (limited to 'debian/patches-rt/0001-preempt-Provide-preempt_-dis-en-able_nested.patch')
-rw-r--r--  debian/patches-rt/0001-preempt-Provide-preempt_-dis-en-able_nested.patch  |  85
1 file changed, 85 insertions, 0 deletions
diff --git a/debian/patches-rt/0001-preempt-Provide-preempt_-dis-en-able_nested.patch b/debian/patches-rt/0001-preempt-Provide-preempt_-dis-en-able_nested.patch
new file mode 100644
index 000000000..26762101b
--- /dev/null
+++ b/debian/patches-rt/0001-preempt-Provide-preempt_-dis-en-able_nested.patch
@@ -0,0 +1,85 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 25 Aug 2022 18:41:24 +0200
+Subject: [PATCH 1/8] preempt: Provide preempt_[dis|en]able_nested()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.0/older/patches-6.0-rt11.tar.xz
+
+On PREEMPT_RT enabled kernels, spinlocks and rwlocks disable neither
+preemption nor interrupts. However, a few places depend on the implicit
+preemption/interrupt disabling of those locks, e.g. seqcount write
+sections and per-CPU statistics updates.
+
+To avoid sprinkling CONFIG_PREEMPT_RT conditionals all over the place, add
+preempt_disable_nested() and preempt_enable_nested(), which should be
+descriptive enough.
+
+Add a lockdep assertion for the !PREEMPT_RT case to catch callers which
+do not have preemption disabled.
+
+Cc: Ben Segall <bsegall@google.com>
+Cc: Daniel Bristot de Oliveira <bristot@redhat.com>
+Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Juri Lelli <juri.lelli@redhat.com>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Valentin Schneider <vschneid@redhat.com>
+Cc: Vincent Guittot <vincent.guittot@linaro.org>
+Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lore.kernel.org/r/20220825164131.402717-2-bigeasy@linutronix.de
+---
+ include/linux/preempt.h | 42 ++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 42 insertions(+)
+
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -421,4 +421,46 @@ static inline void migrate_enable(void)
+
+ #endif /* CONFIG_SMP */
+
++/**
++ * preempt_disable_nested - Disable preemption inside a normally preempt disabled section
++ *
++ * Use for code which requires preemption protection inside a critical
++ * section which has preemption disabled implicitly on non-PREEMPT_RT
++ * enabled kernels, by e.g.:
++ *  - holding a spinlock/rwlock
++ *  - soft interrupt context
++ *  - regular interrupt handlers
++ *
++ * On PREEMPT_RT enabled kernels spinlock/rwlock held sections, soft
++ * interrupt context and regular interrupt handlers are preemptible and
++ * only prevent migration. preempt_disable_nested() ensures that preemption
++ * is disabled for cases which require CPU local serialization even on
++ * PREEMPT_RT. For non-PREEMPT_RT kernels this is a NOP.
++ *
++ * The use cases are code sequences which are not serialized by a
++ * particular lock instance, e.g.:
++ *  - seqcount write side critical sections where the seqcount is not
++ *    associated with a particular lock and therefore the automatic
++ *    protection mechanism does not work. This prevents a live lock
++ *    against a preempting high priority reader.
++ *  - RMW per CPU variable updates like vmstat.
++ */
++/* Macro to avoid header recursion hell vs. lockdep */
++#define preempt_disable_nested() \
++do { \
++	if (IS_ENABLED(CONFIG_PREEMPT_RT)) \
++		preempt_disable(); \
++	else \
++		lockdep_assert_preemption_disabled(); \
++} while (0)
++
++/**
++ * preempt_enable_nested - Undo the effect of preempt_disable_nested()
++ */
++static __always_inline void preempt_enable_nested(void)
++{
++	if (IS_ENABLED(CONFIG_PREEMPT_RT))
++		preempt_enable();
++}
++
+ #endif /* __LINUX_PREEMPT_H */
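
As a usage illustration of the per-CPU statistics case the commit message
mentions, a call site could look roughly like the sketch below. This is not
part of the patch; mystat_count and mystat_inc() are made-up names, and the
code assumes <linux/percpu.h> and <linux/preempt.h> are included.

	/* Hypothetical per-CPU counter which needs CPU-local RMW updates. */
	static DEFINE_PER_CPU(unsigned long, mystat_count);

	static void mystat_inc(void)
	{
		/*
		 * On PREEMPT_RT this disables preemption around the RMW
		 * update; on !PREEMPT_RT it merely lockdep-asserts that
		 * preemption is already disabled, e.g. by a held spinlock
		 * or by interrupt context.
		 */
		preempt_disable_nested();
		__this_cpu_inc(mystat_count);
		preempt_enable_nested();
	}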
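
Similarly, for the write side of a seqcount that is not associated with a
lock (the first use case in the kernel-doc above), a writer might combine
the helpers with the seqcount API along these lines. This is a sketch under
assumed names (my_seq, my_data, my_data_update()); writers are assumed to be
serialized against each other by some other means.

	/* Hypothetical seqcount-protected data without an associated lock. */
	static seqcount_t my_seq = SEQCNT_ZERO(my_seq);
	static unsigned long my_data;

	static void my_data_update(unsigned long val)
	{
		/*
		 * Keep preemption disabled across the write section on
		 * PREEMPT_RT so a preempting high priority reader cannot
		 * spin on an odd sequence count forever.
		 */
		preempt_disable_nested();
		write_seqcount_begin(&my_seq);
		my_data = val;
		write_seqcount_end(&my_seq);
		preempt_enable_nested();
	}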