Diffstat (limited to 'debian/patches-rt/0055-irq_work-Also-rcuwait-for-IRQ_WORK_HARD_IRQ-on-PREEM.patch')
-rw-r--r--  debian/patches-rt/0055-irq_work-Also-rcuwait-for-IRQ_WORK_HARD_IRQ-on-PREEM.patch  63
1 file changed, 63 insertions, 0 deletions
diff --git a/debian/patches-rt/0055-irq_work-Also-rcuwait-for-IRQ_WORK_HARD_IRQ-on-PREEM.patch b/debian/patches-rt/0055-irq_work-Also-rcuwait-for-IRQ_WORK_HARD_IRQ-on-PREEM.patch
new file mode 100644
index 000000000..bed7de837
--- /dev/null
+++ b/debian/patches-rt/0055-irq_work-Also-rcuwait-for-IRQ_WORK_HARD_IRQ-on-PREEM.patch
@@ -0,0 +1,63 @@
+From eb1f04a88f1f21b5cc7c29127e10a4ce2e68dbad Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 6 Oct 2021 13:18:52 +0200
+Subject: [PATCH 055/158] irq_work: Also rcuwait for !IRQ_WORK_HARD_IRQ on
+ PREEMPT_RT
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.15/older/patches-5.15.10-rt24.tar.xz
+
+On PREEMPT_RT most items are processed as LAZY via softirq context.
+Avoid spin-waiting for them because irq_work_sync() could run at a higher
+priority and prevent the irq-work from being completed.
+
+Wait additionally for !IRQ_WORK_HARD_IRQ irq_work items on PREEMPT_RT.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20211006111852.1514359-5-bigeasy@linutronix.de
+---
+ include/linux/irq_work.h | 5 +++++
+ kernel/irq_work.c | 6 ++++--
+ 2 files changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
+index b48955e9c920..8cd11a223260 100644
+--- a/include/linux/irq_work.h
++++ b/include/linux/irq_work.h
+@@ -49,6 +49,11 @@ static inline bool irq_work_is_busy(struct irq_work *work)
+ return atomic_read(&work->node.a_flags) & IRQ_WORK_BUSY;
+ }
+
++static inline bool irq_work_is_hard(struct irq_work *work)
++{
++ return atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ;
++}
++
+ bool irq_work_queue(struct irq_work *work);
+ bool irq_work_queue_on(struct irq_work *work, int cpu);
+
+diff --git a/kernel/irq_work.c b/kernel/irq_work.c
+index 90b6b56f92e9..f7df715ec28e 100644
+--- a/kernel/irq_work.c
++++ b/kernel/irq_work.c
+@@ -217,7 +217,8 @@ void irq_work_single(void *arg)
+ */
+ (void)atomic_cmpxchg(&work->node.a_flags, flags, flags & ~IRQ_WORK_BUSY);
+
+- if (!arch_irq_work_has_interrupt())
++ if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
++ !arch_irq_work_has_interrupt())
+ rcuwait_wake_up(&work->irqwait);
+ }
+
+@@ -277,7 +278,8 @@ void irq_work_sync(struct irq_work *work)
+ lockdep_assert_irqs_enabled();
+ might_sleep();
+
+- if (!arch_irq_work_has_interrupt()) {
++ if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
++ !arch_irq_work_has_interrupt()) {
+ rcuwait_wait_event(&work->irqwait, !irq_work_is_busy(work),
+ TASK_UNINTERRUPTIBLE);
+ return;
+--
+2.33.1
+
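
For context, the sketch below shows how a caller's choice of irq_work flags interacts with this change. It is an illustration only, not part of the patch: demo_work_fn, demo_soft, demo_hard and demo_flush are hypothetical names, and it assumes the IRQ_WORK_INIT()/IRQ_WORK_INIT_HARD() initializers and irq_work_queue()/irq_work_sync() as found in the 5.15 include/linux/irq_work.h. With the patch applied on PREEMPT_RT, irq_work_sync() on the plain item sleeps on the rcuwait instead of spin-waiting, while the IRQ_WORK_HARD_IRQ item is still spin-waited for.

/* Illustration only; demo_* identifiers are hypothetical. */
#include <linux/irq_work.h>

static void demo_work_fn(struct irq_work *work)
{
	/* On PREEMPT_RT this runs in softirq context for demo_soft,
	 * and in hard interrupt context for demo_hard. */
}

/* Plain item: processed as LAZY via softirq context on PREEMPT_RT. */
static struct irq_work demo_soft = IRQ_WORK_INIT(demo_work_fn);

/* Hard item: IRQ_WORK_HARD_IRQ is set in node.a_flags, so it stays
 * in hard interrupt context even on PREEMPT_RT. */
static struct irq_work demo_hard = IRQ_WORK_INIT_HARD(demo_work_fn);

static void demo_flush(void)
{
	irq_work_queue(&demo_soft);
	irq_work_queue(&demo_hard);

	/* With this patch, on PREEMPT_RT this waits on work->irqwait
	 * (rcuwait) instead of spin-waiting for the softirq thread. */
	irq_work_sync(&demo_soft);

	/* The hard-IRQ item is still spin-waited for, as before. */
	irq_work_sync(&demo_hard);
}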