Diffstat (limited to 'debian/patches-rt/softirq-Add-preemptible-softirq.patch')
 debian/patches-rt/softirq-Add-preemptible-softirq.patch | 87
 1 file changed, 47 insertions(+), 40 deletions(-)
diff --git a/debian/patches-rt/softirq-Add-preemptible-softirq.patch b/debian/patches-rt/softirq-Add-preemptible-softirq.patch
index 435bcde26..4775acf47 100644
--- a/debian/patches-rt/softirq-Add-preemptible-softirq.patch
+++ b/debian/patches-rt/softirq-Add-preemptible-softirq.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 20 May 2019 13:09:08 +0200
Subject: [PATCH] softirq: Add preemptible softirq
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2.17-rt9.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.4/older/patches-5.4.3-rt1.tar.xz
Add preemptible softirq for RT's needs. By removing the softirq count
from the preempt counter, the softirq becomes preemptible. A per-CPU
@@ -11,6 +11,7 @@ per-CPU variables are not access in parallel by multiple threads.
local_bh_enable() will process all softirq work that has been raised in
its BH-disabled section once the BH counter gets to 0.
+[+ rcu_read_lock() as part of local_bh_disable() by Scott Wood]
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/bottom_half.h | 5
@@ -18,9 +19,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
include/linux/preempt.h | 17 ++-
include/linux/rcupdate.h | 3
include/linux/sched.h | 3
- kernel/softirq.c | 222 +++++++++++++++++++++++++++++++++++++++++++-
+ kernel/softirq.c | 228 +++++++++++++++++++++++++++++++++++++++++++-
kernel/time/tick-sched.c | 9 -
- 7 files changed, 246 insertions(+), 14 deletions(-)
+ 7 files changed, 252 insertions(+), 14 deletions(-)
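
[Editor's note] The patch description above states the core semantic: the BH-disable depth moves out of preempt_count, and local_bh_enable() processes all softirq work raised inside the BH-disabled section only once the counter returns to 0. The following is a minimal, self-contained user-space sketch of that counter semantics, not kernel code; the names bh_count, pending and run_pending() are illustrative stand-ins for current->softirq_count, local_softirq_pending() and the softirq dispatch loop.

/* Illustrative model: nested BH-disable sections defer raised work
 * until the outermost local_bh_enable() brings the counter to zero. */
#include <stdio.h>

static int bh_count;   /* stands in for the per-task BH-disable depth */
static int pending;    /* stands in for local_softirq_pending() */

static void local_bh_disable(void) { bh_count++; }

static void raise_softirq(int nr) { pending |= 1 << nr; }

static void run_pending(void)
{
	while (pending) {
		int nr = __builtin_ctz(pending);

		pending &= ~(1 << nr);
		printf("running softirq %d\n", nr);
	}
}

static void local_bh_enable(void)
{
	if (--bh_count == 0 && pending)
		run_pending();  /* only the outermost enable runs the work */
}

int main(void)
{
	local_bh_disable();
	local_bh_disable();  /* nested BH-disabled section */
	raise_softirq(3);
	local_bh_enable();   /* inner enable: counter still 1, nothing runs */
	local_bh_enable();   /* outer enable: counter hits 0, softirq 3 runs */
	return 0;
}
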
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
@@ -28,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <linux/preempt.h>
-+#ifdef CONFIG_PREEMPT_RT_FULL
++#ifdef CONFIG_PREEMPT_RT
+extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
+#else
+
@@ -45,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -556,6 +556,7 @@ extern void __raise_softirq_irqoff(unsig
+@@ -561,6 +561,7 @@ extern void __raise_softirq_irqoff(unsig
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
@@ -76,11 +77,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define in_nmi() (preempt_count() & NMI_MASK)
#define in_task() (!(preempt_count() & \
(NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
-+#ifdef CONFIG_PREEMPT_RT_FULL
++#ifdef CONFIG_PREEMPT_RT
+
-+#define softirq_count() ((long)get_current()->softirq_count)
++#define softirq_count() ((long)current->softirq_count)
+#define in_softirq() (softirq_count())
-+#define in_serving_softirq() (get_current()->softirq_count & SOFTIRQ_OFFSET)
++#define in_serving_softirq() (current->softirq_count & SOFTIRQ_OFFSET)
+
+#else
+
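
[Editor's note] The preempt.h hunk above makes softirq_count() read the new per-task counter on RT: in_softirq() is non-zero whenever the task has BH disabled or is serving a softirq, while in_serving_softirq() tests only the SOFTIRQ_OFFSET bit that the __do_softirq() hunks further down set and clear. A small user-space sketch of how the two predicates differ, assuming the usual SOFTIRQ_OFFSET layout (bit 8) and with struct task / me as purely illustrative stand-ins for task_struct / current:

/* Illustrative only: per-task softirq accounting as used on RT. */
#include <stdio.h>

#define SOFTIRQ_OFFSET         (1L << 8)
#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)

struct task { long softirq_count; };  /* stands in for task_struct */
static struct task me;                /* stands in for current */

#define softirq_count()       (me.softirq_count)
#define in_softirq()          (softirq_count())
#define in_serving_softirq()  (me.softirq_count & SOFTIRQ_OFFSET)

int main(void)
{
	/* local_bh_disable(): BH disabled, but no softirq being served */
	me.softirq_count += SOFTIRQ_DISABLE_OFFSET;
	printf("in_softirq=%ld in_serving_softirq=%ld\n",
	       in_softirq(), in_serving_softirq());

	/* __do_softirq() entry: mark the task as serving a softirq */
	me.softirq_count |= SOFTIRQ_OFFSET;
	printf("in_softirq=%ld in_serving_softirq=%ld\n",
	       in_softirq(), in_serving_softirq());
	return 0;
}
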
@@ -94,23 +95,23 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* The preempt_count offset after preempt_disable();
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
-@@ -272,7 +272,8 @@ static inline void rcu_preempt_sleep_che
+@@ -279,7 +279,8 @@ static inline void rcu_preempt_sleep_che
#define rcu_sleep_check() \
do { \
rcu_preempt_sleep_check(); \
- RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \
-+ if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) \
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \
+ RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \
"Illegal context switch in RCU-bh read-side critical section"); \
RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map), \
"Illegal context switch in RCU-sched read-side critical section"); \
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -923,6 +923,9 @@ struct task_struct {
+@@ -978,6 +978,9 @@ struct task_struct {
int softirqs_enabled;
int softirq_context;
#endif
-+#ifdef CONFIG_PREEMPT_RT_FULL
++#ifdef CONFIG_PREEMPT_RT
+ int softirq_count;
+#endif
@@ -122,17 +123,17 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>
-+#ifdef CONFIG_PREEMPT_RT_FULL
++#ifdef CONFIG_PREEMPT_RT
+#include <linux/locallock.h>
+#endif
#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
-@@ -102,6 +105,98 @@ static bool ksoftirqd_running(unsigned l
+@@ -102,6 +105,104 @@ static bool ksoftirqd_running(unsigned l
* softirq and whether we just have bh disabled.
*/
-+#ifdef CONFIG_PREEMPT_RT_FULL
++#ifdef CONFIG_PREEMPT_RT
+static DEFINE_LOCAL_IRQ_LOCK(bh_lock);
+static DEFINE_PER_CPU(long, softirq_counter);
+
@@ -142,8 +143,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ long soft_cnt;
+
+ WARN_ON_ONCE(in_irq());
-+ if (!in_atomic())
++ if (!in_atomic()) {
+ local_lock(bh_lock);
++ rcu_read_lock();
++ }
+ soft_cnt = this_cpu_inc_return(softirq_counter);
+ WARN_ON_ONCE(soft_cnt == 0);
+ current->softirq_count += SOFTIRQ_DISABLE_OFFSET;
@@ -178,8 +181,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#endif
+
+ current->softirq_count -= SOFTIRQ_DISABLE_OFFSET;
-+ if (!in_atomic())
++ if (!in_atomic()) {
++ rcu_read_unlock();
+ local_unlock(bh_lock);
++ }
+}
+
+void _local_bh_enable_rt(void)
@@ -212,8 +217,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ WARN_ON_ONCE(count < 0);
+ local_irq_enable();
+
-+ if (!in_atomic())
++ if (!in_atomic()) {
++ rcu_read_unlock();
+ local_unlock(bh_lock);
++ }
+
+ current->softirq_count -= SOFTIRQ_DISABLE_OFFSET;
+ preempt_check_resched();
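
[Editor's note] The hunks above change the RT BH enter/exit paths so that, for non-atomic callers, rcu_read_lock() is taken right after local_lock(bh_lock) on the way in and rcu_read_unlock() is dropped right before local_unlock(bh_lock) on the way out; this is the Scott Wood addition mentioned in the description. A minimal user-space sketch of that acquire/release ordering only; the pthread mutex and rcu_depth counter are stand-ins, not the kernel's local-lock or RCU API:

/* Illustrative only: lock/RCU ordering used by the RT BH hunks above.
 * Acquire: local_lock(bh_lock), then rcu_read_lock().
 * Release: rcu_read_unlock(), then local_unlock(bh_lock). */
#include <pthread.h>

static pthread_mutex_t bh_lock = PTHREAD_MUTEX_INITIALIZER; /* per-CPU local lock stand-in */
static __thread int rcu_depth;                              /* RCU read-side nesting stand-in */

static void bh_section_enter(void)  /* models the !in_atomic() path of local_bh_disable() */
{
	pthread_mutex_lock(&bh_lock);
	rcu_depth++;                /* rcu_read_lock() */
}

static void bh_section_exit(void)   /* models the !in_atomic() path of local_bh_enable() */
{
	rcu_depth--;                /* rcu_read_unlock() */
	pthread_mutex_unlock(&bh_lock);
}

int main(void)
{
	bh_section_enter();
	/* softirq-raising work would run here */
	bh_section_exit();
	return 0;
}
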
@@ -227,7 +234,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* This one is for softirq.c-internal use,
* where hardirqs are disabled legitimately:
-@@ -196,6 +291,7 @@ void __local_bh_enable_ip(unsigned long
+@@ -196,6 +297,7 @@ void __local_bh_enable_ip(unsigned long
preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);
@@ -235,11 +242,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
-@@ -266,7 +362,11 @@ asmlinkage __visible void __softirq_entr
+@@ -266,7 +368,11 @@ asmlinkage __visible void __softirq_entr
pending = local_softirq_pending();
account_irq_enter_time(current);
-+#ifdef CONFIG_PREEMPT_RT_FULL
++#ifdef CONFIG_PREEMPT_RT
+ current->softirq_count |= SOFTIRQ_OFFSET;
+#else
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
@@ -247,23 +254,23 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
in_hardirq = lockdep_softirq_start();
restart:
-@@ -300,9 +400,10 @@ asmlinkage __visible void __softirq_entr
+@@ -300,9 +406,10 @@ asmlinkage __visible void __softirq_entr
h++;
pending >>= softirq_bit;
}
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
++#ifndef CONFIG_PREEMPT_RT
if (__this_cpu_read(ksoftirqd) == current)
rcu_softirq_qs();
+#endif
local_irq_disable();
pending = local_softirq_pending();
-@@ -316,11 +417,16 @@ asmlinkage __visible void __softirq_entr
+@@ -316,11 +423,16 @@ asmlinkage __visible void __softirq_entr
lockdep_softirq_end(in_hardirq);
account_irq_exit_time(current);
-+#ifdef CONFIG_PREEMPT_RT_FULL
++#ifdef CONFIG_PREEMPT_RT
+ current->softirq_count &= ~SOFTIRQ_OFFSET;
+#else
__local_bh_enable(SOFTIRQ_OFFSET);
@@ -272,11 +279,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
current_restore_flags(old_flags, PF_MEMALLOC);
}
-+#ifndef CONFIG_PREEMPT_RT_FULL
++#ifndef CONFIG_PREEMPT_RT
asmlinkage __visible void do_softirq(void)
{
__u32 pending;
-@@ -338,6 +444,7 @@ asmlinkage __visible void do_softirq(voi
+@@ -338,6 +450,7 @@ asmlinkage __visible void do_softirq(voi
local_irq_restore(flags);
}
@@ -284,11 +291,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Enter an interrupt context.
-@@ -358,6 +465,16 @@ void irq_enter(void)
+@@ -358,6 +471,16 @@ void irq_enter(void)
__irq_enter();
}
-+#ifdef CONFIG_PREEMPT_RT_FULL
++#ifdef CONFIG_PREEMPT_RT
+
+static inline void invoke_softirq(void)
+{
@@ -301,7 +308,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline void invoke_softirq(void)
{
if (ksoftirqd_running(local_softirq_pending()))
-@@ -383,6 +500,7 @@ static inline void invoke_softirq(void)
+@@ -383,6 +506,7 @@ static inline void invoke_softirq(void)
wakeup_softirqd();
}
}
@@ -309,11 +316,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline void tick_irq_exit(void)
{
-@@ -420,6 +538,27 @@ void irq_exit(void)
+@@ -420,6 +544,27 @@ void irq_exit(void)
/*
* This function must run with irqs disabled!
*/
-+#ifdef CONFIG_PREEMPT_RT_FULL
++#ifdef CONFIG_PREEMPT_RT
+void raise_softirq_irqoff(unsigned int nr)
+{
+ __raise_softirq_irqoff(nr);
@@ -337,7 +344,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
inline void raise_softirq_irqoff(unsigned int nr)
{
__raise_softirq_irqoff(nr);
-@@ -437,6 +576,8 @@ inline void raise_softirq_irqoff(unsigne
+@@ -437,6 +582,8 @@ inline void raise_softirq_irqoff(unsigne
wakeup_softirqd();
}
@@ -346,7 +353,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void raise_softirq(unsigned int nr)
{
unsigned long flags;
-@@ -594,6 +735,7 @@ static int ksoftirqd_should_run(unsigned
+@@ -594,6 +741,7 @@ static int ksoftirqd_should_run(unsigned
static void run_ksoftirqd(unsigned int cpu)
{
@@ -354,7 +361,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
local_irq_disable();
if (local_softirq_pending()) {
/*
-@@ -602,10 +744,12 @@ static void run_ksoftirqd(unsigned int c
+@@ -602,10 +750,12 @@ static void run_ksoftirqd(unsigned int c
*/
__do_softirq();
local_irq_enable();
@@ -367,11 +374,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#ifdef CONFIG_HOTPLUG_CPU
-@@ -679,6 +823,13 @@ static struct smp_hotplug_thread softirq
+@@ -679,6 +829,13 @@ static struct smp_hotplug_thread softirq
static __init int spawn_ksoftirqd(void)
{
-+#ifdef CONFIG_PREEMPT_RT_FULL
++#ifdef CONFIG_PREEMPT_RT
+ int cpu;
+
+ for_each_possible_cpu(cpu)
@@ -381,11 +388,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
takeover_tasklets);
BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
-@@ -687,6 +838,75 @@ static __init int spawn_ksoftirqd(void)
+@@ -687,6 +844,75 @@ static __init int spawn_ksoftirqd(void)
}
early_initcall(spawn_ksoftirqd);
-+#ifdef CONFIG_PREEMPT_RT_FULL
++#ifdef CONFIG_PREEMPT_RT
+
+/*
+ * On preempt-rt a softirq running context might be blocked on a
@@ -459,7 +466,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* GCC does not inline them incorrectly. ]
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
-@@ -893,14 +893,7 @@ static bool can_stop_idle_tick(int cpu,
+@@ -899,14 +899,7 @@ static bool can_stop_idle_tick(int cpu,
return false;
if (unlikely(local_softirq_pending())) {