Diffstat (limited to 'debian/patches-rt/irqwork-push_most_work_into_softirq_context.patch')
-rw-r--r--  debian/patches-rt/irqwork-push_most_work_into_softirq_context.patch | 84
1 file changed, 36 insertions(+), 48 deletions(-)
diff --git a/debian/patches-rt/irqwork-push_most_work_into_softirq_context.patch b/debian/patches-rt/irqwork-push_most_work_into_softirq_context.patch
index 0096c7452..e7ebb59a6 100644
--- a/debian/patches-rt/irqwork-push_most_work_into_softirq_context.patch
+++ b/debian/patches-rt/irqwork-push_most_work_into_softirq_context.patch
@@ -1,7 +1,7 @@
Subject: irqwork: push most work into softirq context
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 23 Jun 2015 15:32:51 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Initially we defered all irqwork into softirq because we didn't want the
latency spikes if perf or another user was busy and delayed the RT task.
@@ -23,12 +23,12 @@ Mike Galbraith,
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/irq_work.h | 8 ++++++
- kernel/irq_work.c | 60 ++++++++++++++++++++++++++++++++++++-----------
+ kernel/irq_work.c | 59 +++++++++++++++++++++++++++++++++++++----------
kernel/rcu/tree.c | 1
kernel/sched/topology.c | 1
kernel/time/tick-sched.c | 1
kernel/time/timer.c | 2 +
- 6 files changed, 60 insertions(+), 13 deletions(-)
+ 6 files changed, 60 insertions(+), 12 deletions(-)
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* _LINUX_IRQ_WORK_H */
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
-@@ -17,6 +17,7 @@
+@@ -18,6 +18,7 @@
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
@@ -62,52 +62,22 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <asm/processor.h>
-@@ -64,6 +65,8 @@ void __weak arch_irq_work_raise(void)
- */
- bool irq_work_queue_on(struct irq_work *work, int cpu)
- {
-+ struct llist_head *list;
-+
- /* All work should have been flushed before going offline */
- WARN_ON_ONCE(cpu_is_offline(cpu));
-
-@@ -76,7 +79,12 @@ bool irq_work_queue_on(struct irq_work *
- if (!irq_work_claim(work))
- return false;
-
-- if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
-+ if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ))
-+ list = &per_cpu(lazy_list, cpu);
-+ else
-+ list = &per_cpu(raised_list, cpu);
-+
-+ if (llist_add(&work->llnode, list))
- arch_send_call_function_single_ipi(cpu);
-
- #else /* #ifdef CONFIG_SMP */
-@@ -89,6 +97,9 @@ bool irq_work_queue_on(struct irq_work *
- /* Enqueue the irq work @work on the current CPU */
- bool irq_work_queue(struct irq_work *work)
+@@ -60,13 +61,19 @@ void __weak arch_irq_work_raise(void)
+ /* Enqueue on current CPU, work must already be claimed and preempt disabled */
+ static void __irq_work_queue_local(struct irq_work *work)
{
+ struct llist_head *list;
+ bool lazy_work, realtime = IS_ENABLED(CONFIG_PREEMPT_RT_FULL);
+
- /* Only queue if not already pending */
- if (!irq_work_claim(work))
- return false;
-@@ -96,13 +107,15 @@ bool irq_work_queue(struct irq_work *wor
- /* Queue the entry and raise the IPI if needed. */
- preempt_disable();
-
-- /* If the work is "lazy", handle it from next tick if any */
++ lazy_work = work->flags & IRQ_WORK_LAZY;
++
+ /* If the work is "lazy", handle it from next tick if any */
- if (work->flags & IRQ_WORK_LAZY) {
- if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
- tick_nohz_tick_stopped())
- arch_irq_work_raise();
- } else {
- if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
-+ lazy_work = work->flags & IRQ_WORK_LAZY;
-+
+ if (lazy_work || (realtime && !(work->flags & IRQ_WORK_HARD_IRQ)))
+ list = this_cpu_ptr(&lazy_list);
+ else
@@ -117,8 +87,26 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ if (!lazy_work || tick_nohz_tick_stopped())
arch_irq_work_raise();
}
+ }
+@@ -108,9 +115,16 @@ bool irq_work_queue_on(struct irq_work *
-@@ -119,9 +132,8 @@ bool irq_work_needs_cpu(void)
+ preempt_disable();
+ if (cpu != smp_processor_id()) {
++ struct llist_head *list;
++
+ /* Arch remote IPI send/receive backend aren't NMI safe */
+ WARN_ON_ONCE(in_nmi());
+- if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
++ if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ))
++ list = &per_cpu(lazy_list, cpu);
++ else
++ list = &per_cpu(raised_list, cpu);
++
++ if (llist_add(&work->llnode, list))
+ arch_send_call_function_single_ipi(cpu);
+ } else {
+ __irq_work_queue_local(work);
+@@ -129,9 +143,8 @@ bool irq_work_needs_cpu(void)
raised = this_cpu_ptr(&raised_list);
lazy = this_cpu_ptr(&lazy_list);
@@ -130,7 +118,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* All work should have been flushed before going offline */
WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
-@@ -135,8 +147,12 @@ static void irq_work_run_list(struct lli
+@@ -145,8 +158,12 @@ static void irq_work_run_list(struct lli
struct llist_node *llnode;
unsigned long flags;
@@ -144,7 +132,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (llist_empty(list))
return;
-@@ -168,7 +184,16 @@ static void irq_work_run_list(struct lli
+@@ -178,7 +195,16 @@ static void irq_work_run_list(struct lli
void irq_work_run(void)
{
irq_work_run_list(this_cpu_ptr(&raised_list));
@@ -162,7 +150,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
EXPORT_SYMBOL_GPL(irq_work_run);
-@@ -178,8 +203,17 @@ void irq_work_tick(void)
+@@ -188,8 +214,17 @@ void irq_work_tick(void)
if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
irq_work_run_list(raised);
@@ -182,7 +170,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* Synchronize against the irq_work @entry, ensures the entry is not
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
-@@ -1152,6 +1152,7 @@ static int rcu_implicit_dynticks_qs(stru
+@@ -1074,6 +1074,7 @@ static int rcu_implicit_dynticks_qs(stru
!rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
(rnp->ffmask & rdp->grpmask)) {
init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
@@ -192,7 +180,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
-@@ -473,6 +473,7 @@ static int init_rootdomain(struct root_d
+@@ -502,6 +502,7 @@ static int init_rootdomain(struct root_d
rd->rto_cpu = -1;
raw_spin_lock_init(&rd->rto_lock);
init_irq_work(&rd->rto_push_work, rto_push_irq_work_func);
@@ -202,7 +190,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
init_dl_bw(&rd->dl_bw);
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
-@@ -229,6 +229,7 @@ static void nohz_full_kick_func(struct i
+@@ -235,6 +235,7 @@ static void nohz_full_kick_func(struct i
static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
.func = nohz_full_kick_func,
@@ -212,7 +200,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
-@@ -1723,6 +1723,8 @@ static __latent_entropy void run_timer_s
+@@ -1727,6 +1727,8 @@ static __latent_entropy void run_timer_s
{
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
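
The core decision this refreshed patch carries forward is the one added to __irq_work_queue_local() and irq_work_queue_on(): lazy work always lands on the per-CPU lazy_list, and on PREEMPT_RT_FULL any work item not explicitly flagged IRQ_WORK_HARD_IRQ is routed there too, so it runs from softirq rather than hard interrupt context. The following is a minimal, standalone user-space model of that routing test only, not kernel code: the flag bits, pick_list() helper and enum are simplified stand-ins for the kernel's IRQ_WORK_LAZY / IRQ_WORK_HARD_IRQ flags and the per-CPU raised_list / lazy_list.

/*
 * Standalone sketch (plain C, user space) of the list-selection logic
 * introduced by this patch. Flag values and helper names are stand-ins,
 * not the kernel's definitions.
 */
#include <stdbool.h>
#include <stdio.h>

#define IRQ_WORK_LAZY      (1U << 0)  /* stand-in for the kernel flag */
#define IRQ_WORK_HARD_IRQ  (1U << 1)  /* stand-in for the kernel flag */

enum target_list { RAISED_LIST, LAZY_LIST };

/*
 * Models the test in __irq_work_queue_local() after the patch:
 * lazy work goes to lazy_list; on PREEMPT_RT_FULL, anything not marked
 * IRQ_WORK_HARD_IRQ goes there as well, so it is handled from softirq.
 */
static enum target_list pick_list(unsigned int flags, bool preempt_rt_full)
{
	bool lazy_work = flags & IRQ_WORK_LAZY;

	if (lazy_work || (preempt_rt_full && !(flags & IRQ_WORK_HARD_IRQ)))
		return LAZY_LIST;
	return RAISED_LIST;
}

static const char *name(enum target_list l)
{
	return l == LAZY_LIST ? "lazy_list" : "raised_list";
}

int main(void)
{
	/* Unflagged work (e.g. a perf irq_work item): raised_list on a
	 * mainline kernel, lazy_list (softirq) on an RT kernel. */
	printf("flags=0   !RT -> %s, RT -> %s\n",
	       name(pick_list(0, false)), name(pick_list(0, true)));

	/* Work marked IRQ_WORK_HARD_IRQ stays on raised_list even on RT. */
	printf("HARD_IRQ  !RT -> %s, RT -> %s\n",
	       name(pick_list(IRQ_WORK_HARD_IRQ, false)),
	       name(pick_list(IRQ_WORK_HARD_IRQ, true)));
	return 0;
}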