Diffstat (limited to 'debian/patches-rt/softirq__Check_preemption_after_reenabling_interrupts.patch')
-rw-r--r-- | debian/patches-rt/softirq__Check_preemption_after_reenabling_interrupts.patch | 103
1 file changed, 103 insertions, 0 deletions
diff --git a/debian/patches-rt/softirq__Check_preemption_after_reenabling_interrupts.patch b/debian/patches-rt/softirq__Check_preemption_after_reenabling_interrupts.patch
new file mode 100644
index 000000000..5f99e7649
--- /dev/null
+++ b/debian/patches-rt/softirq__Check_preemption_after_reenabling_interrupts.patch
@@ -0,0 +1,103 @@
+Subject: softirq: Check preemption after reenabling interrupts
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun Nov 13 17:17:09 2011 +0100
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.15/older/patches-5.15.3-rt21.tar.xz
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+raise_softirq_irqoff() disables interrupts and wakes the softirq
+daemon, but after reenabling interrupts there is no preemption check,
+so the execution of the softirq thread might be delayed arbitrarily.
+
+In principle we could add that check to local_irq_enable/restore, but
+that's overkill as the raise_softirq_irqoff() sections are the only
+ones which show this behaviour.
+
+Reported-by: Carsten Emde <cbe@osadl.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+
+
+---
+ include/linux/preempt.h |    3 +++
+ net/core/dev.c          |    7 +++++++
+ 2 files changed, 10 insertions(+)
+---
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -191,8 +191,10 @@ do { \
+
+ #ifndef CONFIG_PREEMPT_RT
+ # define preempt_enable_no_resched() sched_preempt_enable_no_resched()
++# define preempt_check_resched_rt() barrier();
+ #else
+ # define preempt_enable_no_resched() preempt_enable()
++# define preempt_check_resched_rt() preempt_check_resched()
+ #endif
+
+ #define preemptible() (preempt_count() == 0 && !irqs_disabled())
+@@ -263,6 +265,7 @@ do { \
+ #define preempt_disable_notrace() barrier()
+ #define preempt_enable_no_resched_notrace() barrier()
+ #define preempt_enable_notrace() barrier()
++#define preempt_check_resched_rt() barrier()
+ #define preemptible() 0
+
+ #endif /* CONFIG_PREEMPT_COUNT */
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3040,6 +3040,7 @@ static void __netif_reschedule(struct Qd
+         sd->output_queue_tailp = &q->next_sched;
+         raise_softirq_irqoff(NET_TX_SOFTIRQ);
+         local_irq_restore(flags);
++        preempt_check_resched_rt();
+ }
+
+ void __netif_schedule(struct Qdisc *q)
+@@ -3102,6 +3103,7 @@ void __dev_kfree_skb_irq(struct sk_buff
+         __this_cpu_write(softnet_data.completion_queue, skb);
+         raise_softirq_irqoff(NET_TX_SOFTIRQ);
+         local_irq_restore(flags);
++        preempt_check_resched_rt();
+ }
+ EXPORT_SYMBOL(__dev_kfree_skb_irq);
+
+@@ -4644,6 +4646,7 @@ static int enqueue_to_backlog(struct sk_
+         rps_unlock(sd);
+
+         local_irq_restore(flags);
++        preempt_check_resched_rt();
+
+         atomic_long_inc(&skb->dev->rx_dropped);
+         kfree_skb(skb);
+@@ -6387,12 +6390,14 @@ static void net_rps_action_and_irq_enabl
+                 sd->rps_ipi_list = NULL;
+
+                 local_irq_enable();
++                preempt_check_resched_rt();
+
+                 /* Send pending IPI's to kick RPS processing on remote cpus. */
+                 net_rps_send_ipi(remsd);
+         } else
+ #endif
+                 local_irq_enable();
++                preempt_check_resched_rt();
+ }
+
+ static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
+@@ -6470,6 +6475,7 @@ void __napi_schedule(struct napi_struct
+         local_irq_save(flags);
+         ____napi_schedule(this_cpu_ptr(&softnet_data), n);
+         local_irq_restore(flags);
++        preempt_check_resched_rt();
+ }
+ EXPORT_SYMBOL(__napi_schedule);
+
+@@ -11292,6 +11298,7 @@ static int dev_cpu_dead(unsigned int old
+
+         raise_softirq_irqoff(NET_TX_SOFTIRQ);
+         local_irq_enable();
++        preempt_check_resched_rt();
+
+ #ifdef CONFIG_RPS
+         remsd = oldsd->rps_ipi_list;
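For readers of the patch, a minimal sketch of the call-site pattern it applies throughout net/core/dev.c: raise a softirq with interrupts disabled, reenable interrupts, then run the RT-aware preemption check so the woken softirq thread can actually run. example_kick_tx() is a hypothetical caller for illustration, not code from the patch:

/* Hypothetical example (not part of the patch) of the pattern above. */
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/preempt.h>

static void example_kick_tx(void)
{
        unsigned long flags;

        local_irq_save(flags);
        /* Mark the softirq pending and wake the softirq daemon on RT. */
        raise_softirq_irqoff(NET_TX_SOFTIRQ);
        local_irq_restore(flags);
        /*
         * Without this check, the wakeup above would not cause a
         * reschedule until the next preemption point, so the softirq
         * thread's execution could be delayed arbitrarily. On
         * !PREEMPT_RT kernels the macro compiles to barrier(), leaving
         * the non-RT path unchanged.
         */
        preempt_check_resched_rt();
}

The check is placed at each raise_softirq_irqoff() call site rather than inside local_irq_enable()/local_irq_restore(), matching the commit message's point that only these sections show the delayed-softirq behaviour.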