Diffstat (limited to 'debian/patches-rt/0003-mm-vmstat-Use-preempt_-dis-en-able_nested.patch')
 -rw-r--r--  debian/patches-rt/0003-mm-vmstat-Use-preempt_-dis-en-able_nested.patch | 141
 1 file changed, 141 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0003-mm-vmstat-Use-preempt_-dis-en-able_nested.patch b/debian/patches-rt/0003-mm-vmstat-Use-preempt_-dis-en-able_nested.patch
new file mode 100644
index 000000000..145a58e10
--- /dev/null
+++ b/debian/patches-rt/0003-mm-vmstat-Use-preempt_-dis-en-able_nested.patch
@@ -0,0 +1,141 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 25 Aug 2022 18:41:26 +0200
+Subject: [PATCH 3/8] mm/vmstat: Use preempt_[dis|en]able_nested()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/6.0/older/patches-6.0-rt11.tar.xz
+
+Replace the open coded CONFIG_PREEMPT_RT conditional
+preempt_enable/disable() pairs with the new helper functions which hide
+the underlying implementation details.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: linux-mm@kvack.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lore.kernel.org/r/20220825164131.402717-4-bigeasy@linutronix.de
+---
+ mm/vmstat.c | 36 ++++++++++++------------------------
+ 1 file changed, 12 insertions(+), 24 deletions(-)
+
+--- a/mm/vmstat.c
++++ b/mm/vmstat.c
+@@ -355,8 +355,7 @@ void __mod_zone_page_state(struct zone *
+ * CPU migrations and preemption potentially corrupts a counter so
+ * disable preemption.
+ */
+- if (IS_ENABLED(CONFIG_PREEMPT_RT))
+- preempt_disable();
++ preempt_disable_nested();
+
+ x = delta + __this_cpu_read(*p);
+
+@@ -368,8 +367,7 @@ void __mod_zone_page_state(struct zone *
+ }
+ __this_cpu_write(*p, x);
+
+- if (IS_ENABLED(CONFIG_PREEMPT_RT))
+- preempt_enable();
++ preempt_enable_nested();
+ }
+ EXPORT_SYMBOL(__mod_zone_page_state);
+
+@@ -393,8 +391,7 @@ void __mod_node_page_state(struct pglist
+ }
+
+ /* See __mod_node_page_state */
+- if (IS_ENABLED(CONFIG_PREEMPT_RT))
+- preempt_disable();
++ preempt_disable_nested();
+
+ x = delta + __this_cpu_read(*p);
+
+@@ -406,8 +403,7 @@ void __mod_node_page_state(struct pglist
+ }
+ __this_cpu_write(*p, x);
+
+- if (IS_ENABLED(CONFIG_PREEMPT_RT))
+- preempt_enable();
++ preempt_enable_nested();
+ }
+ EXPORT_SYMBOL(__mod_node_page_state);
+
+@@ -441,8 +437,7 @@ void __inc_zone_state(struct zone *zone,
+ s8 v, t;
+
+ /* See __mod_node_page_state */
+- if (IS_ENABLED(CONFIG_PREEMPT_RT))
+- preempt_disable();
++ preempt_disable_nested();
+
+ v = __this_cpu_inc_return(*p);
+ t = __this_cpu_read(pcp->stat_threshold);
+@@ -453,8 +448,7 @@ void __inc_zone_state(struct zone *zone,
+ __this_cpu_write(*p, -overstep);
+ }
+
+- if (IS_ENABLED(CONFIG_PREEMPT_RT))
+- preempt_enable();
++ preempt_enable_nested();
+ }
+
+ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
+@@ -466,8 +460,7 @@ void __inc_node_state(struct pglist_data
+ VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
+
+ /* See __mod_node_page_state */
+- if (IS_ENABLED(CONFIG_PREEMPT_RT))
+- preempt_disable();
++ preempt_disable_nested();
+
+ v = __this_cpu_inc_return(*p);
+ t = __this_cpu_read(pcp->stat_threshold);
+@@ -478,8 +471,7 @@ void __inc_node_state(struct pglist_data
+ __this_cpu_write(*p, -overstep);
+ }
+
+- if (IS_ENABLED(CONFIG_PREEMPT_RT))
+- preempt_enable();
++ preempt_enable_nested();
+ }
+
+ void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
+@@ -501,8 +493,7 @@ void __dec_zone_state(struct zone *zone,
+ s8 v, t;
+
+ /* See __mod_node_page_state */
+- if (IS_ENABLED(CONFIG_PREEMPT_RT))
+- preempt_disable();
++ preempt_disable_nested();
+
+ v = __this_cpu_dec_return(*p);
+ t = __this_cpu_read(pcp->stat_threshold);
+@@ -513,8 +504,7 @@ void __dec_zone_state(struct zone *zone,
+ __this_cpu_write(*p, overstep);
+ }
+
+- if (IS_ENABLED(CONFIG_PREEMPT_RT))
+- preempt_enable();
++ preempt_enable_nested();
+ }
+
+ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
+@@ -526,8 +516,7 @@ void __dec_node_state(struct pglist_data
+ VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
+
+ /* See __mod_node_page_state */
+- if (IS_ENABLED(CONFIG_PREEMPT_RT))
+- preempt_disable();
++ preempt_disable_nested();
+
+ v = __this_cpu_dec_return(*p);
+ t = __this_cpu_read(pcp->stat_threshold);
+@@ -538,8 +527,7 @@ void __dec_node_state(struct pglist_data
+ __this_cpu_write(*p, overstep);
+ }
+
+- if (IS_ENABLED(CONFIG_PREEMPT_RT))
+- preempt_enable();
++ preempt_enable_nested();
+ }
+
+ void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
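
The preempt_disable_nested()/preempt_enable_nested() helpers this patch calls are introduced earlier in the same series (patch 2/8, see the Link above) and are not shown here. A minimal sketch of what they presumably reduce to, inferred directly from the open-coded pattern being removed in the hunks above, is:

    /*
     * Sketch of the helpers assumed by this patch, inferred from the
     * open-coded pattern they replace; not part of this patch itself.
     *
     * On PREEMPT_RT the vmstat fast paths are not implicitly protected
     * by disabled interrupts, so preemption must be disabled explicitly
     * to keep the per-CPU counter update atomic with respect to
     * migration. On !PREEMPT_RT both calls compile away to nothing.
     */
    static __always_inline void preempt_disable_nested(void)
    {
    	if (IS_ENABLED(CONFIG_PREEMPT_RT))
    		preempt_disable();
    }

    static __always_inline void preempt_enable_nested(void)
    {
    	if (IS_ENABLED(CONFIG_PREEMPT_RT))
    		preempt_enable();
    }

Centralizing the conditional in a helper keeps the twelve call sites in mm/vmstat.c (six functions, one disable/enable pair each) free of repeated CONFIG_PREEMPT_RT checks, while the _nested suffix documents that the callers already run in a context that is safe on non-RT kernels.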