Diffstat (limited to 'debian/patches-rt/0004-cgroup-Acquire-cgroup_rstat_lock-with-enabled-interr.patch')
-rw-r--r-- debian/patches-rt/0004-cgroup-Acquire-cgroup_rstat_lock-with-enabled-interr.patch | 72
1 file changed, 72 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0004-cgroup-Acquire-cgroup_rstat_lock-with-enabled-interr.patch b/debian/patches-rt/0004-cgroup-Acquire-cgroup_rstat_lock-with-enabled-interr.patch
new file mode 100644
index 000000000..d75a1b4c5
--- /dev/null
+++ b/debian/patches-rt/0004-cgroup-Acquire-cgroup_rstat_lock-with-enabled-interr.patch
@@ -0,0 +1,72 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 16 Aug 2019 12:49:36 +0200
+Subject: [PATCH 4/4] cgroup: Acquire cgroup_rstat_lock with enabled interrupts
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.4/older/patches-5.4.3-rt1.tar.xz
+
+There is no need to disable interrupts while cgroup_rstat_lock is
+acquired. The lock is never used in IRQ context, so a simple spin_lock()
+is enough for synchronisation purposes.
+
+Acquire cgroup_rstat_lock without disabling interrupts, and ensure that
+cgroup_rstat_cpu_lock is acquired with interrupts disabled (this one is
+acquired in IRQ context).
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/cgroup/rstat.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/kernel/cgroup/rstat.c
++++ b/kernel/cgroup/rstat.c
+@@ -161,17 +161,17 @@ static void cgroup_rstat_flush_locked(st
+ cpu);
+ struct cgroup *pos = NULL;
+
+- raw_spin_lock(cpu_lock);
++ raw_spin_lock_irq(cpu_lock);
+ while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu)))
+ cgroup_base_stat_flush(pos, cpu);
+
+- raw_spin_unlock(cpu_lock);
++ raw_spin_unlock_irq(cpu_lock);
+
+ if (need_resched() || spin_needbreak(&cgroup_rstat_lock)) {
+- spin_unlock_irq(&cgroup_rstat_lock);
++ spin_unlock(&cgroup_rstat_lock);
+ if (!cond_resched())
+ cpu_relax();
+- spin_lock_irq(&cgroup_rstat_lock);
++ spin_lock(&cgroup_rstat_lock);
+ }
+ }
+ }
+@@ -193,9 +193,9 @@ void cgroup_rstat_flush(struct cgroup *c
+ {
+ might_sleep();
+
+- spin_lock_irq(&cgroup_rstat_lock);
++ spin_lock(&cgroup_rstat_lock);
+ cgroup_rstat_flush_locked(cgrp);
+- spin_unlock_irq(&cgroup_rstat_lock);
++ spin_unlock(&cgroup_rstat_lock);
+ }
+
+ /**
+@@ -211,7 +211,7 @@ static void cgroup_rstat_flush_hold(stru
+ __acquires(&cgroup_rstat_lock)
+ {
+ might_sleep();
+- spin_lock_irq(&cgroup_rstat_lock);
++ spin_lock(&cgroup_rstat_lock);
+ cgroup_rstat_flush_locked(cgrp);
+ }
+
+@@ -221,7 +221,7 @@ static void cgroup_rstat_flush_hold(stru
+ static void cgroup_rstat_flush_release(void)
+ __releases(&cgroup_rstat_lock)
+ {
+- spin_unlock_irq(&cgroup_rstat_lock);
++ spin_unlock(&cgroup_rstat_lock);
+ }
+
+ int cgroup_rstat_init(struct cgroup *cgrp)
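
For readers following the RT series: on PREEMPT_RT, spin_lock() is implemented as a sleeping lock and therefore must not be taken with interrupts disabled, while raw_spin_lock() remains a true spinning lock. The fragment below is a minimal sketch of the locking pattern the hunks above converge on, not kernel source: global_lock, pcpu_lock, sketch_init() and flush_all() are hypothetical names standing in for cgroup_rstat_lock, cgroup_rstat_cpu_lock, cgroup_rstat_boot() and cgroup_rstat_flush_locked().

	#include <linux/spinlock.h>
	#include <linux/percpu.h>
	#include <linux/sched.h>

	/* Outer lock: never taken from IRQ context, so a plain spin_lock()
	 * (sleeping on RT) is sufficient. */
	static DEFINE_SPINLOCK(global_lock);

	/* Inner per-CPU lock: taken from IRQ context, so it stays a raw
	 * spinlock and is acquired with interrupts disabled. */
	static DEFINE_PER_CPU(raw_spinlock_t, pcpu_lock);

	static void sketch_init(void)
	{
		int cpu;

		for_each_possible_cpu(cpu)
			raw_spin_lock_init(per_cpu_ptr(&pcpu_lock, cpu));
	}

	static void flush_all(void)
	{
		int cpu;

		spin_lock(&global_lock);	/* no _irq: IRQs stay enabled */
		for_each_possible_cpu(cpu) {
			raw_spinlock_t *lock = per_cpu_ptr(&pcpu_lock, cpu);

			raw_spin_lock_irq(lock);
			/* ... flush this CPU's counters ... */
			raw_spin_unlock_irq(lock);

			/* Yield point: only the plain lock is dropped, so
			 * there is no IRQ state to save and restore. */
			if (need_resched() ||
			    spin_needbreak(&global_lock)) {
				spin_unlock(&global_lock);
				if (!cond_resched())
					cpu_relax();
				spin_lock(&global_lock);
			}
		}
		spin_unlock(&global_lock);
	}

The inversion relative to the pre-patch code is deliberate: before the patch, the outer lock disabled interrupts (spin_lock_irq) while the inner raw lock did not; afterwards, interrupt disabling is confined to the narrow per-CPU critical section, the only one that can race with IRQ-context updaters, and the long-running outer section stays preemptible.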