Diffstat (limited to 'debian/patches-rt/kmemleak-Change-the-lock-of-kmemleak_object-to-raw_s.patch')
-rw-r--r--  debian/patches-rt/kmemleak-Change-the-lock-of-kmemleak_object-to-raw_s.patch  272
1 file changed, 272 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/kmemleak-Change-the-lock-of-kmemleak_object-to-raw_s.patch b/debian/patches-rt/kmemleak-Change-the-lock-of-kmemleak_object-to-raw_s.patch
new file mode 100644
index 000000000..e50bb401c
--- /dev/null
+++ b/debian/patches-rt/kmemleak-Change-the-lock-of-kmemleak_object-to-raw_s.patch
@@ -0,0 +1,272 @@
+From: Liu Haitao <haitao.liu@windriver.com>
+Date: Fri, 27 Sep 2019 16:22:30 +0800
+Subject: [PATCH] kmemleak: Change the lock of kmemleak_object to
+ raw_spinlock_t
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.4/older/patches-5.4.3-rt1.tar.xz
+
+The commit ("kmemleak: Turn kmemleak_lock to raw spinlock on RT")
+changed kmemleak_lock to a raw spinlock. However,
+kmemleak_object->lock is still acquired while kmemleak_lock is held in
+scan_block().
+
+Make the object->lock a raw_spinlock_t.
+
+Cc: stable-rt@vger.kernel.org
+Link: https://lkml.kernel.org/r/20190927082230.34152-1-yongxin.liu@windriver.com
+Signed-off-by: Liu Haitao <haitao.liu@windriver.com>
+Signed-off-by: Yongxin Liu <yongxin.liu@windriver.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/kmemleak.c | 68 +++++++++++++++++++++++++++++-----------------------------
+ 1 file changed, 34 insertions(+), 34 deletions(-)
+
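A note on why the conversion is needed: on PREEMPT_RT, spinlock_t is substituted with a sleeping, rtmutex-based lock, while raw_spinlock_t keeps the traditional busy-waiting semantics and may be held with preemption and interrupts disabled. Since scan_block() takes the per-object lock while already holding the raw kmemleak_lock, leaving object->lock as spinlock_t would mean taking a sleeping lock from a non-preemptible context. The sketch below illustrates the nesting the patch makes legal; it is a simplified stand-in (the struct, field and function names here are illustrative), not the actual mm/kmemleak.c code.

/* Illustrative sketch only -- simplified from mm/kmemleak.c, not the real code. */
#include <linux/spinlock.h>
#include <linux/lockdep.h>

static DEFINE_RAW_SPINLOCK(kmemleak_lock);	/* already raw since the earlier RT patch */

struct object {
	raw_spinlock_t lock;	/* was spinlock_t: a sleeping lock on PREEMPT_RT */
	int count;
};

static void scan_block_sketch(struct object *obj)
{
	unsigned long flags;

	/* Outer lock is raw: this section runs with interrupts disabled. */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);

	/*
	 * Inner per-object lock. If it were still spinlock_t, it would be
	 * an rtmutex on PREEMPT_RT and could sleep, which is invalid
	 * inside a raw-spinlock (non-preemptible) section. As a
	 * raw_spinlock_t the nesting is legal; the _nested variant only
	 * tells lockdep the second level is intentional.
	 */
	raw_spin_lock_nested(&obj->lock, SINGLE_DEPTH_NESTING);
	obj->count++;			/* stands in for update_refs() */
	raw_spin_unlock(&obj->lock);

	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

The hunks that follow perform exactly this mechanical conversion throughout mm/kmemleak.c: every spin_lock*/spin_unlock* on object->lock becomes the raw_spin_lock*/raw_spin_unlock* counterpart.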
+--- a/mm/kmemleak.c
++++ b/mm/kmemleak.c
+@@ -135,7 +135,7 @@ struct kmemleak_scan_area {
+ * (use_count) and freed using the RCU mechanism.
+ */
+ struct kmemleak_object {
+- spinlock_t lock;
++ raw_spinlock_t lock;
+ unsigned int flags; /* object status flags */
+ struct list_head object_list;
+ struct list_head gray_list;
+@@ -585,7 +585,7 @@ static struct kmemleak_object *create_ob
+ INIT_LIST_HEAD(&object->object_list);
+ INIT_LIST_HEAD(&object->gray_list);
+ INIT_HLIST_HEAD(&object->area_list);
+- spin_lock_init(&object->lock);
++ raw_spin_lock_init(&object->lock);
+ atomic_set(&object->use_count, 1);
+ object->flags = OBJECT_ALLOCATED;
+ object->pointer = ptr;
+@@ -667,9 +667,9 @@ static void __delete_object(struct kmeml
+ * Locking here also ensures that the corresponding memory block
+ * cannot be freed when it is being scanned.
+ */
+- spin_lock_irqsave(&object->lock, flags);
++ raw_spin_lock_irqsave(&object->lock, flags);
+ object->flags &= ~OBJECT_ALLOCATED;
+- spin_unlock_irqrestore(&object->lock, flags);
++ raw_spin_unlock_irqrestore(&object->lock, flags);
+ put_object(object);
+ }
+
+@@ -739,9 +739,9 @@ static void paint_it(struct kmemleak_obj
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&object->lock, flags);
++ raw_spin_lock_irqsave(&object->lock, flags);
+ __paint_it(object, color);
+- spin_unlock_irqrestore(&object->lock, flags);
++ raw_spin_unlock_irqrestore(&object->lock, flags);
+ }
+
+ static void paint_ptr(unsigned long ptr, int color)
+@@ -798,7 +798,7 @@ static void add_scan_area(unsigned long
+ if (scan_area_cache)
+ area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
+
+- spin_lock_irqsave(&object->lock, flags);
++ raw_spin_lock_irqsave(&object->lock, flags);
+ if (!area) {
+ pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
+ /* mark the object for full scan to avoid false positives */
+@@ -820,7 +820,7 @@ static void add_scan_area(unsigned long
+
+ hlist_add_head(&area->node, &object->area_list);
+ out_unlock:
+- spin_unlock_irqrestore(&object->lock, flags);
++ raw_spin_unlock_irqrestore(&object->lock, flags);
+ put_object(object);
+ }
+
+@@ -842,9 +842,9 @@ static void object_set_excess_ref(unsign
+ return;
+ }
+
+- spin_lock_irqsave(&object->lock, flags);
++ raw_spin_lock_irqsave(&object->lock, flags);
+ object->excess_ref = excess_ref;
+- spin_unlock_irqrestore(&object->lock, flags);
++ raw_spin_unlock_irqrestore(&object->lock, flags);
+ put_object(object);
+ }
+
+@@ -864,9 +864,9 @@ static void object_no_scan(unsigned long
+ return;
+ }
+
+- spin_lock_irqsave(&object->lock, flags);
++ raw_spin_lock_irqsave(&object->lock, flags);
+ object->flags |= OBJECT_NO_SCAN;
+- spin_unlock_irqrestore(&object->lock, flags);
++ raw_spin_unlock_irqrestore(&object->lock, flags);
+ put_object(object);
+ }
+
+@@ -1026,9 +1026,9 @@ void __ref kmemleak_update_trace(const v
+ return;
+ }
+
+- spin_lock_irqsave(&object->lock, flags);
++ raw_spin_lock_irqsave(&object->lock, flags);
+ object->trace_len = __save_stack_trace(object->trace);
+- spin_unlock_irqrestore(&object->lock, flags);
++ raw_spin_unlock_irqrestore(&object->lock, flags);
+
+ put_object(object);
+ }
+@@ -1268,7 +1268,7 @@ static void scan_block(void *_start, voi
+ * previously acquired in scan_object(). These locks are
+ * enclosed by scan_mutex.
+ */
+- spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
++ raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
+ /* only pass surplus references (object already gray) */
+ if (color_gray(object)) {
+ excess_ref = object->excess_ref;
+@@ -1277,7 +1277,7 @@ static void scan_block(void *_start, voi
+ excess_ref = 0;
+ update_refs(object);
+ }
+- spin_unlock(&object->lock);
++ raw_spin_unlock(&object->lock);
+
+ if (excess_ref) {
+ object = lookup_object(excess_ref, 0);
+@@ -1286,9 +1286,9 @@ static void scan_block(void *_start, voi
+ if (object == scanned)
+ /* circular reference, ignore */
+ continue;
+- spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
++ raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
+ update_refs(object);
+- spin_unlock(&object->lock);
++ raw_spin_unlock(&object->lock);
+ }
+ }
+ raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
+@@ -1324,7 +1324,7 @@ static void scan_object(struct kmemleak_
+ * Once the object->lock is acquired, the corresponding memory block
+ * cannot be freed (the same lock is acquired in delete_object).
+ */
+- spin_lock_irqsave(&object->lock, flags);
++ raw_spin_lock_irqsave(&object->lock, flags);
+ if (object->flags & OBJECT_NO_SCAN)
+ goto out;
+ if (!(object->flags & OBJECT_ALLOCATED))
+@@ -1344,9 +1344,9 @@ static void scan_object(struct kmemleak_
+ if (start >= end)
+ break;
+
+- spin_unlock_irqrestore(&object->lock, flags);
++ raw_spin_unlock_irqrestore(&object->lock, flags);
+ cond_resched();
+- spin_lock_irqsave(&object->lock, flags);
++ raw_spin_lock_irqsave(&object->lock, flags);
+ } while (object->flags & OBJECT_ALLOCATED);
+ } else
+ hlist_for_each_entry(area, &object->area_list, node)
+@@ -1354,7 +1354,7 @@ static void scan_object(struct kmemleak_
+ (void *)(area->start + area->size),
+ object);
+ out:
+- spin_unlock_irqrestore(&object->lock, flags);
++ raw_spin_unlock_irqrestore(&object->lock, flags);
+ }
+
+ /*
+@@ -1407,7 +1407,7 @@ static void kmemleak_scan(void)
+ /* prepare the kmemleak_object's */
+ rcu_read_lock();
+ list_for_each_entry_rcu(object, &object_list, object_list) {
+- spin_lock_irqsave(&object->lock, flags);
++ raw_spin_lock_irqsave(&object->lock, flags);
+ #ifdef DEBUG
+ /*
+ * With a few exceptions there should be a maximum of
+@@ -1424,7 +1424,7 @@ static void kmemleak_scan(void)
+ if (color_gray(object) && get_object(object))
+ list_add_tail(&object->gray_list, &gray_list);
+
+- spin_unlock_irqrestore(&object->lock, flags);
++ raw_spin_unlock_irqrestore(&object->lock, flags);
+ }
+ rcu_read_unlock();
+
+@@ -1492,14 +1492,14 @@ static void kmemleak_scan(void)
+ */
+ rcu_read_lock();
+ list_for_each_entry_rcu(object, &object_list, object_list) {
+- spin_lock_irqsave(&object->lock, flags);
++ raw_spin_lock_irqsave(&object->lock, flags);
+ if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
+ && update_checksum(object) && get_object(object)) {
+ /* color it gray temporarily */
+ object->count = object->min_count;
+ list_add_tail(&object->gray_list, &gray_list);
+ }
+- spin_unlock_irqrestore(&object->lock, flags);
++ raw_spin_unlock_irqrestore(&object->lock, flags);
+ }
+ rcu_read_unlock();
+
+@@ -1519,7 +1519,7 @@ static void kmemleak_scan(void)
+ */
+ rcu_read_lock();
+ list_for_each_entry_rcu(object, &object_list, object_list) {
+- spin_lock_irqsave(&object->lock, flags);
++ raw_spin_lock_irqsave(&object->lock, flags);
+ if (unreferenced_object(object) &&
+ !(object->flags & OBJECT_REPORTED)) {
+ object->flags |= OBJECT_REPORTED;
+@@ -1529,7 +1529,7 @@ static void kmemleak_scan(void)
+
+ new_leaks++;
+ }
+- spin_unlock_irqrestore(&object->lock, flags);
++ raw_spin_unlock_irqrestore(&object->lock, flags);
+ }
+ rcu_read_unlock();
+
+@@ -1681,10 +1681,10 @@ static int kmemleak_seq_show(struct seq_
+ struct kmemleak_object *object = v;
+ unsigned long flags;
+
+- spin_lock_irqsave(&object->lock, flags);
++ raw_spin_lock_irqsave(&object->lock, flags);
+ if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
+ print_unreferenced(seq, object);
+- spin_unlock_irqrestore(&object->lock, flags);
++ raw_spin_unlock_irqrestore(&object->lock, flags);
+ return 0;
+ }
+
+@@ -1714,9 +1714,9 @@ static int dump_str_object_info(const ch
+ return -EINVAL;
+ }
+
+- spin_lock_irqsave(&object->lock, flags);
++ raw_spin_lock_irqsave(&object->lock, flags);
+ dump_object_info(object);
+- spin_unlock_irqrestore(&object->lock, flags);
++ raw_spin_unlock_irqrestore(&object->lock, flags);
+
+ put_object(object);
+ return 0;
+@@ -1735,11 +1735,11 @@ static void kmemleak_clear(void)
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(object, &object_list, object_list) {
+- spin_lock_irqsave(&object->lock, flags);
++ raw_spin_lock_irqsave(&object->lock, flags);
+ if ((object->flags & OBJECT_REPORTED) &&
+ unreferenced_object(object))
+ __paint_it(object, KMEMLEAK_GREY);
+- spin_unlock_irqrestore(&object->lock, flags);
++ raw_spin_unlock_irqrestore(&object->lock, flags);
+ }
+ rcu_read_unlock();
+