From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 31 Aug 2018 14:16:30 +0200
Subject: [PATCH] of: allocate / free phandle cache outside of the devtree_lock
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.5-rt4.tar.xz

The phandle cache code allocates memory while holding devtree_lock,
which is a raw_spinlock_t. Memory allocation (and freeing) is not
possible on RT while a raw_spinlock_t is held.
Invoke kfree() and kcalloc() with the lock dropped.
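
The pattern is the same in both paths: detach the pointer under the
lock, do the actual kfree()/kcalloc() after the unlock, and re-take the
lock only to publish the new pointer. A minimal standalone sketch of
that pattern follows; demo_lock, demo_cache, demo_cache_rebuild() and
struct object are hypothetical names for illustration, not part of this
patch:

  #include <linux/slab.h>
  #include <linux/spinlock.h>
  #include <linux/types.h>

  struct object;                         /* stands in for struct device_node */

  static DEFINE_RAW_SPINLOCK(demo_lock); /* stands in for devtree_lock */
  static struct object **demo_cache;     /* stands in for phandle_cache */

  static void demo_cache_rebuild(u32 entries)
  {
          struct object **shadow;
          unsigned long flags;

          /*
           * Detach the old cache under the lock and free it after
           * unlocking: on PREEMPT_RT the allocator may sleep, which is
           * forbidden while a raw_spinlock_t is held. kfree(NULL) is a
           * no-op, so no NULL check is needed.
           */
          raw_spin_lock_irqsave(&demo_lock, flags);
          shadow = demo_cache;
          demo_cache = NULL;
          raw_spin_unlock_irqrestore(&demo_lock, flags);
          kfree(shadow);

          /*
           * Allocate outside the lock; GFP_KERNEL can now be used
           * instead of GFP_ATOMIC.
           */
          shadow = kcalloc(entries, sizeof(*shadow), GFP_KERNEL);
          if (!shadow)
                  return;

          /* Re-take the lock only to publish the new cache. */
          raw_spin_lock_irqsave(&demo_lock, flags);
          demo_cache = shadow;
          raw_spin_unlock_irqrestore(&demo_lock, flags);
  }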

Cc: Rob Herring <robh+dt@kernel.org>
Cc: Frank Rowand <frowand.list@gmail.com>
Cc: devicetree@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 drivers/of/base.c |   22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -130,46 +130,52 @@ void of_populate_phandle_cache(void)
 	u32 cache_entries;
 	struct device_node *np;
 	u32 phandles = 0;
+	struct device_node **shadow;
 
 	raw_spin_lock_irqsave(&devtree_lock, flags);
 
-	kfree(phandle_cache);
+	shadow = phandle_cache;
 	phandle_cache = NULL;
 
 	for_each_of_allnodes(np)
 		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
 			phandles++;
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+	kfree(shadow);
 
 	if (!phandles)
-		goto out;
+		return;
 
 	cache_entries = roundup_pow_of_two(phandles);
 	phandle_cache_mask = cache_entries - 1;
 
-	phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache),
-				GFP_ATOMIC);
-	if (!phandle_cache)
-		goto out;
+	shadow = kcalloc(cache_entries, sizeof(*phandle_cache), GFP_KERNEL);
+	if (!shadow)
+		return;
+
+	raw_spin_lock_irqsave(&devtree_lock, flags);
+	phandle_cache = shadow;
 
 	for_each_of_allnodes(np)
 		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
 			phandle_cache[np->phandle & phandle_cache_mask] = np;
 
-out:
 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
 }
 
 int of_free_phandle_cache(void)
 {
 	unsigned long flags;
+	struct device_node **shadow;
 
 	raw_spin_lock_irqsave(&devtree_lock, flags);
 
-	kfree(phandle_cache);
+	shadow = phandle_cache;
 	phandle_cache = NULL;
 
 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
 
+	kfree(shadow);
 	return 0;
 }
 #if !defined(CONFIG_MODULES)