path: root/debian/patches-rt/random-avoid-preempt_disable-ed-section.patch
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 12 May 2017 15:46:17 +0200
Subject: [PATCH] random: avoid preempt_disable()ed section
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.13-rt10.tar.xz

extract_crng() will use sleeping locks while running in a preempt_disable()ed
section entered via get_cpu_var().
Work around it with local_locks.
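
For reference, the local_lock pattern this patch switches to looks roughly like
the sketch below. It is illustrative only, not code from drivers/char/random.c;
struct my_batch, my_batch_lock and refill_batch() are made-up placeholders.

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/locallock.h>    /* RT-specific: DEFINE_LOCAL_IRQ_LOCK() etc. */

struct my_batch {
        u64 entropy[16];
        unsigned int position;
};

/* Per-CPU data plus a local lock guarding it. */
static DEFINE_PER_CPU(struct my_batch, my_batch);
static DEFINE_LOCAL_IRQ_LOCK(my_batch_lock);

static u64 my_batch_pop(void)
{
        struct my_batch *b;
        u64 ret;

        /*
         * On !RT this behaves like get_cpu_var() (preemption disabled);
         * on RT it takes a per-CPU sleeping lock instead, so callees such
         * as extract_crng() may take their spinlocks without triggering a
         * "sleeping lock taken with preemption disabled" splat.
         */
        b = &get_locked_var(my_batch_lock, my_batch);
        if (b->position % ARRAY_SIZE(b->entropy) == 0) {
                refill_batch(b);        /* placeholder for extract_crng() */
                b->position = 0;
        }
        ret = b->entropy[b->position++];
        put_locked_var(my_batch_lock, my_batch);
        return ret;
}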

Cc: stable-rt@vger.kernel.org # where it applies to
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 drivers/char/random.c |   11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -265,6 +265,7 @@
 #include <linux/syscalls.h>
 #include <linux/completion.h>
 #include <linux/uuid.h>
+#include <linux/locallock.h>
 #include <crypto/chacha20.h>
 
 #include <asm/processor.h>
@@ -2223,6 +2224,7 @@ static rwlock_t batched_entropy_reset_lo
  * at any point prior.
  */
 static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
+static DEFINE_LOCAL_IRQ_LOCK(batched_entropy_u64_lock);
 u64 get_random_u64(void)
 {
 	u64 ret;
@@ -2243,7 +2245,7 @@ u64 get_random_u64(void)
 	warn_unseeded_randomness(&previous);
 
 	use_lock = READ_ONCE(crng_init) < 2;
-	batch = &get_cpu_var(batched_entropy_u64);
+	batch = &get_locked_var(batched_entropy_u64_lock, batched_entropy_u64);
 	if (use_lock)
 		read_lock_irqsave(&batched_entropy_reset_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
@@ -2253,12 +2255,13 @@ u64 get_random_u64(void)
 	ret = batch->entropy_u64[batch->position++];
 	if (use_lock)
 		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
-	put_cpu_var(batched_entropy_u64);
+	put_locked_var(batched_entropy_u64_lock, batched_entropy_u64);
 	return ret;
 }
 EXPORT_SYMBOL(get_random_u64);
 
 static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
+static DEFINE_LOCAL_IRQ_LOCK(batched_entropy_u32_lock);
 u32 get_random_u32(void)
 {
 	u32 ret;
@@ -2273,7 +2276,7 @@ u32 get_random_u32(void)
 	warn_unseeded_randomness(&previous);
 
 	use_lock = READ_ONCE(crng_init) < 2;
-	batch = &get_cpu_var(batched_entropy_u32);
+	batch = &get_locked_var(batched_entropy_u32_lock, batched_entropy_u32);
 	if (use_lock)
 		read_lock_irqsave(&batched_entropy_reset_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
@@ -2283,7 +2286,7 @@ u32 get_random_u32(void)
 	ret = batch->entropy_u32[batch->position++];
 	if (use_lock)
 		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
-	put_cpu_var(batched_entropy_u32);
+	put_locked_var(batched_entropy_u32_lock, batched_entropy_u32);
 	return ret;
 }
 EXPORT_SYMBOL(get_random_u32);