From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 30 Nov 2017 13:40:10 +0100
Subject: [PATCH] crypto: limit more FPU-enabled sections
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2.10-rt5.tar.xz

The affected crypto drivers use SSE/AVX/… for their crypto work and, in
order to do so in the kernel, they need to enable the "FPU" in kernel
mode, which disables preemption.
There are two problems with the way they are used (a condensed sketch of
the pattern follows below):
- the while loop which processes X bytes may create latency spikes and
  should be avoided or limited.
- the cipher-walk-next part may allocate/free memory and may use
  kmap_atomic().
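
For illustration only, roughly the shape of chacha_simd_stream_xor()
before this change (stride rounding, error handling and the old
next_yield logic are omitted; this is not the literal upstream code):

	kernel_fpu_begin();		/* disables preemption */
	while (walk->nbytes > 0) {
		/* SIMD processing of the current chunk */
		chacha_dosimd(state, walk->dst.virt.addr,
			      walk->src.virt.addr, walk->nbytes,
			      ctx->nrounds);
		/* may allocate/free memory and use kmap_atomic(),
		 * still inside the preempt-disabled FPU section */
		err = skcipher_walk_done(walk, 0);
	}
	kernel_fpu_end();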

The whole kernel_fpu_begin()/end() processing probably isn't that cheap,
so it most likely makes sense to process as much data as possible in one
go. The new kernel_fpu_resched() drops the FPU section and schedules only
if a reschedule is actually pending.
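
A minimal usage sketch of the new helper, assuming a driver keeps one
long FPU section and yields it only when needed (more_data() and
process_one_chunk_simd() are placeholders, not real kernel functions):

	kernel_fpu_begin();
	while (more_data()) {
		process_one_chunk_simd();
		/* ends the FPU section, calls cond_resched() and
		 * re-enters it only if should_resched() is true */
		kernel_fpu_resched();
	}
	kernel_fpu_end();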

We should probably measure the performance of those ciphers in pure
software mode and with these optimisations to see if it makes sense to
keep them for RT.

The kernel_fpu_resched() approach makes the code more preemptible, which
might hurt performance.

Cc: stable-rt@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 arch/x86/crypto/chacha_glue.c  |   11 ++---------
 arch/x86/include/asm/fpu/api.h |    1 +
 arch/x86/kernel/fpu/core.c     |   12 ++++++++++++
 3 files changed, 15 insertions(+), 9 deletions(-)

--- a/arch/x86/crypto/chacha_glue.c
+++ b/arch/x86/crypto/chacha_glue.c
@@ -127,7 +127,6 @@ static int chacha_simd_stream_xor(struct
 				  struct chacha_ctx *ctx, u8 *iv)
 {
 	u32 *state, state_buf[16 + 2] __aligned(8);
-	int next_yield = 4096; /* bytes until next FPU yield */
 	int err = 0;
 
 	BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
@@ -140,20 +139,14 @@ static int chacha_simd_stream_xor(struct
 
 		if (nbytes < walk->total) {
 			nbytes = round_down(nbytes, walk->stride);
-			next_yield -= nbytes;
 		}
 
 		chacha_dosimd(state, walk->dst.virt.addr, walk->src.virt.addr,
 			      nbytes, ctx->nrounds);
 
-		if (next_yield <= 0) {
-			/* temporarily allow preemption */
-			kernel_fpu_end();
-			kernel_fpu_begin();
-			next_yield = 4096;
-		}
-
+		kernel_fpu_end();
 		err = skcipher_walk_done(walk, walk->nbytes - nbytes);
+		kernel_fpu_begin();
 	}
 
 	return err;
--- a/arch/x86/include/asm/fpu/api.h
+++ b/arch/x86/include/asm/fpu/api.h
@@ -23,6 +23,7 @@ extern void kernel_fpu_begin(void);
 extern void kernel_fpu_end(void);
 extern bool irq_fpu_usable(void);
 extern void fpregs_mark_activate(void);
+extern void kernel_fpu_resched(void);
 
 /*
  * Use fpregs_lock() while editing CPU's FPU registers or fpu->state.
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -134,6 +134,18 @@ void kernel_fpu_end(void)
 }
 EXPORT_SYMBOL_GPL(kernel_fpu_end);
 
+void kernel_fpu_resched(void)
+{
+	WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
+
+	if (should_resched(PREEMPT_OFFSET)) {
+		kernel_fpu_end();
+		cond_resched();
+		kernel_fpu_begin();
+	}
+}
+EXPORT_SYMBOL_GPL(kernel_fpu_resched);
+
 /*
  * Save the FPU state (mark it for reload if necessary):
  *