From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 29 Jan 2015 15:10:08 +0100
Subject: block/mq: don't complete requests via IPI
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.5-rt4.tar.xz

The IPI handler runs in hardirq context, but on PREEMPT_RT the completion
callback may take sleeping locks, which is not allowed in that context.
This patch moves the completion into a workqueue instead.
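
The deferral pattern is roughly the following (a minimal sketch with
hypothetical demo_* names, not the actual block layer types): embed a
work_struct in the request, queue it on the target CPU with
schedule_work_on(), which is safe from atomic context, and let the
workqueue thread, which runs in process context and may sleep, invoke
the completion callback.

  #include <linux/kernel.h>
  #include <linux/workqueue.h>

  /* Hypothetical stand-in for struct request. */
  struct demo_req {
  	struct work_struct work;
  	void (*done)(struct demo_req *rq);
  };

  /* Runs in a workqueue thread: process context, sleeping locks are fine. */
  static void demo_complete_work(struct work_struct *work)
  {
  	struct demo_req *rq = container_of(work, struct demo_req, work);

  	rq->done(rq);
  }

  /* May be called from atomic context; defers the completion to @cpu. */
  static void demo_complete_on(struct demo_req *rq, int cpu)
  {
  	INIT_WORK(&rq->work, demo_complete_work);
  	schedule_work_on(cpu, &rq->work);
  }

The patch below initializes the work item once at request setup time
(blk_rq_init()/blk_mq_rq_ctx_init()) rather than per completion, and
only takes the workqueue path when the completion has to run on a
remote CPU.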

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 block/blk-core.c       |    3 +++
 block/blk-mq.c         |   23 +++++++++++++++++++++++
 include/linux/blk-mq.h |    2 +-
 include/linux/blkdev.h |    3 +++
 4 files changed, 30 insertions(+), 1 deletion(-)

--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -189,6 +189,9 @@ void blk_rq_init(struct request_queue *q
 
 	INIT_LIST_HEAD(&rq->queuelist);
 	INIT_LIST_HEAD(&rq->timeout_list);
+#ifdef CONFIG_PREEMPT_RT_FULL
+	INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
+#endif
 	rq->cpu = -1;
 	rq->q = q;
 	rq->__sector = (sector_t) -1;
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -320,6 +320,9 @@ static struct request *blk_mq_rq_ctx_ini
 	rq->extra_len = 0;
 	rq->__deadline = 0;
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+	INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
+#endif
 	INIT_LIST_HEAD(&rq->timeout_list);
 	rq->timeout = 0;
 
@@ -547,12 +550,24 @@ void blk_mq_end_request(struct request *
 }
 EXPORT_SYMBOL(blk_mq_end_request);
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+
+void __blk_mq_complete_request_remote_work(struct work_struct *work)
+{
+	struct request *rq = container_of(work, struct request, work);
+
+	rq->q->softirq_done_fn(rq);
+}
+
+#else
+
 static void __blk_mq_complete_request_remote(void *data)
 {
 	struct request *rq = data;
 
 	rq->q->softirq_done_fn(rq);
 }
+#endif
 
 static void __blk_mq_complete_request(struct request *rq)
 {
@@ -575,10 +590,18 @@ static void __blk_mq_complete_request(st
 		shared = cpus_share_cache(cpu, ctx->cpu);
 
 	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
+#ifdef CONFIG_PREEMPT_RT_FULL
+		/*
+		 * We could force QUEUE_FLAG_SAME_FORCE, then we would not get
+		 * in here. But we can try to invoke it on the CPU like this.
+		 */
+		schedule_work_on(ctx->cpu, &rq->work);
+#else
 		rq->csd.func = __blk_mq_complete_request_remote;
 		rq->csd.info = rq;
 		rq->csd.flags = 0;
 		smp_call_function_single_async(ctx->cpu, &rq->csd);
+#endif
 	} else {
 		rq->q->softirq_done_fn(rq);
 	}
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -249,7 +249,7 @@ static inline u16 blk_mq_unique_tag_to_t
 	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
 }
 
-
+void __blk_mq_complete_request_remote_work(struct work_struct *work);
 int blk_mq_request_started(struct request *rq);
 void blk_mq_start_request(struct request *rq);
 void blk_mq_end_request(struct request *rq, blk_status_t error);
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -149,6 +149,9 @@ enum mq_rq_state {
  */
 struct request {
 	struct request_queue *q;
+#ifdef CONFIG_PREEMPT_RT_FULL
+	struct work_struct work;
+#endif
 	struct blk_mq_ctx *mq_ctx;
 
 	int cpu;