path: root/debian/patches-rt/0001-mm-page_alloc-Split-drain_local_pages.patch
From: Anna-Maria Gleixner <anna-maria@linutronix.de>
Date: Thu, 18 Apr 2019 11:09:04 +0200
Subject: [PATCH 1/4] mm/page_alloc: Split drain_local_pages()
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.4/older/patches-5.4.13-rt7.tar.xz

Split the functionality of drain_local_pages() out into a separate
function. This is preparatory work for introducing the static-key
dependent locking mechanism.

No functional change.
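
For illustration only (a hypothetical sketch, not part of this series):
once the per-cpu pagesets are protected by a lock selected via the
static key, the new helper would allow draining remote CPUs directly
instead of queueing drain_local_pages_wq() on every CPU, roughly along
these lines:

	static void drain_all_cpu_pages(struct zone *zone)
	{
		unsigned int cpu;

		/* Keep the online mask stable while walking it. */
		get_online_cpus();
		for_each_online_cpu(cpu)
			drain_cpu_pages(cpu, zone);
		put_online_cpus();
	}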

Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 include/linux/gfp.h |    1 +
 mm/page_alloc.c     |   13 +++++++++----
 2 files changed, 10 insertions(+), 4 deletions(-)

--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -580,6 +580,7 @@ extern void page_frag_free(void *addr);
 void page_alloc_init(void);
 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
 void drain_all_pages(struct zone *zone);
+void drain_cpu_pages(unsigned int cpu, struct zone *zone);
 void drain_local_pages(struct zone *zone);
 
 void page_alloc_init_late(void);
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2894,6 +2894,14 @@ static void drain_pages(unsigned int cpu
 	}
 }
 
+void drain_cpu_pages(unsigned int cpu, struct zone *zone)
+{
+	if (zone)
+		drain_pages_zone(cpu, zone);
+	else
+		drain_pages(cpu);
+}
+
 /*
  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
  *
@@ -2904,10 +2912,7 @@ void drain_local_pages(struct zone *zone
 {
 	int cpu = smp_processor_id();
 
-	if (zone)
-		drain_pages_zone(cpu, zone);
-	else
-		drain_pages(cpu);
+	drain_cpu_pages(cpu, zone);
 }
 
 static void drain_local_pages_wq(struct work_struct *work)