Diffstat (limited to 'debian/patches-rt/mm-vmalloc-use-get-cpu-light.patch')
-rw-r--r--  debian/patches-rt/mm-vmalloc-use-get-cpu-light.patch  12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/debian/patches-rt/mm-vmalloc-use-get-cpu-light.patch b/debian/patches-rt/mm-vmalloc-use-get-cpu-light.patch
index 10ea368fb..75289d6aa 100644
--- a/debian/patches-rt/mm-vmalloc-use-get-cpu-light.patch
+++ b/debian/patches-rt/mm-vmalloc-use-get-cpu-light.patch
@@ -1,7 +1,7 @@
 Subject: mm/vmalloc: Another preempt disable region which sucks
 From: Thomas Gleixner <tglx@linutronix.de>
 Date: Tue, 12 Jul 2011 11:39:36 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2.17-rt9.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.4/older/patches-5.4.3-rt1.tar.xz
 
 Avoid the preempt disable version of get_cpu_var(). The inner-lock should
 provide enough serialisation.
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 
 --- a/mm/vmalloc.c
 +++ b/mm/vmalloc.c
-@@ -1406,7 +1406,7 @@ static void *new_vmap_block(unsigned int
+@@ -1462,7 +1462,7 @@ static void *new_vmap_block(unsigned int
  	struct vmap_block *vb;
  	struct vmap_area *va;
  	unsigned long vb_idx;
@@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  	void *vaddr;
  
  	node = numa_node_id();
-@@ -1449,11 +1449,12 @@ static void *new_vmap_block(unsigned int
+@@ -1505,11 +1505,12 @@ static void *new_vmap_block(unsigned int
  	BUG_ON(err);
  	radix_tree_preload_end();
  
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  
  	return vaddr;
  }
-@@ -1522,6 +1523,7 @@ static void *vb_alloc(unsigned long size
+@@ -1578,6 +1579,7 @@ static void *vb_alloc(unsigned long size
  	struct vmap_block *vb;
  	void *vaddr = NULL;
  	unsigned int order;
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  
  	BUG_ON(offset_in_page(size));
  	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
-@@ -1536,7 +1538,8 @@ static void *vb_alloc(unsigned long size
+@@ -1592,7 +1594,8 @@ static void *vb_alloc(unsigned long size
  	order = get_order(size);
  
  	rcu_read_lock();
@@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
  		unsigned long pages_off;
  
-@@ -1559,7 +1562,7 @@ static void *vb_alloc(unsigned long size
+@@ -1615,7 +1618,7 @@ static void *vb_alloc(unsigned long size
  		break;
  	}
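
For orientation, the carried patch swaps the preempt-disabling get_cpu_var()/put_cpu_var() pair around the per-CPU vmap_block_queue for the RT tree's get_cpu_light() plus this_cpu_ptr(), leaving vbq->lock to provide the serialisation the description mentions. The fragment below is only a sketch of that pattern, not the patch text: get_cpu_light()/put_cpu_light() exist only in the PREEMPT_RT series, and the surrounding new_vmap_block() code ('vb', 'vmap_block_queue') is abbreviated from mainline mm/vmalloc.c.

/*
 * Sketch of the get_cpu_var() -> get_cpu_light() conversion applied by
 * mm-vmalloc-use-get-cpu-light.patch.  Assumes the PREEMPT_RT helpers
 * get_cpu_light()/put_cpu_light(); 'vb' is the vmap_block being queued
 * in new_vmap_block().
 */
struct vmap_block_queue *vbq;
int cpu;

/* Before: get_cpu_var() disables preemption until put_cpu_var(). */
vbq = &get_cpu_var(vmap_block_queue);
spin_lock(&vbq->lock);
list_add_tail_rcu(&vb->free_list, &vbq->free);
spin_unlock(&vbq->lock);
put_cpu_var(vmap_block_queue);

/*
 * After: get_cpu_light() only pins the task to its CPU (no hard
 * preempt_disable() on RT); the per-queue spinlock serialises the
 * list update.
 */
cpu = get_cpu_light();
vbq = this_cpu_ptr(&vmap_block_queue);
spin_lock(&vbq->lock);
list_add_tail_rcu(&vb->free_list, &vbq->free);
spin_unlock(&vbq->lock);
put_cpu_light();

The same conversion is applied in vb_alloc(), whose hunks are visible above: the rcu_read_lock()-protected walk of vbq->free likewise needs only CPU pinning, not a preemption-disabled region.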