path: root/debian/patches-rt/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch
Diffstat (limited to 'debian/patches-rt/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch')
-rw-r--r--  debian/patches-rt/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch  162
1 file changed, 73 insertions(+), 89 deletions(-)
diff --git a/debian/patches-rt/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch b/debian/patches-rt/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch
index c10fcc141..6196f2b88 100644
--- a/debian/patches-rt/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch
+++ b/debian/patches-rt/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 28 May 2018 15:24:22 +0200
Subject: [PATCH 3/4] mm/SLxB: change list_lock to raw_spinlock_t
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
The list_lock is used with IRQs off on RT. Make it a raw_spinlock_t,
otherwise the interrupts won't be disabled on -RT. The locking rules remain
@@ -12,10 +12,10 @@ file for struct kmem_cache_node definition.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- mm/slab.c | 94 +++++++++++++++++++++++++++++++-------------------------------
+ mm/slab.c | 90 +++++++++++++++++++++++++++++++-------------------------------
mm/slab.h | 2 -
- mm/slub.c | 50 ++++++++++++++++----------------
- 3 files changed, 73 insertions(+), 73 deletions(-)
+ mm/slub.c | 50 +++++++++++++++++-----------------
+ 3 files changed, 71 insertions(+), 71 deletions(-)
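
The conversion described above matters because on PREEMPT_RT a plain spinlock_t
becomes a sleeping lock and spin_lock_irqsave() no longer disables hard
interrupts, whereas raw_spinlock_t keeps the non-sleeping, IRQ-disabling
semantics on both RT and !RT. The following is a minimal, self-contained sketch
of the pattern the patch applies throughout mm/slab.c and mm/slub.c; the struct
and function names are illustrative only, not taken from the kernel sources.

/*
 * Minimal sketch (hypothetical names): why list_lock has to be a
 * raw_spinlock_t on PREEMPT_RT.  With spinlock_t, spin_lock_irqsave()
 * would not actually disable interrupts on RT, breaking code that
 * relies on running with IRQs off.
 */
#include <linux/spinlock.h>
#include <linux/list.h>

struct example_node {
	raw_spinlock_t		list_lock;	/* was: spinlock_t list_lock; */
	struct list_head	partial;
};

static void example_node_init(struct example_node *n)
{
	raw_spin_lock_init(&n->list_lock);
	INIT_LIST_HEAD(&n->partial);
}

static void example_node_drain(struct example_node *n)
{
	unsigned long flags;

	/* IRQs are really off here, even with CONFIG_PREEMPT_RT */
	raw_spin_lock_irqsave(&n->list_lock, flags);
	/* ... walk and prune n->partial with interrupts disabled ... */
	raw_spin_unlock_irqrestore(&n->list_lock, flags);
}

The hunks below perform exactly this mechanical substitution (spin_lock* to
raw_spin_lock*, spin_lock_init to raw_spin_lock_init) on the real
kmem_cache_node list_lock; the locking rules themselves are unchanged.
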
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
parent->free_objects = 0;
parent->free_touched = 0;
}
-@@ -587,9 +587,9 @@ static noinline void cache_free_pfmemall
+@@ -564,9 +564,9 @@ static noinline void cache_free_pfmemall
page_node = page_to_nid(page);
n = get_node(cachep, page_node);
@@ -40,7 +40,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slabs_destroy(cachep, &list);
}
-@@ -718,7 +718,7 @@ static void __drain_alien_cache(struct k
+@@ -694,7 +694,7 @@ static void __drain_alien_cache(struct k
struct kmem_cache_node *n = get_node(cachep, node);
if (ac->avail) {
@@ -49,7 +49,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Stuff objects into the remote nodes shared array first.
* That way we could avoid the overhead of putting the objects
-@@ -729,7 +729,7 @@ static void __drain_alien_cache(struct k
+@@ -705,7 +705,7 @@ static void __drain_alien_cache(struct k
free_block(cachep, ac->entry, ac->avail, node, list);
ac->avail = 0;
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -802,9 +802,9 @@ static int __cache_free_alien(struct kme
+@@ -778,9 +778,9 @@ static int __cache_free_alien(struct kme
slabs_destroy(cachep, &list);
} else {
n = get_node(cachep, page_node);
@@ -70,7 +70,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slabs_destroy(cachep, &list);
}
return 1;
-@@ -845,10 +845,10 @@ static int init_cache_node(struct kmem_c
+@@ -821,10 +821,10 @@ static int init_cache_node(struct kmem_c
*/
n = get_node(cachep, node);
if (n) {
@@ -83,7 +83,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
}
-@@ -927,7 +927,7 @@ static int setup_kmem_cache_node(struct
+@@ -903,7 +903,7 @@ static int setup_kmem_cache_node(struct
goto fail;
n = get_node(cachep, node);
@@ -92,7 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (n->shared && force_change) {
free_block(cachep, n->shared->entry,
n->shared->avail, node, &list);
-@@ -945,7 +945,7 @@ static int setup_kmem_cache_node(struct
+@@ -921,7 +921,7 @@ static int setup_kmem_cache_node(struct
new_alien = NULL;
}
@@ -101,7 +101,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slabs_destroy(cachep, &list);
/*
-@@ -984,7 +984,7 @@ static void cpuup_canceled(long cpu)
+@@ -960,7 +960,7 @@ static void cpuup_canceled(long cpu)
if (!n)
continue;
@@ -110,8 +110,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Free limit for this kmem_cache_node */
n->free_limit -= cachep->batchcount;
-@@ -997,7 +997,7 @@ static void cpuup_canceled(long cpu)
- }
+@@ -971,7 +971,7 @@ static void cpuup_canceled(long cpu)
+ nc->avail = 0;
if (!cpumask_empty(mask)) {
- spin_unlock_irq(&n->list_lock);
@@ -119,7 +119,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto free_slab;
}
-@@ -1011,7 +1011,7 @@ static void cpuup_canceled(long cpu)
+@@ -985,7 +985,7 @@ static void cpuup_canceled(long cpu)
alien = n->alien;
n->alien = NULL;
@@ -128,7 +128,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
kfree(shared);
if (alien) {
-@@ -1195,7 +1195,7 @@ static void __init init_list(struct kmem
+@@ -1169,7 +1169,7 @@ static void __init init_list(struct kmem
/*
* Do not assume that spinlocks can be initialized via memcpy:
*/
@@ -137,7 +137,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
MAKE_ALL_LISTS(cachep, ptr, nodeid);
cachep->node[nodeid] = ptr;
-@@ -1366,11 +1366,11 @@ slab_out_of_memory(struct kmem_cache *ca
+@@ -1340,11 +1340,11 @@ slab_out_of_memory(struct kmem_cache *ca
for_each_kmem_cache_node(cachep, node, n) {
unsigned long total_slabs, free_slabs, free_objs;
@@ -151,7 +151,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
node, total_slabs - free_slabs, total_slabs,
-@@ -2165,7 +2165,7 @@ static void check_spinlock_acquired(stru
+@@ -2107,7 +2107,7 @@ static void check_spinlock_acquired(stru
{
#ifdef CONFIG_SMP
check_irq_off();
@@ -160,7 +160,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
}
-@@ -2173,7 +2173,7 @@ static void check_spinlock_acquired_node
+@@ -2115,7 +2115,7 @@ static void check_spinlock_acquired_node
{
#ifdef CONFIG_SMP
check_irq_off();
@@ -169,7 +169,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
}
-@@ -2213,9 +2213,9 @@ static void do_drain(void *arg)
+@@ -2155,9 +2155,9 @@ static void do_drain(void *arg)
check_irq_off();
ac = cpu_cache_get(cachep);
n = get_node(cachep, node);
@@ -181,7 +181,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slabs_destroy(cachep, &list);
ac->avail = 0;
}
-@@ -2233,9 +2233,9 @@ static void drain_cpu_caches(struct kmem
+@@ -2175,9 +2175,9 @@ static void drain_cpu_caches(struct kmem
drain_alien_cache(cachep, n->alien);
for_each_kmem_cache_node(cachep, node, n) {
@@ -193,7 +193,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slabs_destroy(cachep, &list);
}
-@@ -2257,10 +2257,10 @@ static int drain_freelist(struct kmem_ca
+@@ -2199,10 +2199,10 @@ static int drain_freelist(struct kmem_ca
nr_freed = 0;
while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
@@ -206,7 +206,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto out;
}
-@@ -2273,7 +2273,7 @@ static int drain_freelist(struct kmem_ca
+@@ -2215,7 +2215,7 @@ static int drain_freelist(struct kmem_ca
* to the cache.
*/
n->free_objects -= cache->num;
@@ -215,16 +215,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slab_destroy(cache, page);
nr_freed++;
}
-@@ -2728,7 +2728,7 @@ static void cache_grow_end(struct kmem_c
- INIT_LIST_HEAD(&page->lru);
+@@ -2664,7 +2664,7 @@ static void cache_grow_end(struct kmem_c
+ INIT_LIST_HEAD(&page->slab_list);
n = get_node(cachep, page_to_nid(page));
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
n->total_slabs++;
if (!page->active) {
- list_add_tail(&page->lru, &(n->slabs_free));
-@@ -2738,7 +2738,7 @@ static void cache_grow_end(struct kmem_c
+ list_add_tail(&page->slab_list, &n->slabs_free);
+@@ -2674,7 +2674,7 @@ static void cache_grow_end(struct kmem_c
STATS_INC_GROWN(cachep);
n->free_objects += cachep->num - page->active;
@@ -233,16 +233,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
fixup_objfreelist_debug(cachep, &list);
}
-@@ -2906,7 +2906,7 @@ static struct page *get_first_slab(struc
+@@ -2840,7 +2840,7 @@ static struct page *get_first_slab(struc
{
struct page *page;
- assert_spin_locked(&n->list_lock);
+ assert_raw_spin_locked(&n->list_lock);
- page = list_first_entry_or_null(&n->slabs_partial, struct page, lru);
+ page = list_first_entry_or_null(&n->slabs_partial, struct page,
+ slab_list);
if (!page) {
- n->free_touched = 1;
-@@ -2932,10 +2932,10 @@ static noinline void *cache_alloc_pfmema
+@@ -2867,10 +2867,10 @@ static noinline void *cache_alloc_pfmema
if (!gfp_pfmemalloc_allowed(flags))
return NULL;
@@ -255,7 +255,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return NULL;
}
-@@ -2944,7 +2944,7 @@ static noinline void *cache_alloc_pfmema
+@@ -2879,7 +2879,7 @@ static noinline void *cache_alloc_pfmema
fixup_slab_list(cachep, n, page, &list);
@@ -264,7 +264,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
fixup_objfreelist_debug(cachep, &list);
return obj;
-@@ -3003,7 +3003,7 @@ static void *cache_alloc_refill(struct k
+@@ -2938,7 +2938,7 @@ static void *cache_alloc_refill(struct k
if (!n->free_objects && (!shared || !shared->avail))
goto direct_grow;
@@ -273,7 +273,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
shared = READ_ONCE(n->shared);
/* See if we can refill from the shared array */
-@@ -3027,7 +3027,7 @@ static void *cache_alloc_refill(struct k
+@@ -2962,7 +2962,7 @@ static void *cache_alloc_refill(struct k
must_grow:
n->free_objects -= ac->avail;
alloc_done:
@@ -282,7 +282,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
fixup_objfreelist_debug(cachep, &list);
direct_grow:
-@@ -3252,7 +3252,7 @@ static void *____cache_alloc_node(struct
+@@ -3187,7 +3187,7 @@ static void *____cache_alloc_node(struct
BUG_ON(!n);
check_irq_off();
@@ -291,7 +291,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
page = get_first_slab(n, false);
if (!page)
goto must_grow;
-@@ -3270,12 +3270,12 @@ static void *____cache_alloc_node(struct
+@@ -3205,12 +3205,12 @@ static void *____cache_alloc_node(struct
fixup_slab_list(cachep, n, page, &list);
@@ -306,7 +306,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
if (page) {
/* This slab isn't counted yet so don't update free_objects */
-@@ -3451,7 +3451,7 @@ static void cache_flusharray(struct kmem
+@@ -3386,7 +3386,7 @@ static void cache_flusharray(struct kmem
check_irq_off();
n = get_node(cachep, node);
@@ -315,7 +315,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (n->shared) {
struct array_cache *shared_array = n->shared;
int max = shared_array->limit - shared_array->avail;
-@@ -3480,7 +3480,7 @@ static void cache_flusharray(struct kmem
+@@ -3415,7 +3415,7 @@ static void cache_flusharray(struct kmem
STATS_SET_FREEABLE(cachep, i);
}
#endif
@@ -324,7 +324,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slabs_destroy(cachep, &list);
ac->avail -= batchcount;
memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
-@@ -3888,9 +3888,9 @@ static int __do_tune_cpucache(struct kme
+@@ -3829,9 +3829,9 @@ static int __do_tune_cpucache(struct kme
node = cpu_to_mem(cpu);
n = get_node(cachep, node);
@@ -336,7 +336,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slabs_destroy(cachep, &list);
}
free_percpu(prev);
-@@ -4015,9 +4015,9 @@ static void drain_array(struct kmem_cach
+@@ -3956,9 +3956,9 @@ static void drain_array(struct kmem_cach
return;
}
@@ -348,7 +348,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slabs_destroy(cachep, &list);
}
-@@ -4101,7 +4101,7 @@ void get_slabinfo(struct kmem_cache *cac
+@@ -4042,7 +4042,7 @@ void get_slabinfo(struct kmem_cache *cac
for_each_kmem_cache_node(cachep, node, n) {
check_irq_on();
@@ -357,7 +357,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
total_slabs += n->total_slabs;
free_slabs += n->free_slabs;
-@@ -4110,7 +4110,7 @@ void get_slabinfo(struct kmem_cache *cac
+@@ -4051,7 +4051,7 @@ void get_slabinfo(struct kmem_cache *cac
if (n->shared)
shared_avail += n->shared->avail;
@@ -366,25 +366,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
num_objs = total_slabs * cachep->num;
active_slabs = total_slabs - free_slabs;
-@@ -4325,13 +4325,13 @@ static int leaks_show(struct seq_file *m
- for_each_kmem_cache_node(cachep, node, n) {
-
- check_irq_on();
-- spin_lock_irq(&n->list_lock);
-+ raw_spin_lock_irq(&n->list_lock);
-
- list_for_each_entry(page, &n->slabs_full, lru)
- handle_slab(x, cachep, page);
- list_for_each_entry(page, &n->slabs_partial, lru)
- handle_slab(x, cachep, page);
-- spin_unlock_irq(&n->list_lock);
-+ raw_spin_unlock_irq(&n->list_lock);
- }
- } while (!is_store_user_clean(cachep));
-
--- a/mm/slab.h
+++ b/mm/slab.h
-@@ -453,7 +453,7 @@ static inline void slab_post_alloc_hook(
+@@ -449,7 +449,7 @@ static inline void slab_post_alloc_hook(
* The slab lists for all objects.
*/
struct kmem_cache_node {
@@ -395,7 +379,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct list_head slabs_partial; /* partial list first, better asm code */
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -1184,7 +1184,7 @@ static noinline int free_debug_processin
+@@ -1175,7 +1175,7 @@ static noinline int free_debug_processin
unsigned long uninitialized_var(flags);
int ret = 0;
@@ -404,7 +388,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slab_lock(page);
if (s->flags & SLAB_CONSISTENCY_CHECKS) {
-@@ -1219,7 +1219,7 @@ static noinline int free_debug_processin
+@@ -1210,7 +1210,7 @@ static noinline int free_debug_processin
bulk_cnt, cnt);
slab_unlock(page);
@@ -413,16 +397,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!ret)
slab_fix(s, "Object at 0x%p not freed", object);
return ret;
-@@ -1863,7 +1863,7 @@ static void *get_partial_node(struct kme
+@@ -1854,7 +1854,7 @@ static void *get_partial_node(struct kme
if (!n || !n->nr_partial)
return NULL;
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
- list_for_each_entry_safe(page, page2, &n->partial, lru) {
+ list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
void *t;
-@@ -1888,7 +1888,7 @@ static void *get_partial_node(struct kme
+@@ -1879,7 +1879,7 @@ static void *get_partial_node(struct kme
break;
}
@@ -431,7 +415,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return object;
}
-@@ -2134,7 +2134,7 @@ static void deactivate_slab(struct kmem_
+@@ -2125,7 +2125,7 @@ static void deactivate_slab(struct kmem_
* that acquire_slab() will see a slab page that
* is frozen
*/
@@ -440,7 +424,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
} else {
m = M_FULL;
-@@ -2145,7 +2145,7 @@ static void deactivate_slab(struct kmem_
+@@ -2136,7 +2136,7 @@ static void deactivate_slab(struct kmem_
* slabs from diagnostic functions will not see
* any frozen slabs.
*/
@@ -449,7 +433,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -2169,7 +2169,7 @@ static void deactivate_slab(struct kmem_
+@@ -2160,7 +2160,7 @@ static void deactivate_slab(struct kmem_
goto redo;
if (lock)
@@ -458,7 +442,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (m == M_PARTIAL)
stat(s, tail);
-@@ -2208,10 +2208,10 @@ static void unfreeze_partials(struct kme
+@@ -2199,10 +2199,10 @@ static void unfreeze_partials(struct kme
n2 = get_node(s, page_to_nid(page));
if (n != n2) {
if (n)
@@ -471,7 +455,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
do {
-@@ -2240,7 +2240,7 @@ static void unfreeze_partials(struct kme
+@@ -2231,7 +2231,7 @@ static void unfreeze_partials(struct kme
}
if (n)
@@ -480,20 +464,20 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
while (discard_page) {
page = discard_page;
-@@ -2407,10 +2407,10 @@ static unsigned long count_partial(struc
+@@ -2398,10 +2398,10 @@ static unsigned long count_partial(struc
unsigned long x = 0;
struct page *page;
- spin_lock_irqsave(&n->list_lock, flags);
+ raw_spin_lock_irqsave(&n->list_lock, flags);
- list_for_each_entry(page, &n->partial, lru)
+ list_for_each_entry(page, &n->partial, slab_list)
x += get_count(page);
- spin_unlock_irqrestore(&n->list_lock, flags);
+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
return x;
}
#endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
-@@ -2845,7 +2845,7 @@ static void __slab_free(struct kmem_cach
+@@ -2835,7 +2835,7 @@ static void __slab_free(struct kmem_cach
do {
if (unlikely(n)) {
@@ -502,7 +486,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
n = NULL;
}
prior = page->freelist;
-@@ -2877,7 +2877,7 @@ static void __slab_free(struct kmem_cach
+@@ -2867,7 +2867,7 @@ static void __slab_free(struct kmem_cach
* Otherwise the list_lock will synchronize with
* other processors updating the list of slabs.
*/
@@ -511,7 +495,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -2919,7 +2919,7 @@ static void __slab_free(struct kmem_cach
+@@ -2908,7 +2908,7 @@ static void __slab_free(struct kmem_cach
add_partial(n, page, DEACTIVATE_TO_TAIL);
stat(s, FREE_ADD_PARTIAL);
}
@@ -520,7 +504,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return;
slab_empty:
-@@ -2934,7 +2934,7 @@ static void __slab_free(struct kmem_cach
+@@ -2923,7 +2923,7 @@ static void __slab_free(struct kmem_cach
remove_full(s, n, page);
}
@@ -529,7 +513,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
stat(s, FREE_SLAB);
discard_slab(s, page);
}
-@@ -3321,7 +3321,7 @@ static void
+@@ -3310,7 +3310,7 @@ static void
init_kmem_cache_node(struct kmem_cache_node *n)
{
n->nr_partial = 0;
@@ -538,25 +522,25 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
INIT_LIST_HEAD(&n->partial);
#ifdef CONFIG_SLUB_DEBUG
atomic_long_set(&n->nr_slabs, 0);
-@@ -3706,7 +3706,7 @@ static void free_partial(struct kmem_cac
+@@ -3695,7 +3695,7 @@ static void free_partial(struct kmem_cac
struct page *page, *h;
BUG_ON(irqs_disabled());
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
- list_for_each_entry_safe(page, h, &n->partial, lru) {
+ list_for_each_entry_safe(page, h, &n->partial, slab_list) {
if (!page->inuse) {
remove_partial(n, page);
-@@ -3716,7 +3716,7 @@ static void free_partial(struct kmem_cac
+@@ -3705,7 +3705,7 @@ static void free_partial(struct kmem_cac
"Objects remaining in %s on __kmem_cache_shutdown()");
}
}
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
- list_for_each_entry_safe(page, h, &discard, lru)
+ list_for_each_entry_safe(page, h, &discard, slab_list)
discard_slab(s, page);
-@@ -3990,7 +3990,7 @@ int __kmem_cache_shrink(struct kmem_cach
+@@ -3979,7 +3979,7 @@ int __kmem_cache_shrink(struct kmem_cach
for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
INIT_LIST_HEAD(promote + i);
@@ -565,7 +549,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Build lists of slabs to discard or promote.
-@@ -4021,7 +4021,7 @@ int __kmem_cache_shrink(struct kmem_cach
+@@ -4010,7 +4010,7 @@ int __kmem_cache_shrink(struct kmem_cach
for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
list_splice(promote + i, &n->partial);
@@ -573,17 +557,17 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
/* Release empty slabs */
- list_for_each_entry_safe(page, t, &discard, lru)
-@@ -4435,7 +4435,7 @@ static int validate_slab_node(struct kme
+ list_for_each_entry_safe(page, t, &discard, slab_list)
+@@ -4424,7 +4424,7 @@ static int validate_slab_node(struct kme
struct page *page;
unsigned long flags;
- spin_lock_irqsave(&n->list_lock, flags);
+ raw_spin_lock_irqsave(&n->list_lock, flags);
- list_for_each_entry(page, &n->partial, lru) {
+ list_for_each_entry(page, &n->partial, slab_list) {
validate_slab_slab(s, page, map);
-@@ -4457,7 +4457,7 @@ static int validate_slab_node(struct kme
+@@ -4446,7 +4446,7 @@ static int validate_slab_node(struct kme
s->name, count, atomic_long_read(&n->nr_slabs));
out:
@@ -592,15 +576,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return count;
}
-@@ -4643,12 +4643,12 @@ static int list_locations(struct kmem_ca
+@@ -4632,12 +4632,12 @@ static int list_locations(struct kmem_ca
if (!atomic_long_read(&n->nr_slabs))
continue;
- spin_lock_irqsave(&n->list_lock, flags);
+ raw_spin_lock_irqsave(&n->list_lock, flags);
- list_for_each_entry(page, &n->partial, lru)
+ list_for_each_entry(page, &n->partial, slab_list)
process_slab(&t, s, page, alloc, map);
- list_for_each_entry(page, &n->full, lru)
+ list_for_each_entry(page, &n->full, slab_list)
process_slab(&t, s, page, alloc, map);
- spin_unlock_irqrestore(&n->list_lock, flags);
+ raw_spin_unlock_irqrestore(&n->list_lock, flags);