Diffstat (limited to 'debian/patches-rt/0201-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch')
-rw-r--r-- | debian/patches-rt/0201-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch | 603
1 file changed, 0 insertions, 603 deletions
diff --git a/debian/patches-rt/0201-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch b/debian/patches-rt/0201-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch
deleted file mode 100644
index 9cfe29d99..000000000
--- a/debian/patches-rt/0201-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch
+++ /dev/null
@@ -1,603 +0,0 @@
-From 3da16c3dad48b9fffed0477f45dffff8672958ff Mon Sep 17 00:00:00 2001
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Mon, 28 May 2018 15:24:22 +0200
-Subject: [PATCH 201/296] mm/SLxB: change list_lock to raw_spinlock_t
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.35-rt39.tar.xz
-
-The list_lock is used with IRQs off on RT. Make it a raw_spinlock_t,
-otherwise the interrupts won't be disabled on -RT. The locking rules remain
-the same on !RT.
-This patch changes it for SLAB and SLUB since both share the same header
-file for the struct kmem_cache_node definition.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- mm/slab.c | 90 +++++++++++++++++++++++++++----------------------------
- mm/slab.h |  2 +-
- mm/slub.c | 50 +++++++++++++++----------------
- 3 files changed, 71 insertions(+), 71 deletions(-)
-
-diff --git a/mm/slab.c b/mm/slab.c
-index b1113561b98b..a28b54325d9e 100644
---- a/mm/slab.c
-+++ b/mm/slab.c
-@@ -233,7 +233,7 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
- 	parent->shared = NULL;
- 	parent->alien = NULL;
- 	parent->colour_next = 0;
--	spin_lock_init(&parent->list_lock);
-+	raw_spin_lock_init(&parent->list_lock);
- 	parent->free_objects = 0;
- 	parent->free_touched = 0;
- }
-@@ -558,9 +558,9 @@ static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
- 	page_node = page_to_nid(page);
- 	n = get_node(cachep, page_node);
- 
--	spin_lock(&n->list_lock);
-+	raw_spin_lock(&n->list_lock);
- 	free_block(cachep, &objp, 1, page_node, &list);
--	spin_unlock(&n->list_lock);
-+	raw_spin_unlock(&n->list_lock);
- 
- 	slabs_destroy(cachep, &list);
- }
-@@ -698,7 +698,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
- 	struct kmem_cache_node *n = get_node(cachep, node);
- 
- 	if (ac->avail) {
--		spin_lock(&n->list_lock);
-+		raw_spin_lock(&n->list_lock);
- 		/*
- 		 * Stuff objects into the remote nodes shared array first.
- 		 * That way we could avoid the overhead of putting the objects
-@@ -709,7 +709,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
- 
- 		free_block(cachep, ac->entry, ac->avail, node, list);
- 		ac->avail = 0;
--		spin_unlock(&n->list_lock);
-+		raw_spin_unlock(&n->list_lock);
- 	}
- }
- 
-@@ -782,9 +782,9 @@ static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
- 		slabs_destroy(cachep, &list);
- 	} else {
- 		n = get_node(cachep, page_node);
--		spin_lock(&n->list_lock);
-+		raw_spin_lock(&n->list_lock);
- 		free_block(cachep, &objp, 1, page_node, &list);
--		spin_unlock(&n->list_lock);
-+		raw_spin_unlock(&n->list_lock);
- 		slabs_destroy(cachep, &list);
- 	}
- 	return 1;
-@@ -825,10 +825,10 @@ static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
- 	 */
- 	n = get_node(cachep, node);
- 	if (n) {
--		spin_lock_irq(&n->list_lock);
-+		raw_spin_lock_irq(&n->list_lock);
- 		n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
- 				cachep->num;
--		spin_unlock_irq(&n->list_lock);
-+		raw_spin_unlock_irq(&n->list_lock);
- 
- 		return 0;
- 	}
-@@ -907,7 +907,7 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep,
- 			goto fail;
- 
- 		n = get_node(cachep, node);
--		spin_lock_irq(&n->list_lock);
-+		raw_spin_lock_irq(&n->list_lock);
- 		if (n->shared && force_change) {
- 			free_block(cachep, n->shared->entry,
- 				   n->shared->avail, node, &list);
-@@ -925,7 +925,7 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep,
- 			new_alien = NULL;
- 		}
- 
--		spin_unlock_irq(&n->list_lock);
-+		raw_spin_unlock_irq(&n->list_lock);
- 		slabs_destroy(cachep, &list);
- 
- 		/*
-@@ -964,7 +964,7 @@ static void cpuup_canceled(long cpu)
- 		if (!n)
- 			continue;
- 
--		spin_lock_irq(&n->list_lock);
-+		raw_spin_lock_irq(&n->list_lock);
- 
- 		/* Free limit for this kmem_cache_node */
- 		n->free_limit -= cachep->batchcount;
-@@ -975,7 +975,7 @@ static void cpuup_canceled(long cpu)
- 		nc->avail = 0;
- 
- 		if (!cpumask_empty(mask)) {
--			spin_unlock_irq(&n->list_lock);
-+			raw_spin_unlock_irq(&n->list_lock);
- 			goto free_slab;
- 		}
- 
-@@ -989,7 +989,7 @@ static void cpuup_canceled(long cpu)
- 		alien = n->alien;
- 		n->alien = NULL;
- 
--		spin_unlock_irq(&n->list_lock);
-+		raw_spin_unlock_irq(&n->list_lock);
- 
- 		kfree(shared);
- 		if (alien) {
-@@ -1173,7 +1173,7 @@ static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *
- 	/*
- 	 * Do not assume that spinlocks can be initialized via memcpy:
- 	 */
--	spin_lock_init(&ptr->list_lock);
-+	raw_spin_lock_init(&ptr->list_lock);
- 
- 	MAKE_ALL_LISTS(cachep, ptr, nodeid);
- 	cachep->node[nodeid] = ptr;
-@@ -1344,11 +1344,11 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
- 	for_each_kmem_cache_node(cachep, node, n) {
- 		unsigned long total_slabs, free_slabs, free_objs;
- 
--		spin_lock_irqsave(&n->list_lock, flags);
-+		raw_spin_lock_irqsave(&n->list_lock, flags);
- 		total_slabs = n->total_slabs;
- 		free_slabs = n->free_slabs;
- 		free_objs = n->free_objects;
--		spin_unlock_irqrestore(&n->list_lock, flags);
-+		raw_spin_unlock_irqrestore(&n->list_lock, flags);
- 
- 		pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
- 			node, total_slabs - free_slabs, total_slabs,
-@@ -2106,7 +2106,7 @@ static void check_spinlock_acquired(struct kmem_cache *cachep)
- {
- #ifdef CONFIG_SMP
- 	check_irq_off();
--	assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
-+	assert_raw_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
- #endif
- }
- 
-@@ -2114,7 +2114,7 @@ static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
- {
- #ifdef CONFIG_SMP
- 	check_irq_off();
--	assert_spin_locked(&get_node(cachep, node)->list_lock);
-+	assert_raw_spin_locked(&get_node(cachep, node)->list_lock);
- #endif
- }
- 
-@@ -2154,9 +2154,9 @@ static void do_drain(void *arg)
- 	check_irq_off();
- 	ac = cpu_cache_get(cachep);
- 	n = get_node(cachep, node);
--	spin_lock(&n->list_lock);
-+	raw_spin_lock(&n->list_lock);
- 	free_block(cachep, ac->entry, ac->avail, node, &list);
--	spin_unlock(&n->list_lock);
-+	raw_spin_unlock(&n->list_lock);
- 	ac->avail = 0;
- 	slabs_destroy(cachep, &list);
- }
-@@ -2174,9 +2174,9 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
- 		drain_alien_cache(cachep, n->alien);
- 
- 	for_each_kmem_cache_node(cachep, node, n) {
--		spin_lock_irq(&n->list_lock);
-+		raw_spin_lock_irq(&n->list_lock);
- 		drain_array_locked(cachep, n->shared, node, true, &list);
--		spin_unlock_irq(&n->list_lock);
-+		raw_spin_unlock_irq(&n->list_lock);
- 
- 		slabs_destroy(cachep, &list);
- 	}
-@@ -2198,10 +2198,10 @@ static int drain_freelist(struct kmem_cache *cache,
- 	nr_freed = 0;
- 	while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
- 
--		spin_lock_irq(&n->list_lock);
-+		raw_spin_lock_irq(&n->list_lock);
- 		p = n->slabs_free.prev;
- 		if (p == &n->slabs_free) {
--			spin_unlock_irq(&n->list_lock);
-+			raw_spin_unlock_irq(&n->list_lock);
- 			goto out;
- 		}
- 
-@@ -2214,7 +2214,7 @@ static int drain_freelist(struct kmem_cache *cache,
- 		 * to the cache.
- 		 */
- 		n->free_objects -= cache->num;
--		spin_unlock_irq(&n->list_lock);
-+		raw_spin_unlock_irq(&n->list_lock);
- 		slab_destroy(cache, page);
- 		nr_freed++;
- 	}
-@@ -2650,7 +2650,7 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
- 	INIT_LIST_HEAD(&page->slab_list);
- 	n = get_node(cachep, page_to_nid(page));
- 
--	spin_lock(&n->list_lock);
-+	raw_spin_lock(&n->list_lock);
- 	n->total_slabs++;
- 	if (!page->active) {
- 		list_add_tail(&page->slab_list, &n->slabs_free);
-@@ -2660,7 +2660,7 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
- 
- 	STATS_INC_GROWN(cachep);
- 	n->free_objects += cachep->num - page->active;
--	spin_unlock(&n->list_lock);
-+	raw_spin_unlock(&n->list_lock);
- 
- 	fixup_objfreelist_debug(cachep, &list);
- }
-@@ -2826,7 +2826,7 @@ static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
- {
- 	struct page *page;
- 
--	assert_spin_locked(&n->list_lock);
-+	assert_raw_spin_locked(&n->list_lock);
- 	page = list_first_entry_or_null(&n->slabs_partial, struct page,
- 					slab_list);
- 	if (!page) {
-@@ -2853,10 +2853,10 @@ static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
- 	if (!gfp_pfmemalloc_allowed(flags))
- 		return NULL;
- 
--	spin_lock(&n->list_lock);
-+	raw_spin_lock(&n->list_lock);
- 	page = get_first_slab(n, true);
- 	if (!page) {
--		spin_unlock(&n->list_lock);
-+		raw_spin_unlock(&n->list_lock);
- 		return NULL;
- 	}
- 
-@@ -2865,7 +2865,7 @@ static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
- 
- 	fixup_slab_list(cachep, n, page, &list);
- 
--	spin_unlock(&n->list_lock);
-+	raw_spin_unlock(&n->list_lock);
- 	fixup_objfreelist_debug(cachep, &list);
- 
- 	return obj;
-@@ -2924,7 +2924,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
- 	if (!n->free_objects && (!shared || !shared->avail))
- 		goto direct_grow;
- 
--	spin_lock(&n->list_lock);
-+	raw_spin_lock(&n->list_lock);
- 	shared = READ_ONCE(n->shared);
- 
- 	/* See if we can refill from the shared array */
-@@ -2948,7 +2948,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
- must_grow:
- 	n->free_objects -= ac->avail;
- alloc_done:
--	spin_unlock(&n->list_lock);
-+	raw_spin_unlock(&n->list_lock);
- 	fixup_objfreelist_debug(cachep, &list);
- 
- direct_grow:
-@@ -3173,7 +3173,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
- 	BUG_ON(!n);
- 
- 	check_irq_off();
--	spin_lock(&n->list_lock);
-+	raw_spin_lock(&n->list_lock);
- 	page = get_first_slab(n, false);
- 	if (!page)
- 		goto must_grow;
-@@ -3191,12 +3191,12 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
- 
- 	fixup_slab_list(cachep, n, page, &list);
- 
--	spin_unlock(&n->list_lock);
-+	raw_spin_unlock(&n->list_lock);
- 	fixup_objfreelist_debug(cachep, &list);
- 	return obj;
- 
- must_grow:
--	spin_unlock(&n->list_lock);
-+	raw_spin_unlock(&n->list_lock);
- 	page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
- 	if (page) {
- 		/* This slab isn't counted yet so don't update free_objects */
-@@ -3374,7 +3374,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
- 
- 	check_irq_off();
- 	n = get_node(cachep, node);
--	spin_lock(&n->list_lock);
-+	raw_spin_lock(&n->list_lock);
- 	if (n->shared) {
- 		struct array_cache *shared_array = n->shared;
- 		int max = shared_array->limit - shared_array->avail;
-@@ -3403,7 +3403,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
- 		STATS_SET_FREEABLE(cachep, i);
- 	}
- #endif
--	spin_unlock(&n->list_lock);
-+	raw_spin_unlock(&n->list_lock);
- 	ac->avail -= batchcount;
- 	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
- 	slabs_destroy(cachep, &list);
-@@ -3832,9 +3832,9 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
- 
- 		node = cpu_to_mem(cpu);
- 		n = get_node(cachep, node);
--		spin_lock_irq(&n->list_lock);
-+		raw_spin_lock_irq(&n->list_lock);
- 		free_block(cachep, ac->entry, ac->avail, node, &list);
--		spin_unlock_irq(&n->list_lock);
-+		raw_spin_unlock_irq(&n->list_lock);
- 		slabs_destroy(cachep, &list);
- 	}
- 	free_percpu(prev);
-@@ -3929,9 +3929,9 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
- 		return;
- 	}
- 
--	spin_lock_irq(&n->list_lock);
-+	raw_spin_lock_irq(&n->list_lock);
- 	drain_array_locked(cachep, ac, node, false, &list);
--	spin_unlock_irq(&n->list_lock);
-+	raw_spin_unlock_irq(&n->list_lock);
- 
- 	slabs_destroy(cachep, &list);
- }
-@@ -4015,7 +4015,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
- 
- 	for_each_kmem_cache_node(cachep, node, n) {
- 		check_irq_on();
--		spin_lock_irq(&n->list_lock);
-+		raw_spin_lock_irq(&n->list_lock);
- 
- 		total_slabs += n->total_slabs;
- 		free_slabs += n->free_slabs;
-@@ -4024,7 +4024,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
- 		if (n->shared)
- 			shared_avail += n->shared->avail;
- 
--		spin_unlock_irq(&n->list_lock);
-+		raw_spin_unlock_irq(&n->list_lock);
- 	}
- 	num_objs = total_slabs * cachep->num;
- 	active_slabs = total_slabs - free_slabs;
-diff --git a/mm/slab.h b/mm/slab.h
-index f9977d6613d6..c9a43b787609 100644
---- a/mm/slab.h
-+++ b/mm/slab.h
-@@ -546,7 +546,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
-  * The slab lists for all objects.
-  */
- struct kmem_cache_node {
--	spinlock_t list_lock;
-+	raw_spinlock_t list_lock;
- 
- #ifdef CONFIG_SLAB
- 	struct list_head slabs_partial;	/* partial list first, better asm code */
-diff --git a/mm/slub.c b/mm/slub.c
-index fbc415c34009..ff543c29c3c7 100644
---- a/mm/slub.c
-+++ b/mm/slub.c
-@@ -1213,7 +1213,7 @@ static noinline int free_debug_processing(
- 	unsigned long flags;
- 	int ret = 0;
- 
--	spin_lock_irqsave(&n->list_lock, flags);
-+	raw_spin_lock_irqsave(&n->list_lock, flags);
- 	slab_lock(page);
- 
- 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
-@@ -1248,7 +1248,7 @@ static noinline int free_debug_processing(
- 			 bulk_cnt, cnt);
- 
- 	slab_unlock(page);
--	spin_unlock_irqrestore(&n->list_lock, flags);
-+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
- 	if (!ret)
- 		slab_fix(s, "Object at 0x%p not freed", object);
- 	return ret;
-@@ -1962,7 +1962,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
- 	if (!n || !n->nr_partial)
- 		return NULL;
- 
--	spin_lock(&n->list_lock);
-+	raw_spin_lock(&n->list_lock);
- 	list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
- 		void *t;
- 
-@@ -1987,7 +1987,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
- 			break;
- 
- 	}
--	spin_unlock(&n->list_lock);
-+	raw_spin_unlock(&n->list_lock);
- 	return object;
- }
- 
-@@ -2241,7 +2241,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
- 			 * that acquire_slab() will see a slab page that
- 			 * is frozen
- 			 */
--			spin_lock(&n->list_lock);
-+			raw_spin_lock(&n->list_lock);
- 		}
- 	} else {
- 		m = M_FULL;
-@@ -2253,7 +2253,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
- 			 * slabs from diagnostic functions will not see
- 			 * any frozen slabs.
- 			 */
--			spin_lock(&n->list_lock);
-+			raw_spin_lock(&n->list_lock);
- 		}
- #endif
- 	}
-@@ -2278,7 +2278,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
- 		goto redo;
- 
- 	if (lock)
--		spin_unlock(&n->list_lock);
-+		raw_spin_unlock(&n->list_lock);
- 
- 	if (m == M_PARTIAL)
- 		stat(s, tail);
-@@ -2317,10 +2317,10 @@ static void unfreeze_partials(struct kmem_cache *s,
- 		n2 = get_node(s, page_to_nid(page));
- 		if (n != n2) {
- 			if (n)
--				spin_unlock(&n->list_lock);
-+				raw_spin_unlock(&n->list_lock);
- 
- 			n = n2;
--			spin_lock(&n->list_lock);
-+			raw_spin_lock(&n->list_lock);
- 		}
- 
- 		do {
-@@ -2349,7 +2349,7 @@ static void unfreeze_partials(struct kmem_cache *s,
- 	}
- 
- 	if (n)
--		spin_unlock(&n->list_lock);
-+		raw_spin_unlock(&n->list_lock);
- 
- 	while (discard_page) {
- 		page = discard_page;
-@@ -2516,10 +2516,10 @@ static unsigned long count_partial(struct kmem_cache_node *n,
- 	unsigned long x = 0;
- 	struct page *page;
- 
--	spin_lock_irqsave(&n->list_lock, flags);
-+	raw_spin_lock_irqsave(&n->list_lock, flags);
- 	list_for_each_entry(page, &n->partial, slab_list)
- 		x += get_count(page);
--	spin_unlock_irqrestore(&n->list_lock, flags);
-+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
- 	return x;
- }
- #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
-@@ -2978,7 +2978,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
- 
- 	do {
- 		if (unlikely(n)) {
--			spin_unlock_irqrestore(&n->list_lock, flags);
-+			raw_spin_unlock_irqrestore(&n->list_lock, flags);
- 			n = NULL;
- 		}
- 		prior = page->freelist;
-@@ -3010,7 +3010,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
- 				 * Otherwise the list_lock will synchronize with
- 				 * other processors updating the list of slabs.
- 				 */
--				spin_lock_irqsave(&n->list_lock, flags);
-+				raw_spin_lock_irqsave(&n->list_lock, flags);
- 
- 			}
- 		}
-@@ -3052,7 +3052,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
- 		add_partial(n, page, DEACTIVATE_TO_TAIL);
- 		stat(s, FREE_ADD_PARTIAL);
- 	}
--	spin_unlock_irqrestore(&n->list_lock, flags);
-+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
- 	return;
- 
- slab_empty:
-@@ -3067,7 +3067,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
- 		remove_full(s, n, page);
- 	}
- 
--	spin_unlock_irqrestore(&n->list_lock, flags);
-+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
- 	stat(s, FREE_SLAB);
- 	discard_slab(s, page);
- }
-@@ -3472,7 +3472,7 @@ static void
- init_kmem_cache_node(struct kmem_cache_node *n)
- {
- 	n->nr_partial = 0;
--	spin_lock_init(&n->list_lock);
-+	raw_spin_lock_init(&n->list_lock);
- 	INIT_LIST_HEAD(&n->partial);
- #ifdef CONFIG_SLUB_DEBUG
- 	atomic_long_set(&n->nr_slabs, 0);
-@@ -3873,7 +3873,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
- 	struct page *page, *h;
- 
- 	BUG_ON(irqs_disabled());
--	spin_lock_irq(&n->list_lock);
-+	raw_spin_lock_irq(&n->list_lock);
- 	list_for_each_entry_safe(page, h, &n->partial, slab_list) {
- 		if (!page->inuse) {
- 			remove_partial(n, page);
-@@ -3883,7 +3883,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
- 			  "Objects remaining in %s on __kmem_cache_shutdown()");
- 		}
- 	}
--	spin_unlock_irq(&n->list_lock);
-+	raw_spin_unlock_irq(&n->list_lock);
- 
- 	list_for_each_entry_safe(page, h, &discard, slab_list)
- 		discard_slab(s, page);
-@@ -4154,7 +4154,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
- 		for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
- 			INIT_LIST_HEAD(promote + i);
- 
--		spin_lock_irqsave(&n->list_lock, flags);
-+		raw_spin_lock_irqsave(&n->list_lock, flags);
- 
- 		/*
- 		 * Build lists of slabs to discard or promote.
-@@ -4185,7 +4185,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
- 		for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
- 			list_splice(promote + i, &n->partial);
- 
--		spin_unlock_irqrestore(&n->list_lock, flags);
-+		raw_spin_unlock_irqrestore(&n->list_lock, flags);
- 
- 		/* Release empty slabs */
- 		list_for_each_entry_safe(page, t, &discard, slab_list)
-@@ -4547,7 +4547,7 @@ static int validate_slab_node(struct kmem_cache *s,
- 	struct page *page;
- 	unsigned long flags;
- 
--	spin_lock_irqsave(&n->list_lock, flags);
-+	raw_spin_lock_irqsave(&n->list_lock, flags);
- 
- 	list_for_each_entry(page, &n->partial, slab_list) {
- 		validate_slab(s, page);
-@@ -4569,7 +4569,7 @@ static int validate_slab_node(struct kmem_cache *s,
- 		       s->name, count, atomic_long_read(&n->nr_slabs));
- 
- out:
--	spin_unlock_irqrestore(&n->list_lock, flags);
-+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
- 	return count;
- }
- 
-@@ -4748,12 +4748,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
- 		if (!atomic_long_read(&n->nr_slabs))
- 			continue;
- 
--		spin_lock_irqsave(&n->list_lock, flags);
-+		raw_spin_lock_irqsave(&n->list_lock, flags);
- 		list_for_each_entry(page, &n->partial, slab_list)
- 			process_slab(&t, s, page, alloc);
- 		list_for_each_entry(page, &n->full, slab_list)
- 			process_slab(&t, s, page, alloc);
--		spin_unlock_irqrestore(&n->list_lock, flags);
-+		raw_spin_unlock_irqrestore(&n->list_lock, flags);
- 	}
- 
- 	for (i = 0; i < t.count; i++) {
--- 
-2.30.2
-
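For reference, the conversion the dropped patch applied is mechanical: the list_lock field changes type from spinlock_t to raw_spinlock_t, and every spin_*() call on it becomes the matching raw_spin_*() call, because on PREEMPT_RT an ordinary spinlock_t turns into a sleeping lock that no longer disables interrupts, while a raw_spinlock_t still does. The sketch below illustrates that pattern only; it is not code from the patch, and the struct and function names (demo_node, demo_node_init, demo_add) are made up.

#include <linux/list.h>
#include <linux/spinlock.h>

/* Illustration only: a node whose list is manipulated with interrupts off. */
struct demo_node {
	raw_spinlock_t	list_lock;	/* was: spinlock_t list_lock; */
	struct list_head partial;
};

static void demo_node_init(struct demo_node *n)
{
	raw_spin_lock_init(&n->list_lock);	/* was: spin_lock_init() */
	INIT_LIST_HEAD(&n->partial);
}

static void demo_add(struct demo_node *n, struct list_head *entry)
{
	unsigned long flags;

	/* raw_spin_lock_irqsave() disables interrupts on !RT and on RT alike. */
	raw_spin_lock_irqsave(&n->list_lock, flags);
	list_add(entry, &n->partial);
	raw_spin_unlock_irqrestore(&n->list_lock, flags);
}

The locking rules are unchanged on a non-RT kernel, since raw_spinlock_t and spinlock_t behave identically there.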