Diffstat (limited to 'debian/patches-rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch')
-rw-r--r-- | debian/patches-rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch | 79
1 file changed, 20 insertions, 59 deletions
diff --git a/debian/patches-rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch b/debian/patches-rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch
index cedce16d2..b25c97da6 100644
--- a/debian/patches-rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch
+++ b/debian/patches-rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch
@@ -1,7 +1,7 @@
 From: Ingo Molnar <mingo@elte.hu>
 Date: Fri, 3 Jul 2009 08:29:37 -0500
 Subject: mm: page_alloc: rt-friendly per-cpu pages
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2.17-rt9.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.4/older/patches-5.4.3-rt1.tar.xz

 rt-friendly per-cpu pages: convert the irqs-off per-cpu locking
 method into a preemptible, explicit-per-cpu-locks method.
@@ -13,12 +13,12 @@ Contains fixes from:
 Signed-off-by: Ingo Molnar <mingo@elte.hu>
 Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 ---
- mm/page_alloc.c | 62 ++++++++++++++++++++++++++++++++++++++------------------
- 1 file changed, 43 insertions(+), 19 deletions(-)
+ mm/page_alloc.c | 51 ++++++++++++++++++++++++++++++++------------------
+ 1 file changed, 32 insertions(+), 19 deletions(-)

 --- a/mm/page_alloc.c
 +++ b/mm/page_alloc.c
-@@ -62,6 +62,7 @@
+@@ -61,6 +61,7 @@
  #include <linux/hugetlb.h>
  #include <linux/sched/rt.h>
  #include <linux/sched/mm.h>
@@ -26,13 +26,13 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  #include <linux/page_owner.h>
  #include <linux/kthread.h>
  #include <linux/memcontrol.h>
-@@ -311,6 +312,18 @@ EXPORT_SYMBOL(nr_node_ids);
+@@ -357,6 +358,18 @@ EXPORT_SYMBOL(nr_node_ids);
  EXPORT_SYMBOL(nr_online_nodes);
  #endif

 +static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
 +
-+#ifdef CONFIG_PREEMPT_RT_BASE
++#ifdef CONFIG_PREEMPT_RT
 +# define cpu_lock_irqsave(cpu, flags)		\
 +	local_lock_irqsave_on(pa_lock, flags, cpu)
 +# define cpu_unlock_irqrestore(cpu, flags)	\
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  int page_group_by_mobility_disabled __read_mostly;

  #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
-@@ -1389,10 +1402,10 @@ static void __free_pages_ok(struct page
+@@ -1450,10 +1463,10 @@ static void __free_pages_ok(struct page
  		return;

  	migratetype = get_pfnblock_migratetype(page, pfn);
@@ -58,7 +58,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  }

  void __free_pages_core(struct page *page, unsigned int order)
-@@ -2737,13 +2750,13 @@ void drain_zone_pages(struct zone *zone,
+@@ -2823,13 +2836,13 @@ void drain_zone_pages(struct zone *zone,
  	int to_drain, batch;
  	LIST_HEAD(dst);

@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  	if (to_drain > 0)
  		free_pcppages_bulk(zone, &dst, false);

-@@ -2765,7 +2778,7 @@ static void drain_pages_zone(unsigned in
+@@ -2851,7 +2864,7 @@ static void drain_pages_zone(unsigned in
  	LIST_HEAD(dst);
  	int count;

@@ -83,7 +83,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  	pset = per_cpu_ptr(zone->pageset, cpu);

  	pcp = &pset->pcp;
-@@ -2773,7 +2786,7 @@ static void drain_pages_zone(unsigned in
+@@ -2859,7 +2872,7 @@ static void drain_pages_zone(unsigned in
  	if (count)
  		isolate_pcp_pages(count, pcp, &dst);

@@ -92,46 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  	if (count)
  		free_pcppages_bulk(zone, &dst, false);
-@@ -2811,6 +2824,7 @@ void drain_local_pages(struct zone *zone
- 	drain_pages(cpu);
- }
- 
-+#ifndef CONFIG_PREEMPT_RT_BASE
- static void drain_local_pages_wq(struct work_struct *work)
- {
- 	struct pcpu_drain *drain;
-@@ -2828,6 +2842,7 @@ static void drain_local_pages_wq(struct
- 	drain_local_pages(drain->zone);
- 	preempt_enable();
- }
-+#endif
- 
- /*
-  * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
-@@ -2895,6 +2910,14 @@ void drain_all_pages(struct zone *zone)
- 		cpumask_clear_cpu(cpu, &cpus_with_pcps);
- 	}
- 
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+	for_each_cpu(cpu, &cpus_with_pcps) {
-+		if (zone)
-+			drain_pages_zone(cpu, zone);
-+		else
-+			drain_pages(cpu);
-+	}
-+#else
- 	for_each_cpu(cpu, &cpus_with_pcps) {
- 		struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu);
- 
-@@ -2904,6 +2927,7 @@ void drain_all_pages(struct zone *zone)
- 	}
- 	for_each_cpu(cpu, &cpus_with_pcps)
- 		flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work);
-+#endif
- 
- 	mutex_unlock(&pcpu_drain_mutex);
- }
-@@ -3023,9 +3047,9 @@ void free_unref_page(struct page *page)
+@@ -3109,9 +3122,9 @@ void free_unref_page(struct page *page)
  	if (!free_unref_page_prepare(page, pfn))
  		return;

 -	local_irq_save(flags);
 +	local_lock_irqsave(pa_lock, flags);
  	free_unref_page_commit(page, pfn, &dst);
 -	local_irq_restore(flags);
 +	local_unlock_irqrestore(pa_lock, flags);
  	if (!list_empty(&dst))
  		free_pcppages_bulk(zone, &dst, false);
  }
-@@ -3052,7 +3076,7 @@ void free_unref_page_list(struct list_he
+@@ -3138,7 +3151,7 @@ void free_unref_page_list(struct list_he
  		set_page_private(page, pfn);
  	}

 -	local_irq_save(flags);
 +	local_lock_irqsave(pa_lock, flags);
  	list_for_each_entry_safe(page, next, list, lru) {
  		unsigned long pfn = page_private(page);
  		enum zone_type type;
-@@ -3067,12 +3091,12 @@ void free_unref_page_list(struct list_he
+@@ -3153,12 +3166,12 @@ void free_unref_page_list(struct list_he
  		 * a large list of pages to free.
  		 */
  		if (++batch_count == SWAP_CLUSTER_MAX) {
 -			local_irq_restore(flags);
 +			local_unlock_irqrestore(pa_lock, flags);
  			batch_count = 0;
 -			local_irq_save(flags);
 +			local_lock_irqsave(pa_lock, flags);
  		}
  	}
 -	local_irq_restore(flags);
 +	local_unlock_irqrestore(pa_lock, flags);

  	for (i = 0; i < __MAX_NR_ZONES; ) {
  		struct page *page;
-@@ -3222,7 +3246,7 @@ static struct page *rmqueue_pcplist(stru
+@@ -3308,7 +3321,7 @@ static struct page *rmqueue_pcplist(stru
  	struct page *page;
  	unsigned long flags;

 -	local_irq_save(flags);
 +	local_lock_irqsave(pa_lock, flags);
  	pcp = &this_cpu_ptr(zone->pageset)->pcp;
  	list = &pcp->lists[migratetype];
  	page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list);
-@@ -3230,7 +3254,7 @@ static struct page *rmqueue_pcplist(stru
+@@ -3316,7 +3329,7 @@ static struct page *rmqueue_pcplist(stru
  		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
  		zone_statistics(preferred_zone, zone);
  	}
 -	local_irq_restore(flags);
 +	local_unlock_irqrestore(pa_lock, flags);

  	return page;
  }
-@@ -3257,7 +3281,7 @@ struct page *rmqueue(struct zone *prefer
+@@ -3343,7 +3356,7 @@ struct page *rmqueue(struct zone *prefer
  	 * allocate greater than order-1 page units with __GFP_NOFAIL.
  	 */
  	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
 -	local_irq_save(flags);
 +	local_lock_irqsave(pa_lock, flags);

  	do {
  		page = NULL;
-@@ -3277,7 +3301,7 @@ struct page *rmqueue(struct zone *prefer
+@@ -3363,7 +3376,7 @@ struct page *rmqueue(struct zone *prefer
  		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
  	zone_statistics(preferred_zone, zone);
 -	local_irq_restore(flags);
 +	local_unlock_irqrestore(pa_lock, flags);

  out:
  	/* Separate test+clear to avoid unnecessary atomics */
-@@ -3290,7 +3314,7 @@ struct page *rmqueue(struct zone *prefer
+@@ -3376,7 +3389,7 @@ struct page *rmqueue(struct zone *prefer
  	return page;

  failed:
 -	local_irq_restore(flags);
 +	local_unlock_irqrestore(pa_lock, flags);
  	return NULL;
  }
-@@ -8479,7 +8503,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -8600,7 +8613,7 @@ void zone_pcp_reset(struct zone *zone)
  	struct per_cpu_pageset *pset;

  	/* avoid races with drain_pages() */
 -	local_irq_save(flags);
 +	local_lock_irqsave(pa_lock, flags);
  	if (zone->pageset != &boot_pageset) {
  		for_each_online_cpu(cpu) {
  			pset = per_cpu_ptr(zone->pageset, cpu);
-@@ -8488,7 +8512,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -8609,7 +8622,7 @@ void zone_pcp_reset(struct zone *zone)
  		free_percpu(zone->pageset);
  		zone->pageset = &boot_pageset;
  	}
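Editor's note: the patch above swaps irq-disabled critical sections around the per-cpu pagelists for an explicit per-cpu lock (pa_lock) that tasks can block on under PREEMPT_RT, which is also why the RT build can drain remote CPUs directly instead of queueing per-cpu work items. The following userspace C sketch models that idea with one pthread mutex per "CPU". It is illustrative only: pcp_cache, cpu_lock, drain_cpu and NR_CPUS are invented names for the example, not kernel API.

/*
 * Userspace model of the locking pattern the patch introduces:
 * instead of disabling interrupts around the per-cpu pagelists,
 * each "CPU" gets its own lock that a task may block on under RT.
 * All names here are invented for the example; this is not kernel code.
 */
#include <pthread.h>
#include <stdio.h>

#define NR_CPUS 4

struct pcp_cache {			/* stand-in for per_cpu_pages */
	pthread_mutex_t lock;		/* stand-in for pa_lock	      */
	int count;			/* pages cached on this CPU   */
};

static struct pcp_cache pcp[NR_CPUS];

/* Analog of cpu_lock_irqsave()/cpu_unlock_irqrestore() on RT:
 * take the named CPU's lock rather than disabling interrupts. */
static void cpu_lock(int cpu)   { pthread_mutex_lock(&pcp[cpu].lock); }
static void cpu_unlock(int cpu) { pthread_mutex_unlock(&pcp[cpu].lock); }

/* Analog of drain_pages_zone(): with a per-cpu lock, one CPU can
 * safely empty another CPU's list directly, which is why the RT
 * branch of drain_all_pages() needs no per-cpu work items. */
static int drain_cpu(int cpu)
{
	int drained;

	cpu_lock(cpu);
	drained = pcp[cpu].count;
	pcp[cpu].count = 0;
	cpu_unlock(cpu);
	return drained;
}

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		pthread_mutex_init(&pcp[cpu].lock, NULL);
		pcp[cpu].count = 32;	/* pretend pages are cached */
	}

	/* Mirrors the removed CONFIG_PREEMPT_RT_BASE branch of
	 * drain_all_pages(): drain every CPU from this context. */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d: drained %d pages\n", cpu, drain_cpu(cpu));

	return 0;
}

On non-RT configurations the patch's macros collapse back to local_irq_save()/local_irq_restore(), so the sketch corresponds only to the CONFIG_PREEMPT_RT side of the #ifdef.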