Diffstat (limited to 'debian/patches-rt/Use-CONFIG_PREEMPTION.patch')
-rw-r--r--  debian/patches-rt/Use-CONFIG_PREEMPTION.patch  1524
1 file changed, 1524 insertions, 0 deletions
diff --git a/debian/patches-rt/Use-CONFIG_PREEMPTION.patch b/debian/patches-rt/Use-CONFIG_PREEMPTION.patch
new file mode 100644
index 000000000..ed9902f0e
--- /dev/null
+++ b/debian/patches-rt/Use-CONFIG_PREEMPTION.patch
@@ -0,0 +1,1524 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 26 Jul 2019 11:30:49 +0200
+Subject: [PATCH] Use CONFIG_PREEMPTION
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.4/older/patches-5.4.3-rt1.tar.xz
+
+This is an all-in-one patch of the current `PREEMPTION' branch.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html | 8 -
+ Documentation/RCU/Design/Requirements/Requirements.html | 24 ++--
+ Documentation/RCU/checklist.txt | 4
+ Documentation/RCU/rcubarrier.txt | 8 -
+ Documentation/RCU/stallwarn.txt | 4
+ Documentation/RCU/whatisRCU.txt | 7 -
+ Documentation/trace/ftrace-uses.rst | 2
+ arch/arc/kernel/entry.S | 6 -
+ arch/arm/include/asm/switch_to.h | 2
+ arch/arm/kernel/entry-armv.S | 4
+ arch/arm/kernel/traps.c | 2
+ arch/arm/mm/cache-v7.S | 4
+ arch/arm/mm/cache-v7m.S | 4
+ arch/arm64/Kconfig | 52 +++++-----
+ arch/arm64/crypto/sha256-glue.c | 2
+ arch/arm64/include/asm/assembler.h | 6 -
+ arch/arm64/include/asm/preempt.h | 4
+ arch/arm64/kernel/entry.S | 2
+ arch/arm64/kernel/traps.c | 3
+ arch/c6x/kernel/entry.S | 8 -
+ arch/csky/kernel/entry.S | 4
+ arch/h8300/kernel/entry.S | 6 -
+ arch/hexagon/kernel/vm_entry.S | 6 -
+ arch/ia64/kernel/entry.S | 12 +-
+ arch/ia64/kernel/kprobes.c | 2
+ arch/m68k/coldfire/entry.S | 2
+ arch/microblaze/kernel/entry.S | 2
+ arch/mips/include/asm/asmmacro.h | 4
+ arch/mips/kernel/entry.S | 6 -
+ arch/nds32/Kconfig | 2
+ arch/nds32/kernel/ex-exit.S | 4
+ arch/nios2/kernel/entry.S | 2
+ arch/parisc/Kconfig | 2
+ arch/parisc/kernel/entry.S | 10 -
+ arch/powerpc/Kconfig | 2
+ arch/powerpc/kernel/entry_32.S | 4
+ arch/powerpc/kernel/entry_64.S | 4
+ arch/powerpc/kernel/traps.c | 7 +
+ arch/riscv/kernel/entry.S | 4
+ arch/s390/Kconfig | 2
+ arch/s390/include/asm/preempt.h | 4
+ arch/s390/kernel/dumpstack.c | 2
+ arch/s390/kernel/entry.S | 2
+ arch/sh/Kconfig | 2
+ arch/sh/kernel/cpu/sh5/entry.S | 4
+ arch/sh/kernel/entry-common.S | 4
+ arch/sparc/Kconfig | 2
+ arch/sparc/kernel/rtrap_64.S | 2
+ arch/xtensa/kernel/entry.S | 2
+ arch/xtensa/kernel/traps.c | 7 -
+ drivers/gpu/drm/Kconfig | 2
+ drivers/media/platform/Kconfig | 2
+ drivers/video/backlight/Kconfig | 4
+ drivers/xen/preempt.c | 4
+ fs/btrfs/volumes.h | 2
+ fs/stack.c | 6 -
+ include/linux/fs.h | 4
+ include/linux/genhd.h | 6 -
+ include/linux/rcupdate.h | 4
+ include/xen/xen-ops.h | 4
+ kernel/Kconfig.locks | 12 +-
+ kernel/rcu/Kconfig | 4
+ kernel/rcu/rcutorture.c | 2
+ kernel/rcu/srcutiny.c | 2
+ kernel/rcu/tree.c | 4
+ kernel/rcu/tree_exp.h | 2
+ kernel/rcu/tree_plugin.h | 4
+ kernel/trace/trace.c | 2
+ kernel/workqueue.c | 2
+ lib/Kconfig.debug | 2
+ mm/memory.c | 2
+ mm/slub.c | 12 +-
+ net/core/dev.c | 2
+ 73 files changed, 191 insertions(+), 173 deletions(-)
+
+--- a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html
++++ b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html
+@@ -56,8 +56,8 @@ sections.
+ RCU-preempt Expedited Grace Periods</a></h2>
+
+ <p>
+-<tt>CONFIG_PREEMPT=y</tt> kernels implement RCU-preempt.
+-The overall flow of the handling of a given CPU by an RCU-preempt
++<tt>CONFIG_PREEMPT=y</tt> and <tt>CONFIG_PREEMPT_RT=y</tt> kernels implement
++RCU-preempt. The overall flow of the handling of a given CPU by an RCU-preempt
+ expedited grace period is shown in the following diagram:
+
+ <p><img src="ExpRCUFlow.svg" alt="ExpRCUFlow.svg" width="55%">
+@@ -140,8 +140,8 @@ or offline, among other things.
+ RCU-sched Expedited Grace Periods</a></h2>
+
+ <p>
+-<tt>CONFIG_PREEMPT=n</tt> kernels implement RCU-sched.
+-The overall flow of the handling of a given CPU by an RCU-sched
++<tt>CONFIG_PREEMPT=n</tt> and <tt>CONFIG_PREEMPT_RT=n</tt> kernels implement
++RCU-sched. The overall flow of the handling of a given CPU by an RCU-sched
+ expedited grace period is shown in the following diagram:
+
+ <p><img src="ExpSchedFlow.svg" alt="ExpSchedFlow.svg" width="55%">
+--- a/Documentation/RCU/Design/Requirements/Requirements.html
++++ b/Documentation/RCU/Design/Requirements/Requirements.html
+@@ -106,7 +106,7 @@ big RCU read-side critical section.
+ Production-quality implementations of <tt>rcu_read_lock()</tt> and
+ <tt>rcu_read_unlock()</tt> are extremely lightweight, and in
+ fact have exactly zero overhead in Linux kernels built for production
+-use with <tt>CONFIG_PREEMPT=n</tt>.
++use with <tt>CONFIG_PREEMPTION=n</tt>.
+
+ <p>
+ This guarantee allows ordering to be enforced with extremely low
+@@ -1499,7 +1499,7 @@ costs have plummeted.
+ However, as I learned from Matt Mackall's
+ <a href="http://elinux.org/Linux_Tiny-FAQ">bloatwatch</a>
+ efforts, memory footprint is critically important on single-CPU systems with
+-non-preemptible (<tt>CONFIG_PREEMPT=n</tt>) kernels, and thus
++non-preemptible (<tt>CONFIG_PREEMPTION=n</tt>) kernels, and thus
+ <a href="https://lkml.kernel.org/g/20090113221724.GA15307@linux.vnet.ibm.com">tiny RCU</a>
+ was born.
+ Josh Triplett has since taken over the small-memory banner with his
+@@ -1887,7 +1887,7 @@ constructs, there are limitations.
+ <p>
+ Implementations of RCU for which <tt>rcu_read_lock()</tt>
+ and <tt>rcu_read_unlock()</tt> generate no code, such as
+-Linux-kernel RCU when <tt>CONFIG_PREEMPT=n</tt>, can be
++Linux-kernel RCU when <tt>CONFIG_PREEMPTION=n</tt>, can be
+ nested arbitrarily deeply.
+ After all, there is no overhead.
+ Except that if all these instances of <tt>rcu_read_lock()</tt>
+@@ -2229,7 +2229,7 @@ be a no-op.
+ <p>
+ However, once the scheduler has spawned its first kthread, this early
+ boot trick fails for <tt>synchronize_rcu()</tt> (as well as for
+-<tt>synchronize_rcu_expedited()</tt>) in <tt>CONFIG_PREEMPT=y</tt>
++<tt>synchronize_rcu_expedited()</tt>) in <tt>CONFIG_PREEMPTION=y</tt>
+ kernels.
+ The reason is that an RCU read-side critical section might be preempted,
+ which means that a subsequent <tt>synchronize_rcu()</tt> really does have
+@@ -2568,7 +2568,7 @@ The compiler must not be permitted to tr
+
+ <p>
+ If the compiler did make this transformation in a
+-<tt>CONFIG_PREEMPT=n</tt> kernel build, and if <tt>get_user()</tt> did
++<tt>CONFIG_PREEMPTION=n</tt> kernel build, and if <tt>get_user()</tt> did
+ page fault, the result would be a quiescent state in the middle
+ of an RCU read-side critical section.
+ This misplaced quiescent state could result in line&nbsp;4 being
+@@ -2906,7 +2906,7 @@ in conjunction with the
+ The real-time-latency response requirements are such that the
+ traditional approach of disabling preemption across RCU
+ read-side critical sections is inappropriate.
+-Kernels built with <tt>CONFIG_PREEMPT=y</tt> therefore
++Kernels built with <tt>CONFIG_PREEMPTION=y</tt> therefore
+ use an RCU implementation that allows RCU read-side critical
+ sections to be preempted.
+ This requirement made its presence known after users made it
+@@ -3064,7 +3064,7 @@ includes
+ <tt>rcu_barrier_bh()</tt>, and
+ <tt>rcu_read_lock_bh_held()</tt>.
+ However, the update-side APIs are now simple wrappers for other RCU
+-flavors, namely RCU-sched in CONFIG_PREEMPT=n kernels and RCU-preempt
++flavors, namely RCU-sched in CONFIG_PREEMPTION=n kernels and RCU-preempt
+ otherwise.
+
+ <h3><a name="Sched Flavor">Sched Flavor (Historical)</a></h3>
+@@ -3088,12 +3088,12 @@ of an RCU read-side critical section can
+ Therefore, <i>RCU-sched</i> was created, which follows &ldquo;classic&rdquo;
+ RCU in that an RCU-sched grace period waits for for pre-existing
+ interrupt and NMI handlers.
+-In kernels built with <tt>CONFIG_PREEMPT=n</tt>, the RCU and RCU-sched
++In kernels built with <tt>CONFIG_PREEMPTION=n</tt>, the RCU and RCU-sched
+ APIs have identical implementations, while kernels built with
+-<tt>CONFIG_PREEMPT=y</tt> provide a separate implementation for each.
++<tt>CONFIG_PREEMPTION=y</tt> provide a separate implementation for each.
+
+ <p>
+-Note well that in <tt>CONFIG_PREEMPT=y</tt> kernels,
++Note well that in <tt>CONFIG_PREEMPTION=y</tt> kernels,
+ <tt>rcu_read_lock_sched()</tt> and <tt>rcu_read_unlock_sched()</tt>
+ disable and re-enable preemption, respectively.
+ This means that if there was a preemption attempt during the
+@@ -3302,12 +3302,12 @@ The tasks-RCU API is quite compact, cons
+ <tt>call_rcu_tasks()</tt>,
+ <tt>synchronize_rcu_tasks()</tt>, and
+ <tt>rcu_barrier_tasks()</tt>.
+-In <tt>CONFIG_PREEMPT=n</tt> kernels, trampolines cannot be preempted,
++In <tt>CONFIG_PREEMPTION=n</tt> kernels, trampolines cannot be preempted,
+ so these APIs map to
+ <tt>call_rcu()</tt>,
+ <tt>synchronize_rcu()</tt>, and
+ <tt>rcu_barrier()</tt>, respectively.
+-In <tt>CONFIG_PREEMPT=y</tt> kernels, trampolines can be preempted,
++In <tt>CONFIG_PREEMPTION=y</tt> kernels, trampolines can be preempted,
+ and these three APIs are therefore implemented by separate functions
+ that check for voluntary context switches.
+
+--- a/Documentation/RCU/checklist.txt
++++ b/Documentation/RCU/checklist.txt
+@@ -210,8 +210,8 @@ over a rather long period of time, but i
+ the rest of the system.
+
+ 7. As of v4.20, a given kernel implements only one RCU flavor,
+- which is RCU-sched for PREEMPT=n and RCU-preempt for PREEMPT=y.
+- If the updater uses call_rcu() or synchronize_rcu(),
++ which is RCU-sched for PREEMPTION=n and RCU-preempt for
++ PREEMPTION=y. If the updater uses call_rcu() or synchronize_rcu(),
+ then the corresponding readers my use rcu_read_lock() and
+ rcu_read_unlock(), rcu_read_lock_bh() and rcu_read_unlock_bh(),
+ or any pair of primitives that disables and re-enables preemption,
+--- a/Documentation/RCU/rcubarrier.txt
++++ b/Documentation/RCU/rcubarrier.txt
+@@ -6,8 +6,8 @@ RCU (read-copy update) is a synchronizat
+ of as a replacement for read-writer locking (among other things), but with
+ very low-overhead readers that are immune to deadlock, priority inversion,
+ and unbounded latency. RCU read-side critical sections are delimited
+-by rcu_read_lock() and rcu_read_unlock(), which, in non-CONFIG_PREEMPT
+-kernels, generate no code whatsoever.
++by rcu_read_lock() and rcu_read_unlock(), which, in
++non-CONFIG_PREEMPTION kernels, generate no code whatsoever.
+
+ This means that RCU writers are unaware of the presence of concurrent
+ readers, so that RCU updates to shared data must be undertaken quite
+@@ -303,10 +303,10 @@ Answer: This cannot happen. The reason i
+ to smp_call_function() and further to smp_call_function_on_cpu(),
+ causing this latter to spin until the cross-CPU invocation of
+ rcu_barrier_func() has completed. This by itself would prevent
+- a grace period from completing on non-CONFIG_PREEMPT kernels,
++ a grace period from completing on non-CONFIG_PREEMPTION kernels,
+ since each CPU must undergo a context switch (or other quiescent
+ state) before the grace period can complete. However, this is
+- of no use in CONFIG_PREEMPT kernels.
++ of no use in CONFIG_PREEMPTION kernels.
+
+ Therefore, on_each_cpu() disables preemption across its call
+ to smp_call_function() and also across the local call to
+--- a/Documentation/RCU/stallwarn.txt
++++ b/Documentation/RCU/stallwarn.txt
+@@ -20,7 +20,7 @@ o A CPU looping with preemption disabled
+
+ o A CPU looping with bottom halves disabled.
+
+-o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the kernel
++o For !CONFIG_PREEMPTION kernels, a CPU looping anywhere in the kernel
+ without invoking schedule(). If the looping in the kernel is
+ really expected and desirable behavior, you might need to add
+ some calls to cond_resched().
+@@ -39,7 +39,7 @@ o Anything that prevents RCU's grace-per
+ result in the "rcu_.*kthread starved for" console-log message,
+ which will include additional debugging information.
+
+-o A CPU-bound real-time task in a CONFIG_PREEMPT kernel, which might
++o A CPU-bound real-time task in a CONFIG_PREEMPTION kernel, which might
+ happen to preempt a low-priority task in the middle of an RCU
+ read-side critical section. This is especially damaging if
+ that low-priority task is not permitted to run on any other CPU,
+--- a/Documentation/RCU/whatisRCU.txt
++++ b/Documentation/RCU/whatisRCU.txt
+@@ -648,9 +648,10 @@ Quick Quiz #1: Why is this argument naiv
+
+ This section presents a "toy" RCU implementation that is based on
+ "classic RCU". It is also short on performance (but only for updates) and
+-on features such as hotplug CPU and the ability to run in CONFIG_PREEMPT
+-kernels. The definitions of rcu_dereference() and rcu_assign_pointer()
+-are the same as those shown in the preceding section, so they are omitted.
++on features such as hotplug CPU and the ability to run in
++CONFIG_PREEMPTION kernels. The definitions of rcu_dereference() and
++rcu_assign_pointer() are the same as those shown in the preceding
++section, so they are omitted.
+
+ void rcu_read_lock(void) { }
+
+--- a/Documentation/trace/ftrace-uses.rst
++++ b/Documentation/trace/ftrace-uses.rst
+@@ -146,7 +146,7 @@ FTRACE_OPS_FL_RECURSION_SAFE
+ itself or any nested functions that those functions call.
+
+ If this flag is set, it is possible that the callback will also
+- be called with preemption enabled (when CONFIG_PREEMPT is set),
++ be called with preemption enabled (when CONFIG_PREEMPTION is set),
+ but this is not guaranteed.
+
+ FTRACE_OPS_FL_IPMODIFY
+--- a/arch/arc/kernel/entry.S
++++ b/arch/arc/kernel/entry.S
+@@ -337,11 +337,11 @@ ENTRY(ret_from_exception)
+ resume_kernel_mode:
+
+ ; Disable Interrupts from this point on
+- ; CONFIG_PREEMPT: This is a must for preempt_schedule_irq()
+- ; !CONFIG_PREEMPT: To ensure restore_regs is intr safe
++ ; CONFIG_PREEMPTION: This is a must for preempt_schedule_irq()
++ ; !CONFIG_PREEMPTION: To ensure restore_regs is intr safe
+ IRQ_DISABLE r9
+
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+
+ ; Can't preempt if preemption disabled
+ GET_CURR_THR_INFO_FROM_SP r10
+--- a/arch/arm/include/asm/switch_to.h
++++ b/arch/arm/include/asm/switch_to.h
+@@ -10,7 +10,7 @@
+ * to ensure that the maintenance completes in case we migrate to another
+ * CPU.
+ */
+-#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
++#if defined(CONFIG_PREEMPTION) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
+ #define __complete_pending_tlbi() dsb(ish)
+ #else
+ #define __complete_pending_tlbi()
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -211,7 +211,7 @@ ENDPROC(__dabt_svc)
+ svc_entry
+ irq_handler
+
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
+ ldr r0, [tsk, #TI_FLAGS] @ get flags
+ teq r8, #0 @ if preempt count != 0
+@@ -226,7 +226,7 @@ ENDPROC(__irq_svc)
+
+ .ltorg
+
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ svc_preempt:
+ mov r8, lr
+ 1: bl preempt_schedule_irq @ irq en/disable is done inside
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -248,6 +248,8 @@ void show_stack(struct task_struct *tsk,
+
+ #ifdef CONFIG_PREEMPT
+ #define S_PREEMPT " PREEMPT"
++#elif defined(CONFIG_PREEMPT_RT)
++#define S_PREEMPT " PREEMPT_RT"
+ #else
+ #define S_PREEMPT ""
+ #endif
+--- a/arch/arm/mm/cache-v7.S
++++ b/arch/arm/mm/cache-v7.S
+@@ -135,13 +135,13 @@ ENTRY(v7_flush_dcache_all)
+ and r1, r1, #7 @ mask of the bits for current cache only
+ cmp r1, #2 @ see what cache we have at this level
+ blt skip @ skip if no cache, or just i-cache
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ save_and_disable_irqs_notrace r9 @ make cssr&csidr read atomic
+ #endif
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ isb @ isb to sych the new cssr&csidr
+ mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ restore_irqs_notrace r9
+ #endif
+ and r2, r1, #7 @ extract the length of the cache lines
+--- a/arch/arm/mm/cache-v7m.S
++++ b/arch/arm/mm/cache-v7m.S
+@@ -183,13 +183,13 @@ ENTRY(v7m_flush_dcache_all)
+ and r1, r1, #7 @ mask of the bits for current cache only
+ cmp r1, #2 @ see what cache we have at this level
+ blt skip @ skip if no cache, or just i-cache
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ save_and_disable_irqs_notrace r9 @ make cssr&csidr read atomic
+ #endif
+ write_csselr r10, r1 @ set current cache level
+ isb @ isb to sych the new cssr&csidr
+ read_ccsidr r1 @ read the new csidr
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ restore_irqs_notrace r9
+ #endif
+ and r2, r1, #7 @ extract the length of the cache lines
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -35,32 +35,32 @@ config ARM64
+ select ARCH_HAS_TEARDOWN_DMA_OPS if IOMMU_SUPPORT
+ select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG
+- select ARCH_INLINE_READ_LOCK if !PREEMPT
+- select ARCH_INLINE_READ_LOCK_BH if !PREEMPT
+- select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPT
+- select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPT
+- select ARCH_INLINE_READ_UNLOCK if !PREEMPT
+- select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPT
+- select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPT
+- select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPT
+- select ARCH_INLINE_WRITE_LOCK if !PREEMPT
+- select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPT
+- select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPT
+- select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPT
+- select ARCH_INLINE_WRITE_UNLOCK if !PREEMPT
+- select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPT
+- select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPT
+- select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPT
+- select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPT
+- select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPT
+- select ARCH_INLINE_SPIN_LOCK if !PREEMPT
+- select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPT
+- select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPT
+- select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPT
+- select ARCH_INLINE_SPIN_UNLOCK if !PREEMPT
+- select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPT
+- select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPT
+- select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPT
++ select ARCH_INLINE_READ_LOCK if !PREEMPTION
++ select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION
++ select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPTION
++ select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPTION
++ select ARCH_INLINE_READ_UNLOCK if !PREEMPTION
++ select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPTION
++ select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPTION
++ select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPTION
++ select ARCH_INLINE_WRITE_LOCK if !PREEMPTION
++ select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPTION
++ select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPTION
++ select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPTION
++ select ARCH_INLINE_WRITE_UNLOCK if !PREEMPTION
++ select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPTION
++ select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPTION
++ select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPTION
++ select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPTION
++ select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPTION
++ select ARCH_INLINE_SPIN_LOCK if !PREEMPTION
++ select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPTION
++ select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPTION
++ select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPTION
++ select ARCH_INLINE_SPIN_UNLOCK if !PREEMPTION
++ select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPTION
++ select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPTION
++ select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION
+ select ARCH_KEEP_MEMBLOCK
+ select ARCH_USE_CMPXCHG_LOCKREF
+ select ARCH_USE_QUEUED_RWLOCKS
+--- a/arch/arm64/crypto/sha256-glue.c
++++ b/arch/arm64/crypto/sha256-glue.c
+@@ -97,7 +97,7 @@ static int sha256_update_neon(struct sha
+ * input when running on a preemptible kernel, but process the
+ * data block by block instead.
+ */
+- if (IS_ENABLED(CONFIG_PREEMPT) &&
++ if (IS_ENABLED(CONFIG_PREEMPTION) &&
+ chunk + sctx->count % SHA256_BLOCK_SIZE > SHA256_BLOCK_SIZE)
+ chunk = SHA256_BLOCK_SIZE -
+ sctx->count % SHA256_BLOCK_SIZE;
+--- a/arch/arm64/include/asm/assembler.h
++++ b/arch/arm64/include/asm/assembler.h
+@@ -699,8 +699,8 @@ USER(\label, ic ivau, \tmp2) // invali
+ * where <label> is optional, and marks the point where execution will resume
+ * after a yield has been performed. If omitted, execution resumes right after
+ * the endif_yield_neon invocation. Note that the entire sequence, including
+- * the provided patchup code, will be omitted from the image if CONFIG_PREEMPT
+- * is not defined.
++ * the provided patchup code, will be omitted from the image if
++ * CONFIG_PREEMPTION is not defined.
+ *
+ * As a convenience, in the case where no patchup code is required, the above
+ * sequence may be abbreviated to
+@@ -728,7 +728,7 @@ USER(\label, ic ivau, \tmp2) // invali
+ .endm
+
+ .macro if_will_cond_yield_neon
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ get_current_task x0
+ ldr x0, [x0, #TSK_TI_PREEMPT]
+ sub x0, x0, #PREEMPT_DISABLE_OFFSET
+--- a/arch/arm64/include/asm/preempt.h
++++ b/arch/arm64/include/asm/preempt.h
+@@ -79,11 +79,11 @@ static inline bool should_resched(int pr
+ return pc == preempt_offset;
+ }
+
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ void preempt_schedule(void);
+ #define __preempt_schedule() preempt_schedule()
+ void preempt_schedule_notrace(void);
+ #define __preempt_schedule_notrace() preempt_schedule_notrace()
+-#endif /* CONFIG_PREEMPT */
++#endif /* CONFIG_PREEMPTION */
+
+ #endif /* __ASM_PREEMPT_H */
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -669,7 +669,7 @@ ENDPROC(el1_sync)
+
+ irq_handler
+
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count
+ alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+ /*
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -143,9 +143,12 @@ void show_stack(struct task_struct *tsk,
+
+ #ifdef CONFIG_PREEMPT
+ #define S_PREEMPT " PREEMPT"
++#elif defined(CONFIG_PREEMPT_RT)
++#define S_PREEMPT " PREEMPT_RT"
+ #else
+ #define S_PREEMPT ""
+ #endif
++
+ #define S_SMP " SMP"
+
+ static int __die(const char *str, int err, struct pt_regs *regs)
+--- a/arch/c6x/kernel/entry.S
++++ b/arch/c6x/kernel/entry.S
+@@ -18,7 +18,7 @@
+ #define DP B14
+ #define SP B15
+
+-#ifndef CONFIG_PREEMPT
++#ifndef CONFIG_PREEMPTION
+ #define resume_kernel restore_all
+ #endif
+
+@@ -287,7 +287,7 @@
+ ;; is a little bit different
+ ;;
+ ENTRY(ret_from_exception)
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ MASK_INT B2
+ #endif
+
+@@ -557,7 +557,7 @@ ENDPROC(_nmi_handler)
+ ;;
+ ;; Jump to schedule() then return to ret_from_isr
+ ;;
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ resume_kernel:
+ GET_THREAD_INFO A12
+ LDW .D1T1 *+A12(THREAD_INFO_PREEMPT_COUNT),A1
+@@ -582,7 +582,7 @@ ENDPROC(_nmi_handler)
+ B .S2 preempt_schedule_irq
+ #endif
+ ADDKPC .S2 preempt_schedule,B3,4
+-#endif /* CONFIG_PREEMPT */
++#endif /* CONFIG_PREEMPTION */
+
+ ENTRY(enable_exception)
+ DINT
+--- a/arch/csky/kernel/entry.S
++++ b/arch/csky/kernel/entry.S
+@@ -277,7 +277,7 @@ ENTRY(csky_irq)
+ zero_fp
+ psrset ee
+
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ mov r9, sp /* Get current stack pointer */
+ bmaski r10, THREAD_SHIFT
+ andn r9, r10 /* Get thread_info */
+@@ -294,7 +294,7 @@ ENTRY(csky_irq)
+ mov a0, sp
+ jbsr csky_do_IRQ
+
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ subi r12, 1
+ stw r12, (r9, TINFO_PREEMPT)
+ cmpnei r12, 0
+--- a/arch/h8300/kernel/entry.S
++++ b/arch/h8300/kernel/entry.S
+@@ -284,12 +284,12 @@ INTERRUPTS = 128
+ mov.l er0,@(LER0:16,sp)
+ bra resume_userspace
+
+-#if !defined(CONFIG_PREEMPT)
++#if !defined(CONFIG_PREEMPTION)
+ #define resume_kernel restore_all
+ #endif
+
+ ret_from_exception:
+-#if defined(CONFIG_PREEMPT)
++#if defined(CONFIG_PREEMPTION)
+ orc #0xc0,ccr
+ #endif
+ ret_from_interrupt:
+@@ -319,7 +319,7 @@ INTERRUPTS = 128
+ restore_all:
+ RESTORE_ALL /* Does RTE */
+
+-#if defined(CONFIG_PREEMPT)
++#if defined(CONFIG_PREEMPTION)
+ resume_kernel:
+ mov.l @(TI_PRE_COUNT:16,er4),er0
+ bne restore_all:8
+--- a/arch/hexagon/kernel/vm_entry.S
++++ b/arch/hexagon/kernel/vm_entry.S
+@@ -265,12 +265,12 @@
+ * should be in the designated register (usually R19)
+ *
+ * If we were in kernel mode, we don't need to check scheduler
+- * or signals if CONFIG_PREEMPT is not set. If set, then it has
++ * or signals if CONFIG_PREEMPTION is not set. If set, then it has
+ * to jump to a need_resched kind of block.
+- * BTW, CONFIG_PREEMPT is not supported yet.
++ * BTW, CONFIG_PREEMPTION is not supported yet.
+ */
+
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ R0 = #VM_INT_DISABLE
+ trap1(#HVM_TRAP1_VMSETIE)
+ #endif
+--- a/arch/ia64/kernel/entry.S
++++ b/arch/ia64/kernel/entry.S
+@@ -670,12 +670,12 @@ GLOBAL_ENTRY(ia64_leave_syscall)
+ *
+ * p6 controls whether current_thread_info()->flags needs to be check for
+ * extra work. We always check for extra work when returning to user-level.
+- * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
++ * With CONFIG_PREEMPTION, we also check for extra work when the preempt_count
+ * is 0. After extra work processing has been completed, execution
+ * resumes at ia64_work_processed_syscall with p6 set to 1 if the extra-work-check
+ * needs to be redone.
+ */
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ RSM_PSR_I(p0, r2, r18) // disable interrupts
+ cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall
+ (pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
+@@ -685,7 +685,7 @@ GLOBAL_ENTRY(ia64_leave_syscall)
+ (pUStk) mov r21=0 // r21 <- 0
+ ;;
+ cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0)
+-#else /* !CONFIG_PREEMPT */
++#else /* !CONFIG_PREEMPTION */
+ RSM_PSR_I(pUStk, r2, r18)
+ cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall
+ (pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
+@@ -814,12 +814,12 @@ GLOBAL_ENTRY(ia64_leave_kernel)
+ *
+ * p6 controls whether current_thread_info()->flags needs to be check for
+ * extra work. We always check for extra work when returning to user-level.
+- * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
++ * With CONFIG_PREEMPTION, we also check for extra work when the preempt_count
+ * is 0. After extra work processing has been completed, execution
+ * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
+ * needs to be redone.
+ */
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ RSM_PSR_I(p0, r17, r31) // disable interrupts
+ cmp.eq p0,pLvSys=r0,r0 // pLvSys=0: leave from kernel
+ (pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
+@@ -1120,7 +1120,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
+
+ /*
+ * On entry:
+- * r20 = &current->thread_info->pre_count (if CONFIG_PREEMPT)
++ * r20 = &current->thread_info->pre_count (if CONFIG_PREEMPTION)
+ * r31 = current->thread_info->flags
+ * On exit:
+ * p6 = TRUE if work-pending-check needs to be redone
+--- a/arch/ia64/kernel/kprobes.c
++++ b/arch/ia64/kernel/kprobes.c
+@@ -841,7 +841,7 @@ static int __kprobes pre_kprobes_handler
+ return 1;
+ }
+
+-#if !defined(CONFIG_PREEMPT)
++#if !defined(CONFIG_PREEMPTION)
+ if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) {
+ /* Boost up -- we can execute copied instructions directly */
+ ia64_psr(regs)->ri = p->ainsn.slot;
+--- a/arch/m68k/coldfire/entry.S
++++ b/arch/m68k/coldfire/entry.S
+@@ -108,7 +108,7 @@ ENTRY(system_call)
+ btst #5,%sp@(PT_OFF_SR) /* check if returning to kernel */
+ jeq Luser_return /* if so, skip resched, signals */
+
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ movel %sp,%d1 /* get thread_info pointer */
+ andl #-THREAD_SIZE,%d1 /* at base of kernel stack */
+ movel %d1,%a0
+--- a/arch/microblaze/kernel/entry.S
++++ b/arch/microblaze/kernel/entry.S
+@@ -728,7 +728,7 @@ irq_call:rtbd r0, do_IRQ;
+ bri 6f;
+ /* MS: Return to kernel state. */
+ 2:
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ lwi r11, CURRENT_TASK, TS_THREAD_INFO;
+ /* MS: get preempt_count from thread info */
+ lwi r5, r11, TI_PREEMPT_COUNT;
+--- a/arch/mips/include/asm/asmmacro.h
++++ b/arch/mips/include/asm/asmmacro.h
+@@ -63,7 +63,7 @@
+ .endm
+
+ .macro local_irq_disable reg=t0
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ lw \reg, TI_PRE_COUNT($28)
+ addi \reg, \reg, 1
+ sw \reg, TI_PRE_COUNT($28)
+@@ -73,7 +73,7 @@
+ xori \reg, \reg, 1
+ mtc0 \reg, CP0_STATUS
+ irq_disable_hazard
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ lw \reg, TI_PRE_COUNT($28)
+ addi \reg, \reg, -1
+ sw \reg, TI_PRE_COUNT($28)
+--- a/arch/mips/kernel/entry.S
++++ b/arch/mips/kernel/entry.S
+@@ -19,7 +19,7 @@
+ #include <asm/thread_info.h>
+ #include <asm/war.h>
+
+-#ifndef CONFIG_PREEMPT
++#ifndef CONFIG_PREEMPTION
+ #define resume_kernel restore_all
+ #else
+ #define __ret_from_irq ret_from_exception
+@@ -27,7 +27,7 @@
+
+ .text
+ .align 5
+-#ifndef CONFIG_PREEMPT
++#ifndef CONFIG_PREEMPTION
+ FEXPORT(ret_from_exception)
+ local_irq_disable # preempt stop
+ b __ret_from_irq
+@@ -53,7 +53,7 @@ FEXPORT(__ret_from_irq)
+ bnez t0, work_pending
+ j restore_all
+
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ resume_kernel:
+ local_irq_disable
+ lw t0, TI_PRE_COUNT($28)
+--- a/arch/nds32/Kconfig
++++ b/arch/nds32/Kconfig
+@@ -61,7 +61,7 @@ config GENERIC_HWEIGHT
+
+ config GENERIC_LOCKBREAK
+ def_bool y
+- depends on PREEMPT
++ depends on PREEMPTION
+
+ config TRACE_IRQFLAGS_SUPPORT
+ def_bool y
+--- a/arch/nds32/kernel/ex-exit.S
++++ b/arch/nds32/kernel/ex-exit.S
+@@ -72,7 +72,7 @@
+ restore_user_regs_last
+ .endm
+
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ .macro preempt_stop
+ .endm
+ #else
+@@ -158,7 +158,7 @@ ENTRY(ret_slow_syscall)
+ /*
+ * preemptive kernel
+ */
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ resume_kernel:
+ gie_disable
+ lwi $t0, [tsk+#TSK_TI_PREEMPT]
+--- a/arch/nios2/kernel/entry.S
++++ b/arch/nios2/kernel/entry.S
+@@ -365,7 +365,7 @@ ENTRY(ret_from_interrupt)
+ ldw r1, PT_ESTATUS(sp) /* check if returning to kernel */
+ TSTBNZ r1, r1, ESTATUS_EU, Luser_return
+
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ GET_THREAD_INFO r1
+ ldw r4, TI_PREEMPT_COUNT(r1)
+ bne r4, r0, restore_all
+--- a/arch/parisc/Kconfig
++++ b/arch/parisc/Kconfig
+@@ -81,7 +81,7 @@ config STACK_GROWSUP
+ config GENERIC_LOCKBREAK
+ bool
+ default y
+- depends on SMP && PREEMPT
++ depends on SMP && PREEMPTION
+
+ config ARCH_HAS_ILOG2_U32
+ bool
+--- a/arch/parisc/kernel/entry.S
++++ b/arch/parisc/kernel/entry.S
+@@ -940,14 +940,14 @@ ENTRY(intr_return)
+ rfi
+ nop
+
+-#ifndef CONFIG_PREEMPT
++#ifndef CONFIG_PREEMPTION
+ # define intr_do_preempt intr_restore
+-#endif /* !CONFIG_PREEMPT */
++#endif /* !CONFIG_PREEMPTION */
+
+ .import schedule,code
+ intr_do_resched:
+ /* Only call schedule on return to userspace. If we're returning
+- * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
++ * to kernel space, we may schedule if CONFIG_PREEMPTION, otherwise
+ * we jump back to intr_restore.
+ */
+ LDREG PT_IASQ0(%r16), %r20
+@@ -979,7 +979,7 @@ ENTRY(intr_return)
+ * and preempt_count is 0. otherwise, we continue on
+ * our merry way back to the current running task.
+ */
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ .import preempt_schedule_irq,code
+ intr_do_preempt:
+ rsm PSW_SM_I, %r0 /* disable interrupts */
+@@ -999,7 +999,7 @@ ENTRY(intr_return)
+ nop
+
+ b,n intr_restore /* ssm PSW_SM_I done by intr_restore */
+-#endif /* CONFIG_PREEMPT */
++#endif /* CONFIG_PREEMPTION */
+
+ /*
+ * External interrupts.
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -106,7 +106,7 @@ config LOCKDEP_SUPPORT
+ config GENERIC_LOCKBREAK
+ bool
+ default y
+- depends on SMP && PREEMPT
++ depends on SMP && PREEMPTION
+
+ config GENERIC_HWEIGHT
+ bool
+--- a/arch/powerpc/kernel/entry_32.S
++++ b/arch/powerpc/kernel/entry_32.S
+@@ -897,7 +897,7 @@ user_exc_return: /* r10 contains MSR_KE
+ bne- 0b
+ 1:
+
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ /* check current_thread_info->preempt_count */
+ lwz r0,TI_PREEMPT(r2)
+ cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
+@@ -921,7 +921,7 @@ user_exc_return: /* r10 contains MSR_KE
+ */
+ bl trace_hardirqs_on
+ #endif
+-#endif /* CONFIG_PREEMPT */
++#endif /* CONFIG_PREEMPTION */
+ restore_kuap:
+ kuap_restore r1, r2, r9, r10, r0
+
+--- a/arch/powerpc/kernel/entry_64.S
++++ b/arch/powerpc/kernel/entry_64.S
+@@ -846,7 +846,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+ bne- 0b
+ 1:
+
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ /* Check if we need to preempt */
+ andi. r0,r4,_TIF_NEED_RESCHED
+ beq+ restore
+@@ -877,7 +877,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+ li r10,MSR_RI
+ mtmsrd r10,1 /* Update machine state */
+ #endif /* CONFIG_PPC_BOOK3E */
+-#endif /* CONFIG_PREEMPT */
++#endif /* CONFIG_PREEMPTION */
+
+ .globl fast_exc_return_irq
+ fast_exc_return_irq:
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -251,14 +251,19 @@ NOKPROBE_SYMBOL(oops_end);
+
+ static int __die(const char *str, struct pt_regs *regs, long err)
+ {
++ const char *pr = "";
++
+ printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
+
++ if (IS_ENABLED(CONFIG_PREEMPTION))
++ pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT";
++
+ printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s%s %s\n",
+ IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? "LE" : "BE",
+ PAGE_SIZE / 1024,
+ early_radix_enabled() ? " MMU=Radix" : "",
+ early_mmu_has_feature(MMU_FTR_HPTE_TABLE) ? " MMU=Hash" : "",
+- IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "",
++ pr,
+ IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
+ IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "",
+ debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
+--- a/arch/riscv/kernel/entry.S
++++ b/arch/riscv/kernel/entry.S
+@@ -155,7 +155,7 @@
+ REG_L x2, PT_SP(sp)
+ .endm
+
+-#if !IS_ENABLED(CONFIG_PREEMPT)
++#if !IS_ENABLED(CONFIG_PREEMPTION)
+ .set resume_kernel, restore_all
+ #endif
+
+@@ -269,7 +269,7 @@ ENTRY(handle_exception)
+ RESTORE_ALL
+ sret
+
+-#if IS_ENABLED(CONFIG_PREEMPT)
++#if IS_ENABLED(CONFIG_PREEMPTION)
+ resume_kernel:
+ REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
+ bnez s0, restore_all
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -30,7 +30,7 @@ config GENERIC_BUG_RELATIVE_POINTERS
+ def_bool y
+
+ config GENERIC_LOCKBREAK
+- def_bool y if PREEMPT
++	def_bool y if PREEMPTION
+
+ config PGSTE
+ def_bool y if KVM
+--- a/arch/s390/include/asm/preempt.h
++++ b/arch/s390/include/asm/preempt.h
+@@ -130,11 +130,11 @@ static inline bool should_resched(int pr
+
+ #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ extern asmlinkage void preempt_schedule(void);
+ #define __preempt_schedule() preempt_schedule()
+ extern asmlinkage void preempt_schedule_notrace(void);
+ #define __preempt_schedule_notrace() preempt_schedule_notrace()
+-#endif /* CONFIG_PREEMPT */
++#endif /* CONFIG_PREEMPTION */
+
+ #endif /* __ASM_PREEMPT_H */
+--- a/arch/s390/kernel/dumpstack.c
++++ b/arch/s390/kernel/dumpstack.c
+@@ -194,6 +194,8 @@ void die(struct pt_regs *regs, const cha
+ regs->int_code >> 17, ++die_counter);
+ #ifdef CONFIG_PREEMPT
+ pr_cont("PREEMPT ");
++#elif defined(CONFIG_PREEMPT_RT)
++ pr_cont("PREEMPT_RT ");
+ #endif
+ pr_cont("SMP ");
+ if (debug_pagealloc_enabled())
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -790,7 +790,7 @@ ENTRY(io_int_handler)
+ .Lio_work:
+ tm __PT_PSW+1(%r11),0x01 # returning to user ?
+ jo .Lio_work_user # yes -> do resched & signal
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ # check for preemptive scheduling
+ icm %r0,15,__LC_PREEMPT_COUNT
+ jnz .Lio_restore # preemption is disabled
+--- a/arch/sh/Kconfig
++++ b/arch/sh/Kconfig
+@@ -108,7 +108,7 @@ config GENERIC_CALIBRATE_DELAY
+
+ config GENERIC_LOCKBREAK
+ def_bool y
+- depends on SMP && PREEMPT
++ depends on SMP && PREEMPTION
+
+ config ARCH_SUSPEND_POSSIBLE
+ def_bool n
+--- a/arch/sh/kernel/cpu/sh5/entry.S
++++ b/arch/sh/kernel/cpu/sh5/entry.S
+@@ -86,7 +86,7 @@
+ andi r6, ~0xf0, r6; \
+ putcon r6, SR;
+
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ # define preempt_stop() CLI()
+ #else
+ # define preempt_stop()
+@@ -884,7 +884,7 @@ LRESVEC_block_end: /* Marker. Unused.
+
+ /* Check softirqs */
+
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ pta ret_from_syscall, tr0
+ blink tr0, ZERO
+
+--- a/arch/sh/kernel/entry-common.S
++++ b/arch/sh/kernel/entry-common.S
+@@ -41,7 +41,7 @@
+ */
+ #include <asm/dwarf.h>
+
+-#if defined(CONFIG_PREEMPT)
++#if defined(CONFIG_PREEMPTION)
+ # define preempt_stop() cli ; TRACE_IRQS_OFF
+ #else
+ # define preempt_stop()
+@@ -84,7 +84,7 @@ ENTRY(ret_from_irq)
+ get_current_thread_info r8, r0
+ bt resume_kernel ! Yes, it's from kernel, go back soon
+
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ bra resume_userspace
+ nop
+ ENTRY(resume_kernel)
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -277,7 +277,7 @@ config US3_MC
+ config GENERIC_LOCKBREAK
+ bool
+ default y
+- depends on SPARC64 && SMP && PREEMPT
++ depends on SPARC64 && SMP && PREEMPTION
+
+ config NUMA
+ bool "NUMA support"
+--- a/arch/sparc/kernel/rtrap_64.S
++++ b/arch/sparc/kernel/rtrap_64.S
+@@ -310,7 +310,7 @@ kern_rtt: rdpr %canrestore, %g1
+ retry
+
+ to_kernel:
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ ldsw [%g6 + TI_PRE_COUNT], %l5
+ brnz %l5, kern_fpucheck
+ ldx [%g6 + TI_FLAGS], %l5
+--- a/arch/xtensa/kernel/entry.S
++++ b/arch/xtensa/kernel/entry.S
+@@ -520,7 +520,7 @@ ENTRY(kernel_exception)
+ call4 schedule # void schedule (void)
+ j 1b
+
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ 6:
+ _bbci.l a4, TIF_NEED_RESCHED, 4f
+
+--- a/arch/xtensa/kernel/traps.c
++++ b/arch/xtensa/kernel/traps.c
+@@ -524,12 +524,15 @@ DEFINE_SPINLOCK(die_lock);
+ void die(const char * str, struct pt_regs * regs, long err)
+ {
+ static int die_counter;
++ const char *pr = "";
++
++ if (IS_ENABLED(CONFIG_PREEMPTION))
++ pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT";
+
+ console_verbose();
+ spin_lock_irq(&die_lock);
+
+- pr_info("%s: sig: %ld [#%d]%s\n", str, err, ++die_counter,
+- IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "");
++ pr_info("%s: sig: %ld [#%d]%s\n", str, err, ++die_counter, pr);
+ show_regs(regs);
+ if (!user_mode(regs))
+ show_stack(NULL, (unsigned long*)regs->areg[1]);
+--- a/drivers/gpu/drm/Kconfig
++++ b/drivers/gpu/drm/Kconfig
+@@ -397,7 +397,7 @@ config DRM_R128
+
+ config DRM_I810
+ tristate "Intel I810"
+- # !PREEMPT because of missing ioctl locking
++ # !PREEMPTION because of missing ioctl locking
+ depends on DRM && AGP && AGP_INTEL && (!PREEMPTION || BROKEN)
+ help
+ Choose this option if you have an Intel I810 graphics card. If M is
+--- a/drivers/media/platform/Kconfig
++++ b/drivers/media/platform/Kconfig
+@@ -585,7 +585,7 @@ config VIDEO_MESON_G12A_AO_CEC
+
+ config CEC_GPIO
+ tristate "Generic GPIO-based CEC driver"
+- depends on PREEMPT || COMPILE_TEST
++ depends on PREEMPTION || COMPILE_TEST
+ select CEC_CORE
+ select CEC_PIN
+ select GPIOLIB
+--- a/drivers/video/backlight/Kconfig
++++ b/drivers/video/backlight/Kconfig
+@@ -99,7 +99,7 @@ config LCD_TOSA
+
+ config LCD_HP700
+ tristate "HP Jornada 700 series LCD Driver"
+- depends on SA1100_JORNADA720_SSP && !PREEMPT
++ depends on SA1100_JORNADA720_SSP && !PREEMPTION
+ default y
+ help
+ If you have an HP Jornada 700 series handheld (710/720/728)
+@@ -228,7 +228,7 @@ config BACKLIGHT_HP680
+
+ config BACKLIGHT_HP700
+ tristate "HP Jornada 700 series Backlight Driver"
+- depends on SA1100_JORNADA720_SSP && !PREEMPT
++ depends on SA1100_JORNADA720_SSP && !PREEMPTION
+ default y
+ help
+ If you have an HP Jornada 700 series,
+--- a/drivers/xen/preempt.c
++++ b/drivers/xen/preempt.c
+@@ -8,7 +8,7 @@
+ #include <linux/sched.h>
+ #include <xen/xen-ops.h>
+
+-#ifndef CONFIG_PREEMPT
++#ifndef CONFIG_PREEMPTION
+
+ /*
+ * Some hypercalls issued by the toolstack can take many 10s of
+@@ -37,4 +37,4 @@ asmlinkage __visible void xen_maybe_pree
+ __this_cpu_write(xen_in_preemptible_hcall, true);
+ }
+ }
+-#endif /* CONFIG_PREEMPT */
++#endif /* CONFIG_PREEMPTION */
+--- a/fs/btrfs/volumes.h
++++ b/fs/btrfs/volumes.h
+@@ -179,7 +179,7 @@ btrfs_device_set_##name(struct btrfs_dev
+ write_seqcount_end(&dev->data_seqcount); \
+ preempt_enable(); \
+ }
+-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
++#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
+ #define BTRFS_DEVICE_GETSET_FUNCS(name) \
+ static inline u64 \
+ btrfs_device_get_##name(const struct btrfs_device *dev) \
+--- a/fs/stack.c
++++ b/fs/stack.c
+@@ -23,7 +23,7 @@ void fsstack_copy_inode_size(struct inod
+
+ /*
+ * But on 32-bit, we ought to make an effort to keep the two halves of
+- * i_blocks in sync despite SMP or PREEMPT - though stat's
++ * i_blocks in sync despite SMP or PREEMPTION - though stat's
+ * generic_fillattr() doesn't bother, and we won't be applying quotas
+ * (where i_blocks does become important) at the upper level.
+ *
+@@ -38,14 +38,14 @@ void fsstack_copy_inode_size(struct inod
+ spin_unlock(&src->i_lock);
+
+ /*
+- * If CONFIG_SMP or CONFIG_PREEMPT on 32-bit, it's vital for
++ * If CONFIG_SMP or CONFIG_PREEMPTION on 32-bit, it's vital for
+ * fsstack_copy_inode_size() to hold some lock around
+ * i_size_write(), otherwise i_size_read() may spin forever (see
+ * include/linux/fs.h). We don't necessarily hold i_mutex when this
+ * is called, so take i_lock for that case.
+ *
+ * And if on 32-bit, continue our effort to keep the two halves of
+- * i_blocks in sync despite SMP or PREEMPT: use i_lock for that case
++ * i_blocks in sync despite SMP or PREEMPTION: use i_lock for that case
+ * too, and do both at once by combining the tests.
+ *
+ * There is none of this locking overhead in the 64-bit case.
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -855,7 +855,7 @@ static inline loff_t i_size_read(const s
+ i_size = inode->i_size;
+ } while (read_seqcount_retry(&inode->i_size_seqcount, seq));
+ return i_size;
+-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
++#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
+ loff_t i_size;
+
+ preempt_disable();
+@@ -880,7 +880,7 @@ static inline void i_size_write(struct i
+ inode->i_size = i_size;
+ write_seqcount_end(&inode->i_size_seqcount);
+ preempt_enable();
+-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
++#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
+ preempt_disable();
+ inode->i_size = i_size;
+ preempt_enable();
+--- a/include/linux/genhd.h
++++ b/include/linux/genhd.h
+@@ -717,7 +717,7 @@ static inline void hd_free_part(struct h
+ * accessor function.
+ *
+ * Code written along the lines of i_size_read() and i_size_write().
+- * CONFIG_PREEMPT case optimizes the case of UP kernel with preemption
++ * CONFIG_PREEMPTION case optimizes the case of UP kernel with preemption
+ * on.
+ */
+ static inline sector_t part_nr_sects_read(struct hd_struct *part)
+@@ -730,7 +730,7 @@ static inline sector_t part_nr_sects_rea
+ nr_sects = part->nr_sects;
+ } while (read_seqcount_retry(&part->nr_sects_seq, seq));
+ return nr_sects;
+-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
++#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
+ sector_t nr_sects;
+
+ preempt_disable();
+@@ -753,7 +753,7 @@ static inline void part_nr_sects_write(s
+ write_seqcount_begin(&part->nr_sects_seq);
+ part->nr_sects = size;
+ write_seqcount_end(&part->nr_sects_seq);
+-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
++#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
+ preempt_disable();
+ part->nr_sects = size;
+ preempt_enable();
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -154,7 +154,7 @@ static inline void exit_tasks_rcu_finish
+ *
+ * This macro resembles cond_resched(), except that it is defined to
+ * report potential quiescent states to RCU-tasks even if the cond_resched()
+- * machinery were to be shut off, as some advocate for PREEMPT kernels.
++ * machinery were to be shut off, as some advocate for PREEMPTION kernels.
+ */
+ #define cond_resched_tasks_rcu_qs() \
+ do { \
+@@ -580,7 +580,7 @@ do { \
+ *
+ * You can avoid reading and understanding the next paragraph by
+ * following this rule: don't put anything in an rcu_read_lock() RCU
+- * read-side critical section that would block in a !PREEMPT kernel.
++ * read-side critical section that would block in a !PREEMPTION kernel.
+ * But if you want the full story, read on!
+ *
+ * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU),
+--- a/include/xen/xen-ops.h
++++ b/include/xen/xen-ops.h
+@@ -215,7 +215,7 @@ bool xen_running_on_version_or_later(uns
+ void xen_efi_runtime_setup(void);
+
+
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+
+ static inline void xen_preemptible_hcall_begin(void)
+ {
+@@ -239,6 +239,6 @@ static inline void xen_preemptible_hcall
+ __this_cpu_write(xen_in_preemptible_hcall, false);
+ }
+
+-#endif /* CONFIG_PREEMPT */
++#endif /* CONFIG_PREEMPTION */
+
+ #endif /* INCLUDE_XEN_OPS_H */
+--- a/kernel/Kconfig.locks
++++ b/kernel/Kconfig.locks
+@@ -101,7 +101,7 @@ config UNINLINE_SPIN_UNLOCK
+ # unlock and unlock_irq functions are inlined when:
+ # - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y
+ # or
+-# - DEBUG_SPINLOCK=n and PREEMPT=n
++# - DEBUG_SPINLOCK=n and PREEMPTION=n
+ #
+ # unlock_bh and unlock_irqrestore functions are inlined when:
+ # - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y
+@@ -139,7 +139,7 @@ config INLINE_SPIN_UNLOCK_BH
+
+ config INLINE_SPIN_UNLOCK_IRQ
+ def_bool y
+- depends on !PREEMPT || ARCH_INLINE_SPIN_UNLOCK_IRQ
++ depends on !PREEMPTION || ARCH_INLINE_SPIN_UNLOCK_IRQ
+
+ config INLINE_SPIN_UNLOCK_IRQRESTORE
+ def_bool y
+@@ -168,7 +168,7 @@ config INLINE_READ_LOCK_IRQSAVE
+
+ config INLINE_READ_UNLOCK
+ def_bool y
+- depends on !PREEMPT || ARCH_INLINE_READ_UNLOCK
++ depends on !PREEMPTION || ARCH_INLINE_READ_UNLOCK
+
+ config INLINE_READ_UNLOCK_BH
+ def_bool y
+@@ -176,7 +176,7 @@ config INLINE_READ_UNLOCK_BH
+
+ config INLINE_READ_UNLOCK_IRQ
+ def_bool y
+- depends on !PREEMPT || ARCH_INLINE_READ_UNLOCK_IRQ
++ depends on !PREEMPTION || ARCH_INLINE_READ_UNLOCK_IRQ
+
+ config INLINE_READ_UNLOCK_IRQRESTORE
+ def_bool y
+@@ -205,7 +205,7 @@ config INLINE_WRITE_LOCK_IRQSAVE
+
+ config INLINE_WRITE_UNLOCK
+ def_bool y
+- depends on !PREEMPT || ARCH_INLINE_WRITE_UNLOCK
++ depends on !PREEMPTION || ARCH_INLINE_WRITE_UNLOCK
+
+ config INLINE_WRITE_UNLOCK_BH
+ def_bool y
+@@ -213,7 +213,7 @@ config INLINE_WRITE_UNLOCK_BH
+
+ config INLINE_WRITE_UNLOCK_IRQ
+ def_bool y
+- depends on !PREEMPT || ARCH_INLINE_WRITE_UNLOCK_IRQ
++ depends on !PREEMPTION || ARCH_INLINE_WRITE_UNLOCK_IRQ
+
+ config INLINE_WRITE_UNLOCK_IRQRESTORE
+ def_bool y
+--- a/kernel/rcu/Kconfig
++++ b/kernel/rcu/Kconfig
+@@ -200,8 +200,8 @@ config RCU_NOCB_CPU
+ specified at boot time by the rcu_nocbs parameter. For each
+ such CPU, a kthread ("rcuox/N") will be created to invoke
+ callbacks, where the "N" is the CPU being offloaded, and where
+- the "p" for RCU-preempt (PREEMPT kernels) and "s" for RCU-sched
+- (!PREEMPT kernels). Nothing prevents this kthread from running
++ the "p" for RCU-preempt (PREEMPTION kernels) and "s" for RCU-sched
++ (!PREEMPTION kernels). Nothing prevents this kthread from running
+ on the specified CPUs, but (1) the kthreads may be preempted
+ between each callback, and (2) affinity or cgroups can be used
+ to force the kthreads to run on whatever set of CPUs is desired.
+--- a/kernel/rcu/rcutorture.c
++++ b/kernel/rcu/rcutorture.c
+@@ -1725,7 +1725,7 @@ static void rcu_torture_fwd_cb_cr(struct
+ // Give the scheduler a chance, even on nohz_full CPUs.
+ static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
+ {
+- if (IS_ENABLED(CONFIG_PREEMPT) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
++ if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
+ // Real call_rcu() floods hit userspace, so emulate that.
+ if (need_resched() || (iter & 0xfff))
+ schedule();
+--- a/kernel/rcu/srcutiny.c
++++ b/kernel/rcu/srcutiny.c
+@@ -103,7 +103,7 @@ EXPORT_SYMBOL_GPL(__srcu_read_unlock);
+
+ /*
+ * Workqueue handler to drive one grace period and invoke any callbacks
+- * that become ready as a result. Single-CPU and !PREEMPT operation
++ * that become ready as a result. Single-CPU and !PREEMPTION operation
+ * means that we get away with murder on synchronization. ;-)
+ */
+ void srcu_drive_gp(struct work_struct *wp)
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -2667,9 +2667,9 @@ EXPORT_SYMBOL_GPL(kfree_call_rcu);
+
+ /*
+ * During early boot, any blocking grace-period wait automatically
+- * implies a grace period. Later on, this is never the case for PREEMPT.
++ * implies a grace period. Later on, this is never the case for PREEMPTION.
+ *
+- * Howevr, because a context switch is a grace period for !PREEMPT, any
++ * However, because a context switch is a grace period for !PREEMPTION, any
+ * blocking grace-period wait automatically implies a grace period if
+ * there is only one CPU online at any point time during execution of
+ * either synchronize_rcu() or synchronize_rcu_expedited(). It is OK to
+--- a/kernel/rcu/tree_exp.h
++++ b/kernel/rcu/tree_exp.h
+@@ -670,7 +670,7 @@ static void rcu_exp_handler(void *unused
+ }
+ }
+
+-/* PREEMPT=y, so no PREEMPT=n expedited grace period to clean up after. */
++/* PREEMPTION=y, so no PREEMPTION=n expedited grace period to clean up after. */
+ static void sync_sched_exp_online_cleanup(int cpu)
+ {
+ }
+--- a/kernel/rcu/tree_plugin.h
++++ b/kernel/rcu/tree_plugin.h
+@@ -788,7 +788,7 @@ static void __init rcu_bootup_announce(v
+ }
+
+ /*
+- * Note a quiescent state for PREEMPT=n. Because we do not need to know
++ * Note a quiescent state for PREEMPTION=n. Because we do not need to know
+ * how many quiescent states passed, just if there was at least one since
+ * the start of the grace period, this just sets a flag. The caller must
+ * have disabled preemption.
+@@ -838,7 +838,7 @@ void rcu_all_qs(void)
+ EXPORT_SYMBOL_GPL(rcu_all_qs);
+
+ /*
+- * Note a PREEMPT=n context switch. The caller must have disabled interrupts.
++ * Note a PREEMPTION=n context switch. The caller must have disabled interrupts.
+ */
+ void rcu_note_context_switch(bool preempt)
+ {
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -3654,6 +3654,8 @@ print_trace_header(struct seq_file *m, s
+ "desktop",
+ #elif defined(CONFIG_PREEMPT)
+ "preempt",
++#elif defined(CONFIG_PREEMPT_RT)
++ "preempt_rt",
+ #else
+ "unknown",
+ #endif
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -2279,7 +2279,7 @@ static void process_one_work(struct work
+ }
+
+ /*
+- * The following prevents a kworker from hogging CPU on !PREEMPT
++ * The following prevents a kworker from hogging CPU on !PREEMPTION
+ * kernels, where a requeueing work item waiting for something to
+ * happen could deadlock with stop_machine as such work item could
+ * indefinitely requeue itself while all other CPUs are trapped in
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -1070,7 +1070,7 @@ config DEBUG_TIMEKEEPING
+
+ config DEBUG_PREEMPT
+ bool "Debug preemptible kernel"
+- depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT
++ depends on DEBUG_KERNEL && PREEMPTION && TRACE_IRQFLAGS_SUPPORT
+ default y
+ help
+ If you say Y here then the kernel will use a debug variant of the
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2133,7 +2133,7 @@ static inline int pte_unmap_same(struct
+ pte_t *page_table, pte_t orig_pte)
+ {
+ int same = 1;
+-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION)
+ if (sizeof(pte_t) > sizeof(unsigned long)) {
+ spinlock_t *ptl = pte_lockptr(mm, pmd);
+ spin_lock(ptl);
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -1963,7 +1963,7 @@ static void *get_partial(struct kmem_cac
+ return get_any_partial(s, flags, c);
+ }
+
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ /*
+ * Calculate the next globally unique transaction for disambiguiation
+ * during cmpxchg. The transactions start with the cpu number and are then
+@@ -2008,7 +2008,7 @@ static inline void note_cmpxchg_failure(
+
+ pr_info("%s %s: cmpxchg redo ", n, s->name);
+
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
+ pr_warn("due to cpu change %d -> %d\n",
+ tid_to_cpu(tid), tid_to_cpu(actual_tid));
+@@ -2636,7 +2636,7 @@ static void *__slab_alloc(struct kmem_ca
+ unsigned long flags;
+
+ local_irq_save(flags);
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ /*
+ * We may have been preempted and rescheduled on a different
+ * cpu before disabling interrupts. Need to reload cpu area
+@@ -2690,13 +2690,13 @@ static __always_inline void *slab_alloc_
+ * as we end up on the original cpu again when doing the cmpxchg.
+ *
+ * We should guarantee that tid and kmem_cache are retrieved on
+- * the same cpu. It could be different if CONFIG_PREEMPT so we need
++ * the same cpu. It could be different if CONFIG_PREEMPTION so we need
+ * to check if it is matched or not.
+ */
+ do {
+ tid = this_cpu_read(s->cpu_slab->tid);
+ c = raw_cpu_ptr(s->cpu_slab);
+- } while (IS_ENABLED(CONFIG_PREEMPT) &&
++ } while (IS_ENABLED(CONFIG_PREEMPTION) &&
+ unlikely(tid != READ_ONCE(c->tid)));
+
+ /*
+@@ -2970,7 +2970,7 @@ static __always_inline void do_slab_free
+ do {
+ tid = this_cpu_read(s->cpu_slab->tid);
+ c = raw_cpu_ptr(s->cpu_slab);
+- } while (IS_ENABLED(CONFIG_PREEMPT) &&
++ } while (IS_ENABLED(CONFIG_PREEMPTION) &&
+ unlikely(tid != READ_ONCE(c->tid)));
+
+ /* Same with comment on barrier() in slab_alloc_node() */
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -820,7 +820,7 @@ EXPORT_SYMBOL(dev_get_by_napi_id);
+ *
+ * The use of raw_seqcount_begin() and cond_resched() before
+ * retrying is required as we want to give the writers a chance
+- * to complete when CONFIG_PREEMPT is not set.
++ * to complete when CONFIG_PREEMPTION is not set.
+ */
+ int netdev_get_name(struct net *net, char *name, int ifindex)
+ {