author     Romain Perier <romain.perier@gmail.com>  2019-07-16 13:34:39 +0200
committer  Romain Perier <romain.perier@opensource.viveris.fr>  2019-07-16 14:43:21 +0200
commit     0627ea7ef0e15a79a2d4b48752c65e62c436e08b (patch)
tree       0c9efe59ed5cf66d6f3a5c857ff0caa3f22468c2 /debian/patches-rt
parent     f38bda056536ec5b99444c22fb25a82a61b4c970 (diff)
download   linux-debian-0627ea7ef0e15a79a2d4b48752c65e62c436e08b.tar.gz
[rt] Update to 5.2-rt1
Diffstat (limited to 'debian/patches-rt')
-rw-r--r--  debian/patches-rt/0001-ARM-at91-move-SoC-specific-definitions-to-SoC-folder.patch | 607
-rw-r--r--  debian/patches-rt/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch | 16
-rw-r--r--  debian/patches-rt/0001-printk-rb-add-printk-ring-buffer-documentation.patch | 2
-rw-r--r--  debian/patches-rt/0001-x86-fpu-Remove-fpu-initialized-usage-in-__fpu__resto.patch | 153
-rw-r--r--  debian/patches-rt/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch | 22
-rw-r--r--  debian/patches-rt/0002-clocksource-drivers-tcb_clksrc-stop-depending-on-atm.patch | 252
-rw-r--r--  debian/patches-rt/0002-printk-rb-add-prb-locking-functions.patch | 8
-rw-r--r--  debian/patches-rt/0002-x86-fpu-Remove-fpu__restore.patch | 98
-rw-r--r--  debian/patches-rt/0003-clocksource-drivers-tcb_clksrc-Use-tcb-as-sched_cloc.patch | 74
-rw-r--r--  debian/patches-rt/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch | 162
-rw-r--r--  debian/patches-rt/0003-printk-rb-define-ring-buffer-struct-and-initializer.patch | 2
-rw-r--r--  debian/patches-rt/0003-x86-fpu-Remove-preempt_disable-in-fpu__clear.patch | 43
-rw-r--r--  debian/patches-rt/0004-ARM-at91-Implement-clocksource-selection.patch | 46
-rw-r--r--  debian/patches-rt/0004-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch | 34
-rw-r--r--  debian/patches-rt/0004-printk-rb-add-writer-interface.patch | 2
-rw-r--r--  debian/patches-rt/0004-x86-fpu-Always-init-the-state-in-fpu__clear.patch | 75
-rw-r--r--  debian/patches-rt/0005-clocksource-drivers-tcb_clksrc-move-Kconfig-option.patch | 55
-rw-r--r--  debian/patches-rt/0005-printk-rb-add-basic-non-blocking-reading-interface.patch | 2
-rw-r--r--  debian/patches-rt/0005-x86-fpu-Remove-fpu-initialized-usage-in-copy_fpstate.patch | 90
-rw-r--r--  debian/patches-rt/0006-clocksource-drivers-timer-atmel-pit-rework-Kconfig-o.patch | 30
-rw-r--r--  debian/patches-rt/0006-printk-rb-add-blocking-reader-support.patch | 2
-rw-r--r--  debian/patches-rt/0006-x86-fpu-Don-t-save-fxregs-for-ia32-frames-in-copy_fp.patch | 83
-rw-r--r--  debian/patches-rt/0007-clocksource-drivers-tcb_clksrc-Rename-the-file-for-c.patch | 990
-rw-r--r--  debian/patches-rt/0007-printk-rb-add-functionality-required-by-printk.patch | 2
-rw-r--r--  debian/patches-rt/0007-x86-fpu-Remove-fpu-initialized.patch | 476
-rw-r--r--  debian/patches-rt/0008-clocksource-drivers-timer-atmel-tcb-tc_clksrc_suspen.patch | 35
-rw-r--r--  debian/patches-rt/0008-printk-add-ring-buffer-and-kthread.patch | 12
-rw-r--r--  debian/patches-rt/0008-x86-fpu-Remove-user_fpu_begin.patch | 83
-rw-r--r--  debian/patches-rt/0009-misc-atmel_tclib-do-not-probe-already-used-TCBs.patch | 27
-rw-r--r--  debian/patches-rt/0009-printk-remove-exclusive-console-hack.patch | 16
-rw-r--r--  debian/patches-rt/0009-x86-fpu-Add-__-make_fpregs_active-helpers.patch | 77
-rw-r--r--  debian/patches-rt/0010-printk-redirect-emit-store-to-new-ringbuffer.patch | 59
-rw-r--r--  debian/patches-rt/0010-x86-fpu-Make-__raw_xsave_addr-use-feature-number-ins.patch | 98
-rw-r--r--  debian/patches-rt/0011-printk_safe-remove-printk-safe-code.patch | 59
-rw-r--r--  debian/patches-rt/0011-x86-fpu-Make-get_xsave_field_ptr-and-get_xsave_addr-.patch | 232
-rw-r--r--  debian/patches-rt/0012-printk-minimize-console-locking-implementation.patch | 14
-rw-r--r--  debian/patches-rt/0012-x86-pkru-Provide-.-_pkru_ins-functions.patch | 78
-rw-r--r--  debian/patches-rt/0013-printk-track-seq-per-console.patch | 12
-rw-r--r--  debian/patches-rt/0013-x86-fpu-Only-write-PKRU-if-it-is-different-from-curr.patch | 33
-rw-r--r--  debian/patches-rt/0014-printk-do-boot_delay_msec-inside-printk_delay.patch | 14
-rw-r--r--  debian/patches-rt/0014-x86-pkeys-Don-t-check-if-PKRU-is-zero-before-writtin.patch | 33
-rw-r--r--  debian/patches-rt/0015-printk-print-history-for-new-consoles.patch | 6
-rw-r--r--  debian/patches-rt/0015-x86-fpu-Eager-switch-PKRU-state.patch | 111
-rw-r--r--  debian/patches-rt/0016-printk-implement-CON_PRINTBUFFER.patch | 32
-rw-r--r--  debian/patches-rt/0016-x86-entry-Add-TIF_NEED_FPU_LOAD.patch | 57
-rw-r--r--  debian/patches-rt/0017-printk-add-processor-number-to-output.patch | 52
-rw-r--r--  debian/patches-rt/0017-x86-fpu-Always-store-the-registers-in-copy_fpstate_t.patch | 71
-rw-r--r--  debian/patches-rt/0018-console-add-write_atomic-interface.patch | 6
-rw-r--r--  debian/patches-rt/0018-x86-fpu-Prepare-copy_fpstate_to_sigframe-for-TIF_NEE.patch | 36
-rw-r--r--  debian/patches-rt/0019-printk-introduce-emergency-messages.patch | 36
-rw-r--r--  debian/patches-rt/0019-x86-fpu-Update-xstate-s-PKRU-value-on-write_pkru.patch | 53
-rw-r--r--  debian/patches-rt/0020-serial-8250-implement-write_atomic.patch | 8
-rw-r--r--  debian/patches-rt/0020-x86-fpu-Inline-copy_user_to_fpregs_zeroing.patch | 47
-rw-r--r--  debian/patches-rt/0021-printk-implement-KERN_CONT.patch | 61
-rw-r--r--  debian/patches-rt/0021-x86-fpu-Let-__fpu__restore_sig-restore-the-32bit-fxs.patch | 196
-rw-r--r--  debian/patches-rt/0022-printk-implement-dev-kmsg.patch | 16
-rw-r--r--  debian/patches-rt/0022-x86-fpu-Merge-the-two-code-paths-in-__fpu__restore_s.patch | 195
-rw-r--r--  debian/patches-rt/0023-printk-implement-syslog.patch | 14
-rw-r--r--  debian/patches-rt/0023-x86-fpu-Defer-FPU-state-load-until-return-to-userspa.patch | 591
-rw-r--r--  debian/patches-rt/0024-printk-implement-kmsg_dump.patch | 22
-rw-r--r--  debian/patches-rt/0024-x86-fpu-Add-a-fastpath-to-__fpu__restore_sig.patch | 53
-rw-r--r--  debian/patches-rt/0025-printk-remove-unused-code.patch | 61
-rw-r--r--  debian/patches-rt/0025-x86-fpu-Add-a-fastpath-to-copy_fpstate_to_sigframe.patch | 79
-rw-r--r--  debian/patches-rt/0026-x86-fpu-Restore-FPU-register-in-copy_fpstate_to_sigf.patch | 68
-rw-r--r--  debian/patches-rt/0027-x86-pkeys-add-PKRU-value-to-init_fpstate.patch | 74
-rw-r--r--  debian/patches-rt/0028-x86-fpu-Fault-in-user-stack-if-copy_fpstate_to_sigfr.patch | 105
-rw-r--r--  debian/patches-rt/0029-x86-fpu-Remove-unnecessary-saving-of-FPU-registers-i.patch | 67
-rw-r--r--  debian/patches-rt/ARM-enable-irq-in-translation-section-permission-fau.patch | 6
-rw-r--r--  debian/patches-rt/Drivers-hv-vmbus-include-header-for-get_irq_regs.patch | 4
-rw-r--r--  debian/patches-rt/EXP-rcu-skip_workqueue.patch | 28
-rw-r--r--  debian/patches-rt/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch | 8
-rw-r--r--  debian/patches-rt/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch | 14
-rw-r--r--  debian/patches-rt/add_migrate_disable.patch | 23
-rw-r--r--  debian/patches-rt/apparmor-use-a-locallock-instead-preempt_disable.patch | 10
-rw-r--r--  debian/patches-rt/arch-arm64-Add-lazy-preempt-support.patch | 53
-rw-r--r--  debian/patches-rt/arm-disable-NEON-in-kernel-mode.patch | 4
-rw-r--r--  debian/patches-rt/arm-enable-highmem-for-rt.patch | 14
-rw-r--r--  debian/patches-rt/arm-highmem-flush-tlb-on-unmap.patch | 4
-rw-r--r--  debian/patches-rt/arm-imx6-cpuidle-Use-raw_spinlock_t.patch | 43
-rw-r--r--  debian/patches-rt/arm-include-definition-for-cpumask_t.patch | 2
-rw-r--r--  debian/patches-rt/arm-preempt-lazy-support.patch | 24
-rw-r--r--  debian/patches-rt/arm-remove-printk_nmi_.patch | 4
-rw-r--r--  debian/patches-rt/arm64-KVM-compute_layout-before-altenates-are-applie.patch | 10
-rw-r--r--  debian/patches-rt/arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch | 36
-rw-r--r--  debian/patches-rt/at91_dont_enable_disable_clock.patch | 2
-rw-r--r--  debian/patches-rt/block-blk-mq-move-blk_queue_usage_counter_release-in.patch | 24
-rw-r--r--  debian/patches-rt/block-mq-don-t-complete-requests-via-IPI.patch | 4
-rw-r--r--  debian/patches-rt/block-mq-drop-preempt-disable.patch | 8
-rw-r--r--  debian/patches-rt/block-mq-use-cpu_light.patch | 2
-rw-r--r--  debian/patches-rt/block-use-cpu-chill.patch | 2
-rw-r--r--  debian/patches-rt/cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch | 6
-rw-r--r--  debian/patches-rt/cgroups-use-simple-wait-in-css_release.patch | 71
-rw-r--r--  debian/patches-rt/clocksource-tclib-add-proper-depend.patch | 25
-rw-r--r--  debian/patches-rt/clocksource-tclib-allow-higher-clockrates.patch | 4
-rw-r--r--  debian/patches-rt/completion-use-simple-wait-queues.patch | 22
-rw-r--r--  debian/patches-rt/cond-resched-lock-rt-tweak.patch | 4
-rw-r--r--  debian/patches-rt/connector-cn_proc-Protect-send_msg-with-a-local-lock.patch | 8
-rw-r--r--  debian/patches-rt/cpu-hotplug--Implement-CPU-pinning.patch | 16
-rw-r--r--  debian/patches-rt/cpufreq-drop-K8-s-driver-from-beeing-selected.patch | 4
-rw-r--r--  debian/patches-rt/cpumask-disable-offstack-on-rt.patch | 6
-rw-r--r--  debian/patches-rt/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch | 50
-rw-r--r--  debian/patches-rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch | 38
-rw-r--r--  debian/patches-rt/crypto-chtls-remove-cdev_list_lock.patch | 26
-rw-r--r--  debian/patches-rt/crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch | 12
-rw-r--r--  debian/patches-rt/crypto-limit-more-FPU-enabled-sections.patch | 8
-rw-r--r--  debian/patches-rt/crypto-scompress-serialize-RT-percpu-scratch-buffer-.patch | 77
-rw-r--r--  debian/patches-rt/crypto-user-remove-crypto_cfg_mutex.patch | 26
-rw-r--r--  debian/patches-rt/debugobjects-rt.patch | 2
-rw-r--r--  debian/patches-rt/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch | 4
-rw-r--r--  debian/patches-rt/drivers-tty-fix-omap-lock-crap.patch | 2
-rw-r--r--  debian/patches-rt/drivers-tty-pl011-irq-disable-madness.patch | 2
-rw-r--r--  debian/patches-rt/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch | 14
-rw-r--r--  debian/patches-rt/drm-i915-Don-t-disable-interrupts-independently-of-t.patch | 6
-rw-r--r--  debian/patches-rt/drm-i915-disable-tracing-on-RT.patch | 2
-rw-r--r--  debian/patches-rt/drm-i915-fence-Do-not-use-TIMER_IRQSAFE.patch | 35
-rw-r--r--  debian/patches-rt/drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch | 4
-rw-r--r--  debian/patches-rt/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch | 24
-rw-r--r--  debian/patches-rt/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch | 10
-rw-r--r--  debian/patches-rt/efi-Allow-efi-runtime.patch | 4
-rw-r--r--  debian/patches-rt/efi-Disable-runtime-services-on-RT.patch | 4
-rw-r--r--  debian/patches-rt/epoll-use-get-cpu-light.patch | 4
-rw-r--r--  debian/patches-rt/fs-aio-simple-simple-work.patch | 16
-rw-r--r--  debian/patches-rt/fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-in.patch | 8
-rw-r--r--  debian/patches-rt/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch | 20
-rw-r--r--  debian/patches-rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch | 10
-rw-r--r--  debian/patches-rt/fs-dcache-use-swait_queue-instead-of-waitqueue.patch | 38
-rw-r--r--  debian/patches-rt/fs-jbd-replace-bh_state-lock.patch | 4
-rw-r--r--  debian/patches-rt/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch | 20
-rw-r--r--  debian/patches-rt/fs-replace-bh_uptodate_lock-for-rt.patch | 22
-rw-r--r--  debian/patches-rt/fscache-initialize-cookie-hash-table-raw-spinlocks.patch | 8
-rw-r--r--  debian/patches-rt/ftrace-Fix-trace-header-alignment.patch | 4
-rw-r--r--  debian/patches-rt/ftrace-migrate-disable-tracing.patch | 6
-rw-r--r--  debian/patches-rt/futex-Delay-deallocation-of-pi_state.patch | 174
-rw-r--r--  debian/patches-rt/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch | 43
-rw-r--r--  debian/patches-rt/futex-Make-the-futex_hash_bucket-lock-raw.patch | 331
-rw-r--r--  debian/patches-rt/futex-requeue-pi-fix.patch | 113
-rw-r--r--  debian/patches-rt/futex-workaround-migrate_disable-enable-in-different.patch | 61
-rw-r--r--  debian/patches-rt/genirq-disable-irqpoll-on-rt.patch | 2
-rw-r--r--  debian/patches-rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch | 103
-rw-r--r--  debian/patches-rt/genirq-force-threading.patch | 4
-rw-r--r--  debian/patches-rt/genirq-update-irq_set_irqchip_state-documentation.patch | 4
-rw-r--r--  debian/patches-rt/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch | 18
-rw-r--r--  debian/patches-rt/hotplug-light-get-online-cpus.patch | 18
-rw-r--r--  debian/patches-rt/hrtimer-Introduce-expiry-spin-lock.patch | 103
-rw-r--r--  debian/patches-rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch | 85
-rw-r--r--  debian/patches-rt/hrtimer-by-timers-by-default-into-the-softirq-context.patch | 32
-rw-r--r--  debian/patches-rt/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch | 30
-rw-r--r--  debian/patches-rt/hrtimer-move-state-change-before-hrtimer_cancel-in-d.patch | 4
-rw-r--r--  debian/patches-rt/hrtimers-prepare-full-preemption.patch | 273
-rw-r--r--  debian/patches-rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch | 147
-rw-r--r--  debian/patches-rt/irqwork-push_most_work_into_softirq_context.patch | 84
-rw-r--r--  debian/patches-rt/jump-label-rt.patch | 4
-rw-r--r--  debian/patches-rt/kconfig-disable-a-few-options-rt.patch | 4
-rw-r--r--  debian/patches-rt/kconfig-preempt-rt-full.patch | 4
-rw-r--r--  debian/patches-rt/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch | 108
-rw-r--r--  debian/patches-rt/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch | 10
-rw-r--r--  debian/patches-rt/kmemleak-Turn-kmemleak_lock-to-raw-spinlock-on-RT.patch | 18
-rw-r--r--  debian/patches-rt/kthread-Do-not-use-TIMER_IRQSAFE.patch | 68
-rw-r--r--  debian/patches-rt/kthread-add-a-global-worker-thread.patch | 145
-rw-r--r--  debian/patches-rt/kthread-convert-worker-lock-to-raw-spinlock.patch | 204
-rw-r--r--  debian/patches-rt/leds-trigger-disable-CPU-trigger-on-RT.patch | 4
-rw-r--r--  debian/patches-rt/list_bl-fixup-bogus-lockdep-warning.patch | 2
-rw-r--r--  debian/patches-rt/list_bl.h-make-list-head-locking-RT-safe.patch | 4
-rw-r--r--  debian/patches-rt/locallock-provide-get-put-_locked_ptr-variants.patch | 2
-rw-r--r--  debian/patches-rt/localversion.patch | 4
-rw-r--r--  debian/patches-rt/lockdep-disable-self-test.patch | 4
-rw-r--r--  debian/patches-rt/lockdep-no-softirq-accounting-on-rt.patch | 6
-rw-r--r--  debian/patches-rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch | 2
-rw-r--r--  debian/patches-rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch | 6
-rw-r--r--  debian/patches-rt/locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch | 14
-rw-r--r--  debian/patches-rt/locking-lockdep-Don-t-complain-about-incorrect-name-.patch | 44
-rw-r--r--  debian/patches-rt/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch | 4
-rw-r--r--  debian/patches-rt/locking-rt-mutex-Flush-block-plug-on-__down_read.patch | 2
-rw-r--r--  debian/patches-rt/locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch | 8
-rw-r--r--  debian/patches-rt/locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch | 4
-rw-r--r--  debian/patches-rt/md-disable-bcache.patch | 4
-rw-r--r--  debian/patches-rt/md-raid5-percpu-handling-rt-aware.patch | 12
-rw-r--r--  debian/patches-rt/mips-disable-highmem-on-rt.patch | 4
-rw-r--r--  debian/patches-rt/mm-convert-swap-to-percpu-locked.patch | 32
-rw-r--r--  debian/patches-rt/mm-disable-sloub-rt.patch | 6
-rw-r--r--  debian/patches-rt/mm-enable-slub.patch | 6
-rw-r--r--  debian/patches-rt/mm-make-vmstat-rt-aware.patch | 26
-rw-r--r--  debian/patches-rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch | 6
-rw-r--r--  debian/patches-rt/mm-memcontrol-do_not_disable_irq.patch | 22
-rw-r--r--  debian/patches-rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch | 46
-rw-r--r--  debian/patches-rt/mm-perform-lru_add_drain_all-remotely.patch | 10
-rw-r--r--  debian/patches-rt/mm-protect-activate-switch-mm.patch | 4
-rw-r--r--  debian/patches-rt/mm-rt-kmap-atomic-scheduling.patch | 26
-rw-r--r--  debian/patches-rt/mm-scatterlist-dont-disable-irqs-on-RT.patch | 4
-rw-r--r--  debian/patches-rt/mm-vmalloc-use-get-cpu-light.patch | 12
-rw-r--r--  debian/patches-rt/mm-workingset-replace-IRQ-off-check-with-a-lockdep-a.patch | 6
-rw-r--r--  debian/patches-rt/mm-zswap-Do-not-disable-preemption-in-zswap_frontswa.patch | 118
-rw-r--r--  debian/patches-rt/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch | 2
-rw-r--r--  debian/patches-rt/mutex-no-spin-on-rt.patch | 8
-rw-r--r--  debian/patches-rt/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch | 8
-rw-r--r--  debian/patches-rt/net-Qdisc-use-a-seqlock-instead-seqcount.patch | 34
-rw-r--r--  debian/patches-rt/net-add-a-lock-around-icmp_sk.patch | 59
-rw-r--r--  debian/patches-rt/net-add-back-the-missing-serialization-in-ip_send_un.patch | 93
-rw-r--r--  debian/patches-rt/net-another-local-irq-disable-alloc-atomic-headache.patch | 10
-rw-r--r--  debian/patches-rt/net-core-protect-users-of-napi_alloc_cache-against-r.patch | 16
-rw-r--r--  debian/patches-rt/net-core-use-local_bh_disable-in-netif_rx_ni.patch | 4
-rw-r--r--  debian/patches-rt/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch | 4
-rw-r--r--  debian/patches-rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch | 10
-rw-r--r--  debian/patches-rt/net-make-devnet_rename_seq-a-mutex.patch | 14
-rw-r--r--  debian/patches-rt/net-move-xmit_recursion-to-per-task-variable-on-RT.patch | 266
-rw-r--r--  debian/patches-rt/net-provide-a-way-to-delegate-processing-a-softirq-t.patch | 79
-rw-r--r--  debian/patches-rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch | 4
-rw-r--r--  debian/patches-rt/net-use-cpu-chill.patch | 8
-rw-r--r--  debian/patches-rt/net_disable_NET_RX_BUSY_POLL.patch | 4
-rw-r--r--  debian/patches-rt/of-allocate-free-phandle-cache-outside-of-the-devtre.patch | 2
-rw-r--r--  debian/patches-rt/oleg-signal-rt-fix.patch | 10
-rw-r--r--  debian/patches-rt/panic-disable-random-on-rt.patch | 4
-rw-r--r--  debian/patches-rt/pci-switchtec-Don-t-use-completion-s-wait-queue.patch | 4
-rw-r--r--  debian/patches-rt/percpu-include-irqflags.h-for-raw_local_irq_save.patch | 2
-rw-r--r--  debian/patches-rt/peterz-percpu-rwsem-rt.patch | 219
-rw-r--r--  debian/patches-rt/pid.h-include-atomic.h.patch | 2
-rw-r--r--  debian/patches-rt/posix-timers-expiry-lock.patch | 270
-rw-r--r--  debian/patches-rt/posix-timers-move-rcu-out-of-union.patch | 52
-rw-r--r--  debian/patches-rt/posix-timers-thread-posix-cpu-timers-on-rt.patch | 17
-rw-r--r--  debian/patches-rt/power-disable-highmem-on-rt.patch | 4
-rw-r--r--  debian/patches-rt/power-use-generic-rwsem-on-rt.patch | 27
-rw-r--r--  debian/patches-rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch | 4
-rw-r--r--  debian/patches-rt/powerpc-preempt-lazy-support.patch | 100
-rw-r--r--  debian/patches-rt/powerpc-pseries-iommu-Use-a-locallock-instead-local_ir.patch | 16
-rw-r--r--  debian/patches-rt/powerpc-stackprotector-work-around-stack-guard-init-.patch | 2
-rw-r--r--  debian/patches-rt/preempt-lazy-support.patch | 96
-rw-r--r--  debian/patches-rt/preempt-nort-rt-variants.patch | 6
-rw-r--r--  debian/patches-rt/printk-devkmsg-llseek-reset-clear-if-it-is-lost.patch | 8
-rw-r--r--  debian/patches-rt/printk-kmsg_dump-remove-mutex-usage.patch | 14
-rw-r--r--  debian/patches-rt/printk-only-allow-kernel-to-emergency-message.patch | 19
-rw-r--r--  debian/patches-rt/printk-print-rate-limitted-message-as-info.patch | 2
-rw-r--r--  debian/patches-rt/printk-set-deferred-to-default-loglevel-enforce-mask.patch | 8
-rw-r--r--  debian/patches-rt/psi-replace-delayed-work-with-timer-work.patch | 140
-rw-r--r--  debian/patches-rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch | 12
-rw-r--r--  debian/patches-rt/radix-tree-use-local-locks.patch | 18
-rw-r--r--  debian/patches-rt/random-avoid-preempt_disable-ed-section.patch | 75
-rw-r--r--  debian/patches-rt/random-make-it-work-on-rt.patch | 14
-rw-r--r--  debian/patches-rt/rcu-Eliminate-softirq-processing-from-rcutree.patch | 302
-rw-r--r--  debian/patches-rt/rcu-disable-rcu-fast-no-hz-on-rt.patch | 25
-rw-r--r--  debian/patches-rt/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch | 11
-rw-r--r--  debian/patches-rt/rcu-make-RCU_BOOST-default-on-RT.patch | 4
-rw-r--r--  debian/patches-rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch | 4
-rw-r--r--  debian/patches-rt/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch | 8
-rw-r--r--  debian/patches-rt/rt-introduce-cpu-chill.patch | 6
-rw-r--r--  debian/patches-rt/rt-local-irq-lock.patch | 2
-rw-r--r--  debian/patches-rt/rt-preempt-base-config.patch | 9
-rw-r--r--  debian/patches-rt/rt-serial-warn-fix.patch | 2
-rw-r--r--  debian/patches-rt/rtmutex-Make-lock_killable-work.patch | 4
-rw-r--r--  debian/patches-rt/rtmutex-Provide-rt_mutex_slowlock_locked.patch | 10
-rw-r--r--  debian/patches-rt/rtmutex-add-mutex-implementation-based-on-rtmutex.patch | 2
-rw-r--r--  debian/patches-rt/rtmutex-add-rwlock-implementation-based-on-rtmutex.patch | 2
-rw-r--r--  debian/patches-rt/rtmutex-add-rwsem-implementation-based-on-rtmutex.patch | 2
-rw-r--r--  debian/patches-rt/rtmutex-add-sleeping-lock-implementation.patch | 155
-rw-r--r--  debian/patches-rt/rtmutex-add-ww_mutex-addon-for-mutex-rt.patch | 32
-rw-r--r--  debian/patches-rt/rtmutex-annotate-sleeping-lock-context.patch | 22
-rw-r--r--  debian/patches-rt/rtmutex-avoid-include-hell.patch | 2
-rw-r--r--  debian/patches-rt/rtmutex-export-lockdep-less-version-of-rt_mutex-s-lo.patch | 16
-rw-r--r--  debian/patches-rt/rtmutex-futex-prepare-rt.patch | 245
-rw-r--r--  debian/patches-rt/rtmutex-lock-killable.patch | 4
-rw-r--r--  debian/patches-rt/rtmutex-trylock-is-okay-on-RT.patch | 4
-rw-r--r--  debian/patches-rt/rtmutex-wire-up-RT-s-locking.patch | 53
-rw-r--r--  debian/patches-rt/rtmutex_dont_include_rcu.patch | 40
-rw-r--r--  debian/patches-rt/sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch | 4
-rw-r--r--  debian/patches-rt/sched-completion-Fix-a-lockup-in-wait_for_completion.patch | 61
-rw-r--r--  debian/patches-rt/sched-core-Schedule-new-worker-even-if-PI-blocked.patch | 44
-rw-r--r--  debian/patches-rt/sched-delay-put-task.patch | 28
-rw-r--r--  debian/patches-rt/sched-disable-rt-group-sched-on-rt.patch | 4
-rw-r--r--  debian/patches-rt/sched-disable-ttwu-queue.patch | 2
-rw-r--r--  debian/patches-rt/sched-fair-Make-the-hrtimers-non-hard-again.patch | 4
-rw-r--r--  debian/patches-rt/sched-fair-Robustify-CFS-bandwidth-timer-locking.patch | 145
-rw-r--r--  debian/patches-rt/sched-limit-nr-migrate.patch | 4
-rw-r--r--  debian/patches-rt/sched-might-sleep-do-not-account-rcu-depth.patch | 8
-rw-r--r--  debian/patches-rt/sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch | 4
-rw-r--r--  debian/patches-rt/sched-migrate_disable-fallback-to-preempt_disable-in.patch | 18
-rw-r--r--  debian/patches-rt/sched-mmdrop-delayed.patch | 14
-rw-r--r--  debian/patches-rt/sched-rt-mutex-wakeup.patch | 12
-rw-r--r--  debian/patches-rt/sched-swait-Add-swait_event_lock_irq.patch | 33
-rw-r--r--  debian/patches-rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch | 38
-rw-r--r--  debian/patches-rt/scsi-fcoe-rt-aware.patch | 16
-rw-r--r--  debian/patches-rt/seqlock-prevent-rt-starvation.patch | 14
-rw-r--r--  debian/patches-rt/serial-8250-export-symbols-which-are-used-by-symbols.patch | 4
-rw-r--r--  debian/patches-rt/serial-8250-remove-that-trylock-in-serial8250_consol.patch | 2
-rw-r--r--  debian/patches-rt/series | 128
-rw-r--r--  debian/patches-rt/signal-revert-ptrace-preempt-magic.patch | 8
-rw-r--r--  debian/patches-rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch | 30
-rw-r--r--  debian/patches-rt/skbufhead-raw-lock.patch | 24
-rw-r--r--  debian/patches-rt/slub-disable-SLUB_CPU_PARTIAL.patch | 4
-rw-r--r--  debian/patches-rt/slub-enable-irqs-for-no-wait.patch | 6
-rw-r--r--  debian/patches-rt/softirq-Add-preemptible-softirq.patch | 477
-rw-r--r--  debian/patches-rt/softirq-Avoid-a-cancel-dead-lock-in-tasklet-handling.patch | 49
-rw-r--r--  debian/patches-rt/softirq-disable-softirq-stacks-for-rt.patch | 35
-rw-r--r--  debian/patches-rt/softirq-preempt-fix-3-re.patch | 20
-rw-r--r--  debian/patches-rt/softirq-split-locks.patch | 844
-rw-r--r--  debian/patches-rt/softirq-split-timer-softirqs-out-of-ksoftirqd.patch | 240
-rw-r--r--  debian/patches-rt/spinlock-types-separate-raw.patch | 2
-rw-r--r--  debian/patches-rt/squashfs-make-use-of-local-lock-in-multi_cpu-decompr.patch | 8
-rw-r--r--  debian/patches-rt/srcu-Remove-srcu_queue_delayed_work_on.patch | 188
-rw-r--r--  debian/patches-rt/srcu-replace-local_irqsave-with-a-locallock.patch | 12
-rw-r--r--  debian/patches-rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch | 6
-rw-r--r--  debian/patches-rt/sysfs-realtime-entry.patch | 6
-rw-r--r--  debian/patches-rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch | 295
-rw-r--r--  debian/patches-rt/thermal-Defer-thermal-wakups-to-threads.patch | 89
-rw-r--r--  debian/patches-rt/time-hrtimer-avoid-schedule_work-with-interrupts-dis.patch | 46
-rw-r--r--  debian/patches-rt/timekeeping-split-jiffies-lock.patch | 10
-rw-r--r--  debian/patches-rt/timers-Drop-expiry-lock-after-each-timer-invocation.patch | 50
-rw-r--r--  debian/patches-rt/timers-Introduce-expiry-spin-lock.patch | 153
-rw-r--r--  debian/patches-rt/timers-prepare-for-full-preemption.patch | 139
-rw-r--r--  debian/patches-rt/tpm-remove-tpm_dev_wq_lock.patch | 8
-rw-r--r--  debian/patches-rt/tpm_tis-fix-stall-after-iowrite-s.patch | 8
-rw-r--r--  debian/patches-rt/tty-serial-pl011-warning-about-uninitialized.patch | 2
-rw-r--r--  debian/patches-rt/tty-sysrq-Convert-show_lock-to-raw_spinlock_t.patch | 56
-rw-r--r--  debian/patches-rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch | 6
-rw-r--r--  debian/patches-rt/wait.h-include-atomic.h.patch | 2
-rw-r--r--  debian/patches-rt/watchdog-prevent-deferral-of-watchdogd-wakeup-on-RT.patch | 2
-rw-r--r--  debian/patches-rt/work-queue-work-around-irqsafe-timer-optimization.patch | 133
-rw-r--r--  debian/patches-rt/workqueue-Convert-the-locks-to-raw-type.patch | 697
-rw-r--r--  debian/patches-rt/workqueue-Make-alloc-apply-free_workqueue_attrs-stat.patch | 65
-rw-r--r--  debian/patches-rt/workqueue-Remove-GPF-argument-from-alloc_workqueue_a.patch | 106
-rw-r--r--  debian/patches-rt/workqueue-distangle-from-rq-lock.patch | 283
-rw-r--r--  debian/patches-rt/workqueue-prevent-deadlock-stall.patch | 200
-rw-r--r--  debian/patches-rt/workqueue-use-locallock.patch | 179
-rw-r--r--  debian/patches-rt/workqueue-use-rcu.patch | 317
-rw-r--r--  debian/patches-rt/x86-Disable-HAVE_ARCH_JUMP_LABEL.patch | 33
-rw-r--r--  debian/patches-rt/x86-crypto-reduce-preempt-disabled-regions.patch | 12
-rw-r--r--  debian/patches-rt/x86-highmem-add-a-already-used-pte-check.patch | 4
-rw-r--r--  debian/patches-rt/x86-ima-Check-EFI_RUNTIME_SERVICES-before-using.patch | 32
-rw-r--r--  debian/patches-rt/x86-ioapic-Don-t-let-setaffinity-unmask-threaded-EOI.patch | 10
-rw-r--r--  debian/patches-rt/x86-kvm-require-const-tsc-for-rt.patch | 4
-rw-r--r--  debian/patches-rt/x86-ldt-Initialize-the-context-lock-for-init_mm.patch | 28
-rw-r--r--  debian/patches-rt/x86-preempt-lazy.patch | 22
-rw-r--r--  debian/patches-rt/x86-signal-delay-calling-signals-on-32bit.patch | 2
-rw-r--r--  debian/patches-rt/x86-stackprot-no-random-on-rt.patch | 2
-rw-r--r--  debian/patches-rt/x86-use-gen-rwsem-spinlocks-rt.patch | 29
333 files changed, 4828 insertions, 13456 deletions
diff --git a/debian/patches-rt/0001-ARM-at91-move-SoC-specific-definitions-to-SoC-folder.patch b/debian/patches-rt/0001-ARM-at91-move-SoC-specific-definitions-to-SoC-folder.patch
deleted file mode 100644
index 31fe0f698..000000000
--- a/debian/patches-rt/0001-ARM-at91-move-SoC-specific-definitions-to-SoC-folder.patch
+++ /dev/null
@@ -1,607 +0,0 @@
-From: Alexandre Belloni <alexandre.belloni@bootlin.com>
-Date: Fri, 26 Apr 2019 23:47:10 +0200
-Subject: [PATCH 01/10] ARM: at91: move SoC specific definitions to SoC folder
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-Move linux/atmel_tc.h to the SoC specific folder include/soc/at91.
-
-Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
-Acked-by: Thierry Reding <thierry.reding@gmail.com>
-Acked-by: Arnd Bergmann <arnd@arndb.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/clocksource/tcb_clksrc.c | 2
- drivers/misc/atmel_tclib.c | 2
- drivers/pwm/pwm-atmel-tcb.c | 2
- include/linux/atmel_tc.h | 270 ---------------------------------------
- include/soc/at91/atmel_tcb.h | 270 +++++++++++++++++++++++++++++++++++++++
- 5 files changed, 273 insertions(+), 273 deletions(-)
- delete mode 100644 include/linux/atmel_tc.h
- create mode 100644 include/soc/at91/atmel_tcb.h
-
---- a/drivers/clocksource/tcb_clksrc.c
-+++ b/drivers/clocksource/tcb_clksrc.c
-@@ -11,7 +11,7 @@
- #include <linux/io.h>
- #include <linux/platform_device.h>
- #include <linux/syscore_ops.h>
--#include <linux/atmel_tc.h>
-+#include <soc/at91/atmel_tcb.h>
-
-
- /*
---- a/drivers/misc/atmel_tclib.c
-+++ b/drivers/misc/atmel_tclib.c
-@@ -1,4 +1,3 @@
--#include <linux/atmel_tc.h>
- #include <linux/clk.h>
- #include <linux/err.h>
- #include <linux/init.h>
-@@ -10,6 +9,7 @@
- #include <linux/slab.h>
- #include <linux/export.h>
- #include <linux/of.h>
-+#include <soc/at91/atmel_tcb.h>
-
- /*
- * This is a thin library to solve the problem of how to portably allocate
---- a/drivers/pwm/pwm-atmel-tcb.c
-+++ b/drivers/pwm/pwm-atmel-tcb.c
-@@ -17,10 +17,10 @@
- #include <linux/ioport.h>
- #include <linux/io.h>
- #include <linux/platform_device.h>
--#include <linux/atmel_tc.h>
- #include <linux/pwm.h>
- #include <linux/of_device.h>
- #include <linux/slab.h>
-+#include <soc/at91/atmel_tcb.h>
-
- #define NPWM 6
-
---- a/include/linux/atmel_tc.h
-+++ /dev/null
-@@ -1,270 +0,0 @@
--/*
-- * Timer/Counter Unit (TC) registers.
-- *
-- * This program is free software; you can redistribute it and/or modify
-- * it under the terms of the GNU General Public License as published by
-- * the Free Software Foundation; either version 2 of the License, or
-- * (at your option) any later version.
-- */
--
--#ifndef ATMEL_TC_H
--#define ATMEL_TC_H
--
--#include <linux/compiler.h>
--#include <linux/list.h>
--
--/*
-- * Many 32-bit Atmel SOCs include one or more TC blocks, each of which holds
-- * three general-purpose 16-bit timers. These timers share one register bank.
-- * Depending on the SOC, each timer may have its own clock and IRQ, or those
-- * may be shared by the whole TC block.
-- *
-- * These TC blocks may have up to nine external pins: TCLK0..2 signals for
-- * clocks or clock gates, and per-timer TIOA and TIOB signals used for PWM
-- * or triggering. Those pins need to be set up for use with the TC block,
-- * else they will be used as GPIOs or for a different controller.
-- *
-- * Although we expect each TC block to have a platform_device node, those
-- * nodes are not what drivers bind to. Instead, they ask for a specific
-- * TC block, by number ... which is a common approach on systems with many
-- * timers. Then they use clk_get() and platform_get_irq() to get clock and
-- * IRQ resources.
-- */
--
--struct clk;
--
--/**
-- * struct atmel_tcb_config - SoC data for a Timer/Counter Block
-- * @counter_width: size in bits of a timer counter register
-- */
--struct atmel_tcb_config {
-- size_t counter_width;
--};
--
--/**
-- * struct atmel_tc - information about a Timer/Counter Block
-- * @pdev: physical device
-- * @regs: mapping through which the I/O registers can be accessed
-- * @id: block id
-- * @tcb_config: configuration data from SoC
-- * @irq: irq for each of the three channels
-- * @clk: internal clock source for each of the three channels
-- * @node: list node, for tclib internal use
-- * @allocated: if already used, for tclib internal use
-- *
-- * On some platforms, each TC channel has its own clocks and IRQs,
-- * while on others, all TC channels share the same clock and IRQ.
-- * Drivers should clk_enable() all the clocks they need even though
-- * all the entries in @clk may point to the same physical clock.
-- * Likewise, drivers should request irqs independently for each
-- * channel, but they must use IRQF_SHARED in case some of the entries
-- * in @irq are actually the same IRQ.
-- */
--struct atmel_tc {
-- struct platform_device *pdev;
-- void __iomem *regs;
-- int id;
-- const struct atmel_tcb_config *tcb_config;
-- int irq[3];
-- struct clk *clk[3];
-- struct clk *slow_clk;
-- struct list_head node;
-- bool allocated;
--};
--
--extern struct atmel_tc *atmel_tc_alloc(unsigned block);
--extern void atmel_tc_free(struct atmel_tc *tc);
--
--/* platform-specific ATMEL_TC_TIMER_CLOCKx divisors (0 means 32KiHz) */
--extern const u8 atmel_tc_divisors[5];
--
--
--/*
-- * Two registers have block-wide controls. These are: configuring the three
-- * "external" clocks (or event sources) used by the timer channels; and
-- * synchronizing the timers by resetting them all at once.
-- *
-- * "External" can mean "external to chip" using the TCLK0, TCLK1, or TCLK2
-- * signals. Or, it can mean "external to timer", using the TIOA output from
-- * one of the other two timers that's being run in waveform mode.
-- */
--
--#define ATMEL_TC_BCR 0xc0 /* TC Block Control Register */
--#define ATMEL_TC_SYNC (1 << 0) /* synchronize timers */
--
--#define ATMEL_TC_BMR 0xc4 /* TC Block Mode Register */
--#define ATMEL_TC_TC0XC0S (3 << 0) /* external clock 0 source */
--#define ATMEL_TC_TC0XC0S_TCLK0 (0 << 0)
--#define ATMEL_TC_TC0XC0S_NONE (1 << 0)
--#define ATMEL_TC_TC0XC0S_TIOA1 (2 << 0)
--#define ATMEL_TC_TC0XC0S_TIOA2 (3 << 0)
--#define ATMEL_TC_TC1XC1S (3 << 2) /* external clock 1 source */
--#define ATMEL_TC_TC1XC1S_TCLK1 (0 << 2)
--#define ATMEL_TC_TC1XC1S_NONE (1 << 2)
--#define ATMEL_TC_TC1XC1S_TIOA0 (2 << 2)
--#define ATMEL_TC_TC1XC1S_TIOA2 (3 << 2)
--#define ATMEL_TC_TC2XC2S (3 << 4) /* external clock 2 source */
--#define ATMEL_TC_TC2XC2S_TCLK2 (0 << 4)
--#define ATMEL_TC_TC2XC2S_NONE (1 << 4)
--#define ATMEL_TC_TC2XC2S_TIOA0 (2 << 4)
--#define ATMEL_TC_TC2XC2S_TIOA1 (3 << 4)
--
--
--/*
-- * Each TC block has three "channels", each with one counter and controls.
-- *
-- * Note that the semantics of ATMEL_TC_TIMER_CLOCKx (input clock selection
-- * when it's not "external") is silicon-specific. AT91 platforms use one
-- * set of definitions; AVR32 platforms use a different set. Don't hard-wire
-- * such knowledge into your code, use the global "atmel_tc_divisors" ...
-- * where index N is the divisor for clock N+1, else zero to indicate it uses
-- * the 32 KiHz clock.
-- *
-- * The timers can be chained in various ways, and operated in "waveform"
-- * generation mode (including PWM) or "capture" mode (to time events). In
-- * both modes, behavior can be configured in many ways.
-- *
-- * Each timer has two I/O pins, TIOA and TIOB. Waveform mode uses TIOA as a
-- * PWM output, and TIOB as either another PWM or as a trigger. Capture mode
-- * uses them only as inputs.
-- */
--#define ATMEL_TC_CHAN(idx) ((idx)*0x40)
--#define ATMEL_TC_REG(idx, reg) (ATMEL_TC_CHAN(idx) + ATMEL_TC_ ## reg)
--
--#define ATMEL_TC_CCR 0x00 /* Channel Control Register */
--#define ATMEL_TC_CLKEN (1 << 0) /* clock enable */
--#define ATMEL_TC_CLKDIS (1 << 1) /* clock disable */
--#define ATMEL_TC_SWTRG (1 << 2) /* software trigger */
--
--#define ATMEL_TC_CMR 0x04 /* Channel Mode Register */
--
--/* Both modes share some CMR bits */
--#define ATMEL_TC_TCCLKS (7 << 0) /* clock source */
--#define ATMEL_TC_TIMER_CLOCK1 (0 << 0)
--#define ATMEL_TC_TIMER_CLOCK2 (1 << 0)
--#define ATMEL_TC_TIMER_CLOCK3 (2 << 0)
--#define ATMEL_TC_TIMER_CLOCK4 (3 << 0)
--#define ATMEL_TC_TIMER_CLOCK5 (4 << 0)
--#define ATMEL_TC_XC0 (5 << 0)
--#define ATMEL_TC_XC1 (6 << 0)
--#define ATMEL_TC_XC2 (7 << 0)
--#define ATMEL_TC_CLKI (1 << 3) /* clock invert */
--#define ATMEL_TC_BURST (3 << 4) /* clock gating */
--#define ATMEL_TC_GATE_NONE (0 << 4)
--#define ATMEL_TC_GATE_XC0 (1 << 4)
--#define ATMEL_TC_GATE_XC1 (2 << 4)
--#define ATMEL_TC_GATE_XC2 (3 << 4)
--#define ATMEL_TC_WAVE (1 << 15) /* true = Waveform mode */
--
--/* CAPTURE mode CMR bits */
--#define ATMEL_TC_LDBSTOP (1 << 6) /* counter stops on RB load */
--#define ATMEL_TC_LDBDIS (1 << 7) /* counter disable on RB load */
--#define ATMEL_TC_ETRGEDG (3 << 8) /* external trigger edge */
--#define ATMEL_TC_ETRGEDG_NONE (0 << 8)
--#define ATMEL_TC_ETRGEDG_RISING (1 << 8)
--#define ATMEL_TC_ETRGEDG_FALLING (2 << 8)
--#define ATMEL_TC_ETRGEDG_BOTH (3 << 8)
--#define ATMEL_TC_ABETRG (1 << 10) /* external trigger is TIOA? */
--#define ATMEL_TC_CPCTRG (1 << 14) /* RC compare trigger enable */
--#define ATMEL_TC_LDRA (3 << 16) /* RA loading edge (of TIOA) */
--#define ATMEL_TC_LDRA_NONE (0 << 16)
--#define ATMEL_TC_LDRA_RISING (1 << 16)
--#define ATMEL_TC_LDRA_FALLING (2 << 16)
--#define ATMEL_TC_LDRA_BOTH (3 << 16)
--#define ATMEL_TC_LDRB (3 << 18) /* RB loading edge (of TIOA) */
--#define ATMEL_TC_LDRB_NONE (0 << 18)
--#define ATMEL_TC_LDRB_RISING (1 << 18)
--#define ATMEL_TC_LDRB_FALLING (2 << 18)
--#define ATMEL_TC_LDRB_BOTH (3 << 18)
--
--/* WAVEFORM mode CMR bits */
--#define ATMEL_TC_CPCSTOP (1 << 6) /* RC compare stops counter */
--#define ATMEL_TC_CPCDIS (1 << 7) /* RC compare disables counter */
--#define ATMEL_TC_EEVTEDG (3 << 8) /* external event edge */
--#define ATMEL_TC_EEVTEDG_NONE (0 << 8)
--#define ATMEL_TC_EEVTEDG_RISING (1 << 8)
--#define ATMEL_TC_EEVTEDG_FALLING (2 << 8)
--#define ATMEL_TC_EEVTEDG_BOTH (3 << 8)
--#define ATMEL_TC_EEVT (3 << 10) /* external event source */
--#define ATMEL_TC_EEVT_TIOB (0 << 10)
--#define ATMEL_TC_EEVT_XC0 (1 << 10)
--#define ATMEL_TC_EEVT_XC1 (2 << 10)
--#define ATMEL_TC_EEVT_XC2 (3 << 10)
--#define ATMEL_TC_ENETRG (1 << 12) /* external event is trigger */
--#define ATMEL_TC_WAVESEL (3 << 13) /* waveform type */
--#define ATMEL_TC_WAVESEL_UP (0 << 13)
--#define ATMEL_TC_WAVESEL_UPDOWN (1 << 13)
--#define ATMEL_TC_WAVESEL_UP_AUTO (2 << 13)
--#define ATMEL_TC_WAVESEL_UPDOWN_AUTO (3 << 13)
--#define ATMEL_TC_ACPA (3 << 16) /* RA compare changes TIOA */
--#define ATMEL_TC_ACPA_NONE (0 << 16)
--#define ATMEL_TC_ACPA_SET (1 << 16)
--#define ATMEL_TC_ACPA_CLEAR (2 << 16)
--#define ATMEL_TC_ACPA_TOGGLE (3 << 16)
--#define ATMEL_TC_ACPC (3 << 18) /* RC compare changes TIOA */
--#define ATMEL_TC_ACPC_NONE (0 << 18)
--#define ATMEL_TC_ACPC_SET (1 << 18)
--#define ATMEL_TC_ACPC_CLEAR (2 << 18)
--#define ATMEL_TC_ACPC_TOGGLE (3 << 18)
--#define ATMEL_TC_AEEVT (3 << 20) /* external event changes TIOA */
--#define ATMEL_TC_AEEVT_NONE (0 << 20)
--#define ATMEL_TC_AEEVT_SET (1 << 20)
--#define ATMEL_TC_AEEVT_CLEAR (2 << 20)
--#define ATMEL_TC_AEEVT_TOGGLE (3 << 20)
--#define ATMEL_TC_ASWTRG (3 << 22) /* software trigger changes TIOA */
--#define ATMEL_TC_ASWTRG_NONE (0 << 22)
--#define ATMEL_TC_ASWTRG_SET (1 << 22)
--#define ATMEL_TC_ASWTRG_CLEAR (2 << 22)
--#define ATMEL_TC_ASWTRG_TOGGLE (3 << 22)
--#define ATMEL_TC_BCPB (3 << 24) /* RB compare changes TIOB */
--#define ATMEL_TC_BCPB_NONE (0 << 24)
--#define ATMEL_TC_BCPB_SET (1 << 24)
--#define ATMEL_TC_BCPB_CLEAR (2 << 24)
--#define ATMEL_TC_BCPB_TOGGLE (3 << 24)
--#define ATMEL_TC_BCPC (3 << 26) /* RC compare changes TIOB */
--#define ATMEL_TC_BCPC_NONE (0 << 26)
--#define ATMEL_TC_BCPC_SET (1 << 26)
--#define ATMEL_TC_BCPC_CLEAR (2 << 26)
--#define ATMEL_TC_BCPC_TOGGLE (3 << 26)
--#define ATMEL_TC_BEEVT (3 << 28) /* external event changes TIOB */
--#define ATMEL_TC_BEEVT_NONE (0 << 28)
--#define ATMEL_TC_BEEVT_SET (1 << 28)
--#define ATMEL_TC_BEEVT_CLEAR (2 << 28)
--#define ATMEL_TC_BEEVT_TOGGLE (3 << 28)
--#define ATMEL_TC_BSWTRG (3 << 30) /* software trigger changes TIOB */
--#define ATMEL_TC_BSWTRG_NONE (0 << 30)
--#define ATMEL_TC_BSWTRG_SET (1 << 30)
--#define ATMEL_TC_BSWTRG_CLEAR (2 << 30)
--#define ATMEL_TC_BSWTRG_TOGGLE (3 << 30)
--
--#define ATMEL_TC_CV 0x10 /* counter Value */
--#define ATMEL_TC_RA 0x14 /* register A */
--#define ATMEL_TC_RB 0x18 /* register B */
--#define ATMEL_TC_RC 0x1c /* register C */
--
--#define ATMEL_TC_SR 0x20 /* status (read-only) */
--/* Status-only flags */
--#define ATMEL_TC_CLKSTA (1 << 16) /* clock enabled */
--#define ATMEL_TC_MTIOA (1 << 17) /* TIOA mirror */
--#define ATMEL_TC_MTIOB (1 << 18) /* TIOB mirror */
--
--#define ATMEL_TC_IER 0x24 /* interrupt enable (write-only) */
--#define ATMEL_TC_IDR 0x28 /* interrupt disable (write-only) */
--#define ATMEL_TC_IMR 0x2c /* interrupt mask (read-only) */
--
--/* Status and IRQ flags */
--#define ATMEL_TC_COVFS (1 << 0) /* counter overflow */
--#define ATMEL_TC_LOVRS (1 << 1) /* load overrun */
--#define ATMEL_TC_CPAS (1 << 2) /* RA compare */
--#define ATMEL_TC_CPBS (1 << 3) /* RB compare */
--#define ATMEL_TC_CPCS (1 << 4) /* RC compare */
--#define ATMEL_TC_LDRAS (1 << 5) /* RA loading */
--#define ATMEL_TC_LDRBS (1 << 6) /* RB loading */
--#define ATMEL_TC_ETRGS (1 << 7) /* external trigger */
--#define ATMEL_TC_ALL_IRQ (ATMEL_TC_COVFS | ATMEL_TC_LOVRS | \
-- ATMEL_TC_CPAS | ATMEL_TC_CPBS | \
-- ATMEL_TC_CPCS | ATMEL_TC_LDRAS | \
-- ATMEL_TC_LDRBS | ATMEL_TC_ETRGS) \
-- /* all IRQs */
--
--#endif
---- /dev/null
-+++ b/include/soc/at91/atmel_tcb.h
-@@ -0,0 +1,270 @@
-+/*
-+ * Timer/Counter Unit (TC) registers.
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ */
-+
-+#ifndef __SOC_ATMEL_TCB_H
-+#define __SOC_ATMEL_TCB_H
-+
-+#include <linux/compiler.h>
-+#include <linux/list.h>
-+
-+/*
-+ * Many 32-bit Atmel SOCs include one or more TC blocks, each of which holds
-+ * three general-purpose 16-bit timers. These timers share one register bank.
-+ * Depending on the SOC, each timer may have its own clock and IRQ, or those
-+ * may be shared by the whole TC block.
-+ *
-+ * These TC blocks may have up to nine external pins: TCLK0..2 signals for
-+ * clocks or clock gates, and per-timer TIOA and TIOB signals used for PWM
-+ * or triggering. Those pins need to be set up for use with the TC block,
-+ * else they will be used as GPIOs or for a different controller.
-+ *
-+ * Although we expect each TC block to have a platform_device node, those
-+ * nodes are not what drivers bind to. Instead, they ask for a specific
-+ * TC block, by number ... which is a common approach on systems with many
-+ * timers. Then they use clk_get() and platform_get_irq() to get clock and
-+ * IRQ resources.
-+ */
-+
-+struct clk;
-+
-+/**
-+ * struct atmel_tcb_config - SoC data for a Timer/Counter Block
-+ * @counter_width: size in bits of a timer counter register
-+ */
-+struct atmel_tcb_config {
-+ size_t counter_width;
-+};
-+
-+/**
-+ * struct atmel_tc - information about a Timer/Counter Block
-+ * @pdev: physical device
-+ * @regs: mapping through which the I/O registers can be accessed
-+ * @id: block id
-+ * @tcb_config: configuration data from SoC
-+ * @irq: irq for each of the three channels
-+ * @clk: internal clock source for each of the three channels
-+ * @node: list node, for tclib internal use
-+ * @allocated: if already used, for tclib internal use
-+ *
-+ * On some platforms, each TC channel has its own clocks and IRQs,
-+ * while on others, all TC channels share the same clock and IRQ.
-+ * Drivers should clk_enable() all the clocks they need even though
-+ * all the entries in @clk may point to the same physical clock.
-+ * Likewise, drivers should request irqs independently for each
-+ * channel, but they must use IRQF_SHARED in case some of the entries
-+ * in @irq are actually the same IRQ.
-+ */
-+struct atmel_tc {
-+ struct platform_device *pdev;
-+ void __iomem *regs;
-+ int id;
-+ const struct atmel_tcb_config *tcb_config;
-+ int irq[3];
-+ struct clk *clk[3];
-+ struct clk *slow_clk;
-+ struct list_head node;
-+ bool allocated;
-+};
-+
-+extern struct atmel_tc *atmel_tc_alloc(unsigned block);
-+extern void atmel_tc_free(struct atmel_tc *tc);
-+
-+/* platform-specific ATMEL_TC_TIMER_CLOCKx divisors (0 means 32KiHz) */
-+extern const u8 atmel_tc_divisors[5];
-+
-+
-+/*
-+ * Two registers have block-wide controls. These are: configuring the three
-+ * "external" clocks (or event sources) used by the timer channels; and
-+ * synchronizing the timers by resetting them all at once.
-+ *
-+ * "External" can mean "external to chip" using the TCLK0, TCLK1, or TCLK2
-+ * signals. Or, it can mean "external to timer", using the TIOA output from
-+ * one of the other two timers that's being run in waveform mode.
-+ */
-+
-+#define ATMEL_TC_BCR 0xc0 /* TC Block Control Register */
-+#define ATMEL_TC_SYNC (1 << 0) /* synchronize timers */
-+
-+#define ATMEL_TC_BMR 0xc4 /* TC Block Mode Register */
-+#define ATMEL_TC_TC0XC0S (3 << 0) /* external clock 0 source */
-+#define ATMEL_TC_TC0XC0S_TCLK0 (0 << 0)
-+#define ATMEL_TC_TC0XC0S_NONE (1 << 0)
-+#define ATMEL_TC_TC0XC0S_TIOA1 (2 << 0)
-+#define ATMEL_TC_TC0XC0S_TIOA2 (3 << 0)
-+#define ATMEL_TC_TC1XC1S (3 << 2) /* external clock 1 source */
-+#define ATMEL_TC_TC1XC1S_TCLK1 (0 << 2)
-+#define ATMEL_TC_TC1XC1S_NONE (1 << 2)
-+#define ATMEL_TC_TC1XC1S_TIOA0 (2 << 2)
-+#define ATMEL_TC_TC1XC1S_TIOA2 (3 << 2)
-+#define ATMEL_TC_TC2XC2S (3 << 4) /* external clock 2 source */
-+#define ATMEL_TC_TC2XC2S_TCLK2 (0 << 4)
-+#define ATMEL_TC_TC2XC2S_NONE (1 << 4)
-+#define ATMEL_TC_TC2XC2S_TIOA0 (2 << 4)
-+#define ATMEL_TC_TC2XC2S_TIOA1 (3 << 4)
-+
-+
-+/*
-+ * Each TC block has three "channels", each with one counter and controls.
-+ *
-+ * Note that the semantics of ATMEL_TC_TIMER_CLOCKx (input clock selection
-+ * when it's not "external") is silicon-specific. AT91 platforms use one
-+ * set of definitions; AVR32 platforms use a different set. Don't hard-wire
-+ * such knowledge into your code, use the global "atmel_tc_divisors" ...
-+ * where index N is the divisor for clock N+1, else zero to indicate it uses
-+ * the 32 KiHz clock.
-+ *
-+ * The timers can be chained in various ways, and operated in "waveform"
-+ * generation mode (including PWM) or "capture" mode (to time events). In
-+ * both modes, behavior can be configured in many ways.
-+ *
-+ * Each timer has two I/O pins, TIOA and TIOB. Waveform mode uses TIOA as a
-+ * PWM output, and TIOB as either another PWM or as a trigger. Capture mode
-+ * uses them only as inputs.
-+ */
-+#define ATMEL_TC_CHAN(idx) ((idx)*0x40)
-+#define ATMEL_TC_REG(idx, reg) (ATMEL_TC_CHAN(idx) + ATMEL_TC_ ## reg)
-+
-+#define ATMEL_TC_CCR 0x00 /* Channel Control Register */
-+#define ATMEL_TC_CLKEN (1 << 0) /* clock enable */
-+#define ATMEL_TC_CLKDIS (1 << 1) /* clock disable */
-+#define ATMEL_TC_SWTRG (1 << 2) /* software trigger */
-+
-+#define ATMEL_TC_CMR 0x04 /* Channel Mode Register */
-+
-+/* Both modes share some CMR bits */
-+#define ATMEL_TC_TCCLKS (7 << 0) /* clock source */
-+#define ATMEL_TC_TIMER_CLOCK1 (0 << 0)
-+#define ATMEL_TC_TIMER_CLOCK2 (1 << 0)
-+#define ATMEL_TC_TIMER_CLOCK3 (2 << 0)
-+#define ATMEL_TC_TIMER_CLOCK4 (3 << 0)
-+#define ATMEL_TC_TIMER_CLOCK5 (4 << 0)
-+#define ATMEL_TC_XC0 (5 << 0)
-+#define ATMEL_TC_XC1 (6 << 0)
-+#define ATMEL_TC_XC2 (7 << 0)
-+#define ATMEL_TC_CLKI (1 << 3) /* clock invert */
-+#define ATMEL_TC_BURST (3 << 4) /* clock gating */
-+#define ATMEL_TC_GATE_NONE (0 << 4)
-+#define ATMEL_TC_GATE_XC0 (1 << 4)
-+#define ATMEL_TC_GATE_XC1 (2 << 4)
-+#define ATMEL_TC_GATE_XC2 (3 << 4)
-+#define ATMEL_TC_WAVE (1 << 15) /* true = Waveform mode */
-+
-+/* CAPTURE mode CMR bits */
-+#define ATMEL_TC_LDBSTOP (1 << 6) /* counter stops on RB load */
-+#define ATMEL_TC_LDBDIS (1 << 7) /* counter disable on RB load */
-+#define ATMEL_TC_ETRGEDG (3 << 8) /* external trigger edge */
-+#define ATMEL_TC_ETRGEDG_NONE (0 << 8)
-+#define ATMEL_TC_ETRGEDG_RISING (1 << 8)
-+#define ATMEL_TC_ETRGEDG_FALLING (2 << 8)
-+#define ATMEL_TC_ETRGEDG_BOTH (3 << 8)
-+#define ATMEL_TC_ABETRG (1 << 10) /* external trigger is TIOA? */
-+#define ATMEL_TC_CPCTRG (1 << 14) /* RC compare trigger enable */
-+#define ATMEL_TC_LDRA (3 << 16) /* RA loading edge (of TIOA) */
-+#define ATMEL_TC_LDRA_NONE (0 << 16)
-+#define ATMEL_TC_LDRA_RISING (1 << 16)
-+#define ATMEL_TC_LDRA_FALLING (2 << 16)
-+#define ATMEL_TC_LDRA_BOTH (3 << 16)
-+#define ATMEL_TC_LDRB (3 << 18) /* RB loading edge (of TIOA) */
-+#define ATMEL_TC_LDRB_NONE (0 << 18)
-+#define ATMEL_TC_LDRB_RISING (1 << 18)
-+#define ATMEL_TC_LDRB_FALLING (2 << 18)
-+#define ATMEL_TC_LDRB_BOTH (3 << 18)
-+
-+/* WAVEFORM mode CMR bits */
-+#define ATMEL_TC_CPCSTOP (1 << 6) /* RC compare stops counter */
-+#define ATMEL_TC_CPCDIS (1 << 7) /* RC compare disables counter */
-+#define ATMEL_TC_EEVTEDG (3 << 8) /* external event edge */
-+#define ATMEL_TC_EEVTEDG_NONE (0 << 8)
-+#define ATMEL_TC_EEVTEDG_RISING (1 << 8)
-+#define ATMEL_TC_EEVTEDG_FALLING (2 << 8)
-+#define ATMEL_TC_EEVTEDG_BOTH (3 << 8)
-+#define ATMEL_TC_EEVT (3 << 10) /* external event source */
-+#define ATMEL_TC_EEVT_TIOB (0 << 10)
-+#define ATMEL_TC_EEVT_XC0 (1 << 10)
-+#define ATMEL_TC_EEVT_XC1 (2 << 10)
-+#define ATMEL_TC_EEVT_XC2 (3 << 10)
-+#define ATMEL_TC_ENETRG (1 << 12) /* external event is trigger */
-+#define ATMEL_TC_WAVESEL (3 << 13) /* waveform type */
-+#define ATMEL_TC_WAVESEL_UP (0 << 13)
-+#define ATMEL_TC_WAVESEL_UPDOWN (1 << 13)
-+#define ATMEL_TC_WAVESEL_UP_AUTO (2 << 13)
-+#define ATMEL_TC_WAVESEL_UPDOWN_AUTO (3 << 13)
-+#define ATMEL_TC_ACPA (3 << 16) /* RA compare changes TIOA */
-+#define ATMEL_TC_ACPA_NONE (0 << 16)
-+#define ATMEL_TC_ACPA_SET (1 << 16)
-+#define ATMEL_TC_ACPA_CLEAR (2 << 16)
-+#define ATMEL_TC_ACPA_TOGGLE (3 << 16)
-+#define ATMEL_TC_ACPC (3 << 18) /* RC compare changes TIOA */
-+#define ATMEL_TC_ACPC_NONE (0 << 18)
-+#define ATMEL_TC_ACPC_SET (1 << 18)
-+#define ATMEL_TC_ACPC_CLEAR (2 << 18)
-+#define ATMEL_TC_ACPC_TOGGLE (3 << 18)
-+#define ATMEL_TC_AEEVT (3 << 20) /* external event changes TIOA */
-+#define ATMEL_TC_AEEVT_NONE (0 << 20)
-+#define ATMEL_TC_AEEVT_SET (1 << 20)
-+#define ATMEL_TC_AEEVT_CLEAR (2 << 20)
-+#define ATMEL_TC_AEEVT_TOGGLE (3 << 20)
-+#define ATMEL_TC_ASWTRG (3 << 22) /* software trigger changes TIOA */
-+#define ATMEL_TC_ASWTRG_NONE (0 << 22)
-+#define ATMEL_TC_ASWTRG_SET (1 << 22)
-+#define ATMEL_TC_ASWTRG_CLEAR (2 << 22)
-+#define ATMEL_TC_ASWTRG_TOGGLE (3 << 22)
-+#define ATMEL_TC_BCPB (3 << 24) /* RB compare changes TIOB */
-+#define ATMEL_TC_BCPB_NONE (0 << 24)
-+#define ATMEL_TC_BCPB_SET (1 << 24)
-+#define ATMEL_TC_BCPB_CLEAR (2 << 24)
-+#define ATMEL_TC_BCPB_TOGGLE (3 << 24)
-+#define ATMEL_TC_BCPC (3 << 26) /* RC compare changes TIOB */
-+#define ATMEL_TC_BCPC_NONE (0 << 26)
-+#define ATMEL_TC_BCPC_SET (1 << 26)
-+#define ATMEL_TC_BCPC_CLEAR (2 << 26)
-+#define ATMEL_TC_BCPC_TOGGLE (3 << 26)
-+#define ATMEL_TC_BEEVT (3 << 28) /* external event changes TIOB */
-+#define ATMEL_TC_BEEVT_NONE (0 << 28)
-+#define ATMEL_TC_BEEVT_SET (1 << 28)
-+#define ATMEL_TC_BEEVT_CLEAR (2 << 28)
-+#define ATMEL_TC_BEEVT_TOGGLE (3 << 28)
-+#define ATMEL_TC_BSWTRG (3 << 30) /* software trigger changes TIOB */
-+#define ATMEL_TC_BSWTRG_NONE (0 << 30)
-+#define ATMEL_TC_BSWTRG_SET (1 << 30)
-+#define ATMEL_TC_BSWTRG_CLEAR (2 << 30)
-+#define ATMEL_TC_BSWTRG_TOGGLE (3 << 30)
-+
-+#define ATMEL_TC_CV 0x10 /* counter Value */
-+#define ATMEL_TC_RA 0x14 /* register A */
-+#define ATMEL_TC_RB 0x18 /* register B */
-+#define ATMEL_TC_RC 0x1c /* register C */
-+
-+#define ATMEL_TC_SR 0x20 /* status (read-only) */
-+/* Status-only flags */
-+#define ATMEL_TC_CLKSTA (1 << 16) /* clock enabled */
-+#define ATMEL_TC_MTIOA (1 << 17) /* TIOA mirror */
-+#define ATMEL_TC_MTIOB (1 << 18) /* TIOB mirror */
-+
-+#define ATMEL_TC_IER 0x24 /* interrupt enable (write-only) */
-+#define ATMEL_TC_IDR 0x28 /* interrupt disable (write-only) */
-+#define ATMEL_TC_IMR 0x2c /* interrupt mask (read-only) */
-+
-+/* Status and IRQ flags */
-+#define ATMEL_TC_COVFS (1 << 0) /* counter overflow */
-+#define ATMEL_TC_LOVRS (1 << 1) /* load overrun */
-+#define ATMEL_TC_CPAS (1 << 2) /* RA compare */
-+#define ATMEL_TC_CPBS (1 << 3) /* RB compare */
-+#define ATMEL_TC_CPCS (1 << 4) /* RC compare */
-+#define ATMEL_TC_LDRAS (1 << 5) /* RA loading */
-+#define ATMEL_TC_LDRBS (1 << 6) /* RB loading */
-+#define ATMEL_TC_ETRGS (1 << 7) /* external trigger */
-+#define ATMEL_TC_ALL_IRQ (ATMEL_TC_COVFS | ATMEL_TC_LOVRS | \
-+ ATMEL_TC_CPAS | ATMEL_TC_CPBS | \
-+ ATMEL_TC_CPCS | ATMEL_TC_LDRAS | \
-+ ATMEL_TC_LDRBS | ATMEL_TC_ETRGS) \
-+ /* all IRQs */
-+
-+#endif
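
For context: the header deleted above (and re-added as include/soc/at91/atmel_tcb.h) keeps the tclib consumer contract its block comment describes — a driver requests a TC block by number, enables the per-channel clocks even if they alias one physical clock, and requests the per-channel IRQs with IRQF_SHARED since the three entries may be the same line. A minimal, hypothetical sketch of that pattern, using only identifiers declared in the header (the function names my_tc_irq/my_tc_setup are illustrative, not from the patch):

#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <soc/at91/atmel_tcb.h>

static irqreturn_t my_tc_irq(int irq, void *dev_id)
{
	struct atmel_tc *tc = dev_id;

	/* Reading SR acknowledges the channel's pending interrupt flags. */
	readl(tc->regs + ATMEL_TC_REG(0, SR));
	return IRQ_HANDLED;
}

static int my_tc_setup(void)
{
	struct atmel_tc *tc;
	int ret;

	tc = atmel_tc_alloc(0);		/* request TC block 0 by number */
	if (!tc)
		return -ENODEV;

	/* Enable the channel clock; entries in @clk may alias each other. */
	ret = clk_prepare_enable(tc->clk[0]);
	if (ret)
		goto err_free;

	/* IRQF_SHARED because all three entries in @irq may be one line. */
	ret = request_irq(tc->irq[0], my_tc_irq, IRQF_SHARED, "my-tc", tc);
	if (ret)
		goto err_clk;

	/* Pick an input clock; atmel_tc_divisors[N-1] gives its divisor. */
	writel(ATMEL_TC_TIMER_CLOCK3, tc->regs + ATMEL_TC_REG(0, CMR));
	writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG, tc->regs + ATMEL_TC_REG(0, CCR));
	return 0;

err_clk:
	clk_disable_unprepare(tc->clk[0]);
err_free:
	atmel_tc_free(tc);
	return ret;
}
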
diff --git a/debian/patches-rt/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch b/debian/patches-rt/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
index ce4e148c7..8e6103b42 100644
--- a/debian/patches-rt/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
+++ b/debian/patches-rt/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
@@ -2,7 +2,7 @@ From: Peter Zijlstra <peterz@infradead.org>
Date: Mon, 28 May 2018 15:24:20 +0200
Subject: [PATCH 1/4] Split IRQ-off and zone->lock while freeing pages from PCP
list #1
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Split the IRQ-off section while accessing the PCP list from zone->lock
while freeing pages.
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -1112,7 +1112,7 @@ static inline void prefetch_buddy(struct
+@@ -1182,7 +1182,7 @@ static inline void prefetch_buddy(struct
}
/*
@@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* Assumes all pages on list are in same zone, and of same order.
* count is the number of pages to free.
*
-@@ -1123,14 +1123,41 @@ static inline void prefetch_buddy(struct
+@@ -1193,14 +1193,41 @@ static inline void prefetch_buddy(struct
* pinned" detection logic.
*/
static void free_pcppages_bulk(struct zone *zone, int count,
@@ -73,7 +73,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
while (count) {
struct list_head *list;
-@@ -1162,7 +1189,7 @@ static void free_pcppages_bulk(struct zo
+@@ -1232,7 +1259,7 @@ static void free_pcppages_bulk(struct zo
if (bulkfree_pcp_prepare(page))
continue;
@@ -82,7 +82,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* We are going to put the page back to the global
-@@ -1177,26 +1204,6 @@ static void free_pcppages_bulk(struct zo
+@@ -1247,26 +1274,6 @@ static void free_pcppages_bulk(struct zo
prefetch_buddy(page);
} while (--count && --batch_free && !list_empty(list));
}
@@ -109,7 +109,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void free_one_page(struct zone *zone,
-@@ -2608,13 +2615,18 @@ void drain_zone_pages(struct zone *zone,
+@@ -2724,13 +2731,18 @@ void drain_zone_pages(struct zone *zone,
{
unsigned long flags;
int to_drain, batch;
@@ -129,7 +129,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#endif
-@@ -2630,14 +2642,21 @@ static void drain_pages_zone(unsigned in
+@@ -2746,14 +2758,21 @@ static void drain_pages_zone(unsigned in
unsigned long flags;
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
@@ -153,7 +153,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -2865,7 +2884,10 @@ static void free_unref_page_commit(struc
+@@ -2981,7 +3000,10 @@ static void free_unref_page_commit(struc
pcp->count++;
if (pcp->count >= pcp->high) {
unsigned long batch = READ_ONCE(pcp->batch);
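
For context, the hunks above restructure page freeing into two steps: pages are first unlinked from the per-CPU (PCP) list inside the IRQ-off section, then handed to the buddy allocator under zone->lock in a separate step, so the buddy work no longer nests inside the IRQ-off region. A condensed sketch of that shape, assuming mm-internal helpers from mm/page_alloc.c as of v5.2 (__free_one_page, get_pcppage_migratetype); the isolation step is elided because the excerpt does not show the new helper in full:

static void drain_split_sketch(struct zone *zone, struct per_cpu_pages *pcp,
			       int count)
{
	unsigned long flags;
	LIST_HEAD(dst);
	struct page *page, *tmp;

	local_irq_save(flags);		/* step 1: PCP list access only */
	/* move 'count' pages from pcp->lists[] onto dst (elided) */
	local_irq_restore(flags);

	spin_lock_irqsave(&zone->lock, flags);	/* step 2: buddy free */
	list_for_each_entry_safe(page, tmp, &dst, lru) {
		list_del(&page->lru);
		__free_one_page(page, page_to_pfn(page), zone, 0,
				get_pcppage_migratetype(page));
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
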
diff --git a/debian/patches-rt/0001-printk-rb-add-printk-ring-buffer-documentation.patch b/debian/patches-rt/0001-printk-rb-add-printk-ring-buffer-documentation.patch
index 36654c391..d7558c7c4 100644
--- a/debian/patches-rt/0001-printk-rb-add-printk-ring-buffer-documentation.patch
+++ b/debian/patches-rt/0001-printk-rb-add-printk-ring-buffer-documentation.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 12 Feb 2019 15:29:39 +0100
Subject: [PATCH 01/25] printk-rb: add printk ring buffer documentation
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
The full documentation file for the printk ring buffer.
diff --git a/debian/patches-rt/0001-x86-fpu-Remove-fpu-initialized-usage-in-__fpu__resto.patch b/debian/patches-rt/0001-x86-fpu-Remove-fpu-initialized-usage-in-__fpu__resto.patch
deleted file mode 100644
index 6810e5362..000000000
--- a/debian/patches-rt/0001-x86-fpu-Remove-fpu-initialized-usage-in-__fpu__resto.patch
+++ /dev/null
@@ -1,153 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 16 Oct 2018 11:08:14 +0200
-Subject: [PATCH 01/27] x86/fpu: Remove fpu->initialized usage in
- __fpu__restore_sig()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-This is a preparation for the removal of the ->initialized member in the
-fpu struct.
-__fpu__restore_sig() is deactivating the FPU via fpu__drop() and then
-setting manually ->initialized followed by fpu__restore(). The result is
-that it is possible to manipulate fpu->state and the state of registers
-won't be saved/restored on a context switch which would overwrite
-fpu->state.
-
-Don't access the fpu->state while the content is read from user space
-and examined / sanitized. Use a temporary kmalloc() buffer for the
-preparation of the FPU registers and once the state is considered okay,
-load it. Should something go wrong, return with an error and without
-altering the original FPU registers.
-
-The removal of "fpu__initialize()" is a nop because fpu->initialized is
-already set for the user task.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Acked-by: Borislav Petkov <bp@suse.de>
----
- arch/x86/include/asm/fpu/signal.h | 2 -
- arch/x86/kernel/fpu/regset.c | 5 +---
- arch/x86/kernel/fpu/signal.c | 40 ++++++++++++++------------------------
- 3 files changed, 18 insertions(+), 29 deletions(-)
-
---- a/arch/x86/include/asm/fpu/signal.h
-+++ b/arch/x86/include/asm/fpu/signal.h
-@@ -22,7 +22,7 @@ int ia32_setup_frame(int sig, struct ksi
-
- extern void convert_from_fxsr(struct user_i387_ia32_struct *env,
- struct task_struct *tsk);
--extern void convert_to_fxsr(struct task_struct *tsk,
-+extern void convert_to_fxsr(struct fxregs_state *fxsave,
- const struct user_i387_ia32_struct *env);
-
- unsigned long
---- a/arch/x86/kernel/fpu/regset.c
-+++ b/arch/x86/kernel/fpu/regset.c
-@@ -269,11 +269,10 @@ convert_from_fxsr(struct user_i387_ia32_
- memcpy(&to[i], &from[i], sizeof(to[0]));
- }
-
--void convert_to_fxsr(struct task_struct *tsk,
-+void convert_to_fxsr(struct fxregs_state *fxsave,
- const struct user_i387_ia32_struct *env)
-
- {
-- struct fxregs_state *fxsave = &tsk->thread.fpu.state.fxsave;
- struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
- struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
- int i;
-@@ -350,7 +349,7 @@ int fpregs_set(struct task_struct *targe
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
- if (!ret)
-- convert_to_fxsr(target, &env);
-+ convert_to_fxsr(&target->thread.fpu.state.fxsave, &env);
-
- /*
- * update the header bit in the xsave header, indicating the
---- a/arch/x86/kernel/fpu/signal.c
-+++ b/arch/x86/kernel/fpu/signal.c
-@@ -207,11 +207,11 @@ int copy_fpstate_to_sigframe(void __user
- }
-
- static inline void
--sanitize_restored_xstate(struct task_struct *tsk,
-+sanitize_restored_xstate(union fpregs_state *state,
- struct user_i387_ia32_struct *ia32_env,
- u64 xfeatures, int fx_only)
- {
-- struct xregs_state *xsave = &tsk->thread.fpu.state.xsave;
-+ struct xregs_state *xsave = &state->xsave;
- struct xstate_header *header = &xsave->header;
-
- if (use_xsave()) {
-@@ -238,7 +238,7 @@ sanitize_restored_xstate(struct task_str
- */
- xsave->i387.mxcsr &= mxcsr_feature_mask;
-
-- convert_to_fxsr(tsk, ia32_env);
-+ convert_to_fxsr(&state->fxsave, ia32_env);
- }
- }
-
-@@ -284,8 +284,6 @@ static int __fpu__restore_sig(void __use
- if (!access_ok(buf, size))
- return -EACCES;
-
-- fpu__initialize(fpu);
--
- if (!static_cpu_has(X86_FEATURE_FPU))
- return fpregs_soft_set(current, NULL,
- 0, sizeof(struct user_i387_ia32_struct),
-@@ -315,40 +313,32 @@ static int __fpu__restore_sig(void __use
- * header. Validate and sanitize the copied state.
- */
- struct user_i387_ia32_struct env;
-+ union fpregs_state *state;
- int err = 0;
-+ void *tmp;
-
-- /*
-- * Drop the current fpu which clears fpu->initialized. This ensures
-- * that any context-switch during the copy of the new state,
-- * avoids the intermediate state from getting restored/saved.
-- * Thus avoiding the new restored state from getting corrupted.
-- * We will be ready to restore/save the state only after
-- * fpu->initialized is again set.
-- */
-- fpu__drop(fpu);
-+ tmp = kzalloc(sizeof(*state) + fpu_kernel_xstate_size + 64, GFP_KERNEL);
-+ if (!tmp)
-+ return -ENOMEM;
-+ state = PTR_ALIGN(tmp, 64);
-
- if (using_compacted_format()) {
-- err = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
-+ err = copy_user_to_xstate(&state->xsave, buf_fx);
- } else {
-- err = __copy_from_user(&fpu->state.xsave, buf_fx, state_size);
-+ err = __copy_from_user(&state->xsave, buf_fx, state_size);
-
- if (!err && state_size > offsetof(struct xregs_state, header))
-- err = validate_xstate_header(&fpu->state.xsave.header);
-+ err = validate_xstate_header(&state->xsave.header);
- }
-
- if (err || __copy_from_user(&env, buf, sizeof(env))) {
-- fpstate_init(&fpu->state);
-- trace_x86_fpu_init_state(fpu);
- err = -1;
- } else {
-- sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
-+ sanitize_restored_xstate(state, &env, xfeatures, fx_only);
-+ copy_kernel_to_fpregs(state);
- }
-
-- local_bh_disable();
-- fpu->initialized = 1;
-- fpu__restore(fpu);
-- local_bh_enable();
--
-+ kfree(tmp);
- return err;
- } else {
- /*
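
Although this patch file is dropped here (the change landed upstream in 5.2),
the staging pattern it introduced is worth a sketch: untrusted user state is
prepared in a temporary, 64-byte-aligned kernel buffer and only loaded into
the FPU registers once it validates. The wrapper below is illustrative;
validate_xstate_header() and copy_kernel_to_fpregs() are the real helpers
quoted in the hunks above:

    static int restore_user_fpstate(const void __user *buf, size_t size)
    {
            union fpregs_state *state;
            void *tmp;
            int err;

            /* XSAVE needs 64-byte alignment: over-allocate, then align. */
            tmp = kzalloc(size + 64, GFP_KERNEL);
            if (!tmp)
                    return -ENOMEM;
            state = PTR_ALIGN(tmp, 64);

            err = __copy_from_user(&state->xsave, buf, size);
            if (!err)
                    err = validate_xstate_header(&state->xsave.header);
            if (!err)
                    copy_kernel_to_fpregs(state);   /* load only validated state */

            kfree(tmp);
            return err;
    }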
diff --git a/debian/patches-rt/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch b/debian/patches-rt/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
index 3f391f1a7..631909677 100644
--- a/debian/patches-rt/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
+++ b/debian/patches-rt/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
@@ -2,7 +2,7 @@ From: Peter Zijlstra <peterz@infradead.org>
Date: Mon, 28 May 2018 15:24:21 +0200
Subject: [PATCH 2/4] Split IRQ-off and zone->lock while freeing pages from PCP
list #2
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Split the IRQ-off section for accessing the PCP list from the zone->lock
section used while freeing pages.
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -1122,8 +1122,8 @@ static inline void prefetch_buddy(struct
+@@ -1192,8 +1192,8 @@ static inline void prefetch_buddy(struct
* And clear the zone's pages_scanned counter, to hold off the "all pages are
* pinned" detection logic.
*/
@@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
bool isolated_pageblocks;
struct page *page, *tmp;
-@@ -1138,12 +1138,27 @@ static void free_pcppages_bulk(struct zo
+@@ -1208,12 +1208,27 @@ static void free_pcppages_bulk(struct zo
*/
list_for_each_entry_safe(page, tmp, head, lru) {
int mt = get_pcppage_migratetype(page);
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
__free_one_page(page, page_to_pfn(page), zone, 0, mt);
trace_mm_page_pcpu_drain(page, 0, mt);
}
-@@ -2626,7 +2641,7 @@ void drain_zone_pages(struct zone *zone,
+@@ -2742,7 +2757,7 @@ void drain_zone_pages(struct zone *zone,
local_irq_restore(flags);
if (to_drain > 0)
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#endif
-@@ -2656,7 +2671,7 @@ static void drain_pages_zone(unsigned in
+@@ -2772,7 +2787,7 @@ static void drain_pages_zone(unsigned in
local_irq_restore(flags);
if (count)
@@ -75,7 +75,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -2855,7 +2870,8 @@ static bool free_unref_page_prepare(stru
+@@ -2971,7 +2986,8 @@ static bool free_unref_page_prepare(stru
return true;
}
@@ -85,7 +85,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct zone *zone = page_zone(page);
struct per_cpu_pages *pcp;
-@@ -2884,10 +2900,8 @@ static void free_unref_page_commit(struc
+@@ -3000,10 +3016,8 @@ static void free_unref_page_commit(struc
pcp->count++;
if (pcp->count >= pcp->high) {
unsigned long batch = READ_ONCE(pcp->batch);
@@ -97,7 +97,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -2898,13 +2912,17 @@ void free_unref_page(struct page *page)
+@@ -3014,13 +3028,17 @@ void free_unref_page(struct page *page)
{
unsigned long flags;
unsigned long pfn = page_to_pfn(page);
@@ -116,7 +116,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -2915,6 +2933,11 @@ void free_unref_page_list(struct list_he
+@@ -3031,6 +3049,11 @@ void free_unref_page_list(struct list_he
struct page *page, *next;
unsigned long flags, pfn;
int batch_count = 0;
@@ -128,7 +128,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Prepare pages for freeing */
list_for_each_entry_safe(page, next, list, lru) {
-@@ -2927,10 +2950,12 @@ void free_unref_page_list(struct list_he
+@@ -3043,10 +3066,12 @@ void free_unref_page_list(struct list_he
local_irq_save(flags);
list_for_each_entry_safe(page, next, list, lru) {
unsigned long pfn = page_private(page);
@@ -142,7 +142,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Guard against excessive IRQ disabled times when we get
-@@ -2943,6 +2968,21 @@ void free_unref_page_list(struct list_he
+@@ -3059,6 +3084,21 @@ void free_unref_page_list(struct list_he
}
}
local_irq_restore(flags);
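
The free_unref_page() hunks above boil down to the following shape; a minimal
sketch assuming the two-step prepare/commit split shown in the patch (the
prototypes in the sketch are simplified):

    void free_unref_page_sketch(struct page *page)
    {
            unsigned long pfn = page_to_pfn(page);
            struct zone *zone = page_zone(page);
            unsigned long flags;
            LIST_HEAD(dst);

            if (!free_unref_page_prepare(page, pfn))
                    return;

            /* IRQ-off only long enough to queue the page locally. */
            local_irq_save(flags);
            free_unref_page_commit(page, pfn, &dst);
            local_irq_restore(flags);

            /* Buddy work happens afterwards, under zone->lock alone. */
            if (!list_empty(&dst))
                    free_pcppages_bulk(zone, &dst);
    }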
diff --git a/debian/patches-rt/0002-clocksource-drivers-tcb_clksrc-stop-depending-on-atm.patch b/debian/patches-rt/0002-clocksource-drivers-tcb_clksrc-stop-depending-on-atm.patch
deleted file mode 100644
index 2c30b1591..000000000
--- a/debian/patches-rt/0002-clocksource-drivers-tcb_clksrc-stop-depending-on-atm.patch
+++ /dev/null
@@ -1,252 +0,0 @@
-From: Alexandre Belloni <alexandre.belloni@bootlin.com>
-Date: Fri, 26 Apr 2019 23:47:11 +0200
-Subject: [PATCH 02/10] clocksource/drivers/tcb_clksrc: stop depending on
- atmel_tclib
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-atmel_tclib is probed too late in the boot process to be able to use the
-TCB as the boot clocksource. This is an issue for SoCs without the PIT
-(sams70, samv70 and samv71 families) as they simply currently can't boot.
-
-Get rid of the atmel_tclib dependency and probe everything on our own using
-the correct device tree binding.
-
-This also allows getting rid of ATMEL_TCB_CLKSRC_BLOCK and makes the driver
-a bit more flexible as the TCB is not hardcoded in the kernel anymore.
-
-Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/clocksource/tcb_clksrc.c | 108 ++++++++++++++++++++++++---------------
- drivers/misc/Kconfig | 14 -----
- 2 files changed, 70 insertions(+), 52 deletions(-)
-
---- a/drivers/clocksource/tcb_clksrc.c
-+++ b/drivers/clocksource/tcb_clksrc.c
-@@ -9,7 +9,8 @@
- #include <linux/err.h>
- #include <linux/ioport.h>
- #include <linux/io.h>
--#include <linux/platform_device.h>
-+#include <linux/of_address.h>
-+#include <linux/of_irq.h>
- #include <linux/syscore_ops.h>
- #include <soc/at91/atmel_tcb.h>
-
-@@ -28,13 +29,6 @@
- * source, used in either periodic or oneshot mode. This runs
- * at 32 KiHZ, and can handle delays of up to two seconds.
- *
-- * A boot clocksource and clockevent source are also currently needed,
-- * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
-- * this code can be used when init_timers() is called, well before most
-- * devices are set up. (Some low end AT91 parts, which can run uClinux,
-- * have only the timers in one TC block... they currently don't support
-- * the tclib code, because of that initialization issue.)
-- *
- * REVISIT behavior during system suspend states... we should disable
- * all clocks and save the power. Easily done for clockevent devices,
- * but clocksources won't necessarily get the needed notifications.
-@@ -112,7 +106,6 @@ void tc_clksrc_resume(struct clocksource
- }
-
- static struct clocksource clksrc = {
-- .name = "tcb_clksrc",
- .rating = 200,
- .read = tc_get_cycles,
- .mask = CLOCKSOURCE_MASK(32),
-@@ -214,7 +207,6 @@ static int tc_next_event(unsigned long d
-
- static struct tc_clkevt_device clkevt = {
- .clkevt = {
-- .name = "tc_clkevt",
- .features = CLOCK_EVT_FEAT_PERIODIC |
- CLOCK_EVT_FEAT_ONESHOT,
- /* Should be lower than at91rm9200's system timer */
-@@ -330,39 +322,73 @@ static void __init tcb_setup_single_chan
- writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
- }
-
--static int __init tcb_clksrc_init(void)
--{
-- static char bootinfo[] __initdata
-- = KERN_DEBUG "%s: tc%d at %d.%03d MHz\n";
-+static const u8 atmel_tcb_divisors[5] = { 2, 8, 32, 128, 0, };
-+
-+static const struct of_device_id atmel_tcb_of_match[] = {
-+ { .compatible = "atmel,at91rm9200-tcb", .data = (void *)16, },
-+ { .compatible = "atmel,at91sam9x5-tcb", .data = (void *)32, },
-+ { /* sentinel */ }
-+};
-
-- struct platform_device *pdev;
-- struct atmel_tc *tc;
-+static int __init tcb_clksrc_init(struct device_node *node)
-+{
-+ struct atmel_tc tc;
- struct clk *t0_clk;
-+ const struct of_device_id *match;
- u32 rate, divided_rate = 0;
- int best_divisor_idx = -1;
- int clk32k_divisor_idx = -1;
-+ int bits;
- int i;
- int ret;
-
-- tc = atmel_tc_alloc(CONFIG_ATMEL_TCB_CLKSRC_BLOCK);
-- if (!tc) {
-- pr_debug("can't alloc TC for clocksource\n");
-- return -ENODEV;
-+ /* Protect against multiple calls */
-+ if (tcaddr)
-+ return 0;
-+
-+ tc.regs = of_iomap(node->parent, 0);
-+ if (!tc.regs)
-+ return -ENXIO;
-+
-+ t0_clk = of_clk_get_by_name(node->parent, "t0_clk");
-+ if (IS_ERR(t0_clk))
-+ return PTR_ERR(t0_clk);
-+
-+ tc.slow_clk = of_clk_get_by_name(node->parent, "slow_clk");
-+ if (IS_ERR(tc.slow_clk))
-+ return PTR_ERR(tc.slow_clk);
-+
-+ tc.clk[0] = t0_clk;
-+ tc.clk[1] = of_clk_get_by_name(node->parent, "t1_clk");
-+ if (IS_ERR(tc.clk[1]))
-+ tc.clk[1] = t0_clk;
-+ tc.clk[2] = of_clk_get_by_name(node->parent, "t2_clk");
-+ if (IS_ERR(tc.clk[2]))
-+ tc.clk[2] = t0_clk;
-+
-+ tc.irq[2] = of_irq_get(node->parent, 2);
-+ if (tc.irq[2] <= 0) {
-+ tc.irq[2] = of_irq_get(node->parent, 0);
-+ if (tc.irq[2] <= 0)
-+ return -EINVAL;
- }
-- tcaddr = tc->regs;
-- pdev = tc->pdev;
-
-- t0_clk = tc->clk[0];
-+ match = of_match_node(atmel_tcb_of_match, node->parent);
-+ bits = (uintptr_t)match->data;
-+
-+ for (i = 0; i < ARRAY_SIZE(tc.irq); i++)
-+ writel(ATMEL_TC_ALL_IRQ, tc.regs + ATMEL_TC_REG(i, IDR));
-+
- ret = clk_prepare_enable(t0_clk);
- if (ret) {
- pr_debug("can't enable T0 clk\n");
-- goto err_free_tc;
-+ return ret;
- }
-
- /* How fast will we be counting? Pick something over 5 MHz. */
- rate = (u32) clk_get_rate(t0_clk);
-- for (i = 0; i < 5; i++) {
-- unsigned divisor = atmel_tc_divisors[i];
-+ for (i = 0; i < ARRAY_SIZE(atmel_tcb_divisors); i++) {
-+ unsigned divisor = atmel_tcb_divisors[i];
- unsigned tmp;
-
- /* remember 32 KiHz clock for later */
-@@ -381,27 +407,29 @@ static int __init tcb_clksrc_init(void)
- best_divisor_idx = i;
- }
-
--
-- printk(bootinfo, clksrc.name, CONFIG_ATMEL_TCB_CLKSRC_BLOCK,
-- divided_rate / 1000000,
-+ clksrc.name = kbasename(node->parent->full_name);
-+ clkevt.clkevt.name = kbasename(node->parent->full_name);
-+ pr_debug("%s at %d.%03d MHz\n", clksrc.name, divided_rate / 1000000,
- ((divided_rate % 1000000) + 500) / 1000);
-
-- if (tc->tcb_config && tc->tcb_config->counter_width == 32) {
-+ tcaddr = tc.regs;
-+
-+ if (bits == 32) {
- /* use apropriate function to read 32 bit counter */
- clksrc.read = tc_get_cycles32;
- /* setup ony channel 0 */
-- tcb_setup_single_chan(tc, best_divisor_idx);
-+ tcb_setup_single_chan(&tc, best_divisor_idx);
- } else {
-- /* tclib will give us three clocks no matter what the
-+ /* we have three clocks no matter what the
- * underlying platform supports.
- */
-- ret = clk_prepare_enable(tc->clk[1]);
-+ ret = clk_prepare_enable(tc.clk[1]);
- if (ret) {
- pr_debug("can't enable T1 clk\n");
- goto err_disable_t0;
- }
- /* setup both channel 0 & 1 */
-- tcb_setup_dual_chan(tc, best_divisor_idx);
-+ tcb_setup_dual_chan(&tc, best_divisor_idx);
- }
-
- /* and away we go! */
-@@ -410,7 +438,7 @@ static int __init tcb_clksrc_init(void)
- goto err_disable_t1;
-
- /* channel 2: periodic and oneshot timer support */
-- ret = setup_clkevents(tc, clk32k_divisor_idx);
-+ ret = setup_clkevents(&tc, clk32k_divisor_idx);
- if (ret)
- goto err_unregister_clksrc;
-
-@@ -420,14 +448,14 @@ static int __init tcb_clksrc_init(void)
- clocksource_unregister(&clksrc);
-
- err_disable_t1:
-- if (!tc->tcb_config || tc->tcb_config->counter_width != 32)
-- clk_disable_unprepare(tc->clk[1]);
-+ if (bits != 32)
-+ clk_disable_unprepare(tc.clk[1]);
-
- err_disable_t0:
- clk_disable_unprepare(t0_clk);
-
--err_free_tc:
-- atmel_tc_free(tc);
-+ tcaddr = NULL;
-+
- return ret;
- }
--arch_initcall(tcb_clksrc_init);
-+TIMER_OF_DECLARE(atmel_tcb_clksrc, "atmel,tcb-timer", tcb_clksrc_init);
---- a/drivers/misc/Kconfig
-+++ b/drivers/misc/Kconfig
-@@ -61,7 +61,8 @@ config ATMEL_TCLIB
-
- config ATMEL_TCB_CLKSRC
- bool "TC Block Clocksource"
-- depends on ATMEL_TCLIB
-+ depends on ARCH_AT91
-+ select TIMER_OF if OF
- default y
- help
- Select this to get a high precision clocksource based on a
-@@ -72,17 +73,6 @@ config ATMEL_TCB_CLKSRC
- may be used as a clock event device supporting oneshot mode
- (delays of up to two seconds) based on the 32 KiHz clock.
-
--config ATMEL_TCB_CLKSRC_BLOCK
-- int
-- depends on ATMEL_TCB_CLKSRC
-- default 0
-- range 0 1
-- help
-- Some chips provide more than one TC block, so you have the
-- choice of which one to use for the clock framework. The other
-- TC can be used for other purposes, such as PWM generation and
-- interval timing.
--
- config DUMMY_IRQ
- tristate "Dummy IRQ handler"
- default n
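
For context on why this patch could be dropped for 5.2 (the series was merged
upstream): the key mechanism is TIMER_OF_DECLARE(), which runs the init
function from time_init(), long before platform devices probe, so the TCB can
act as the boot clocksource. A minimal sketch, with a hypothetical compatible
string and driver name:

    static int __init my_tcb_timer_init(struct device_node *node)
    {
            void __iomem *base;

            base = of_iomap(node, 0);       /* map registers by hand, this early */
            if (!base)
                    return -ENXIO;

            /*
             * ... get/enable clocks, pick a divisor, register the
             * clocksource and clockevent devices ...
             */
            return 0;
    }
    TIMER_OF_DECLARE(my_tcb_timer, "vendor,my-tcb-timer", my_tcb_timer_init);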
diff --git a/debian/patches-rt/0002-printk-rb-add-prb-locking-functions.patch b/debian/patches-rt/0002-printk-rb-add-prb-locking-functions.patch
index b54f85fa6..598672516 100644
--- a/debian/patches-rt/0002-printk-rb-add-prb-locking-functions.patch
+++ b/debian/patches-rt/0002-printk-rb-add-prb-locking-functions.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 12 Feb 2019 15:29:40 +0100
Subject: [PATCH 02/25] printk-rb: add prb locking functions
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Add processor-reentrant spin locking functions. These allow
restricting the number of possible contexts to 2, which can simplify
@@ -68,12 +68,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#endif /*_LINUX_PRINTK_RINGBUFFER_H */
--- a/lib/Makefile
+++ b/lib/Makefile
-@@ -19,7 +19,7 @@ KCOV_INSTRUMENT_dynamic_debug.o := n
+@@ -30,7 +30,7 @@ endif
lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o timerqueue.o xarray.o \
-- idr.o int_sqrt.o extable.o \
-+ idr.o int_sqrt.o extable.o printk_ringbuffer.o \
+- idr.o extable.o \
++ idr.o extable.o printk_ringbuffer.o \
sha1.o chacha.o irq_regs.o argv_split.o \
flex_proportions.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
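
A processor-reentrant spin lock, as named in this patch, records which CPU
owns it so that a context interrupting the owner on the same CPU (for example
an NMI) re-enters instead of deadlocking. The following is a simplified
illustration of that idea, not the prb_lock()/prb_unlock() implementation
itself; interrupt-state handling is omitted:

    static atomic_t lock_owner = ATOMIC_INIT(-1);

    static void cpu_lock(int *prev_owner)
    {
            int cpu;

            for (;;) {
                    cpu = get_cpu();
                    *prev_owner = atomic_cmpxchg(&lock_owner, -1, cpu);
                    if (*prev_owner == -1 || *prev_owner == cpu)
                            break;          /* acquired, or re-entry on owner CPU */
                    put_cpu();
                    cpu_relax();            /* another CPU holds the lock */
            }
    }

    static void cpu_unlock(int prev_owner)
    {
            /* Only the outermost acquisition actually releases. */
            if (prev_owner == -1)
                    atomic_set(&lock_owner, -1);
            put_cpu();
    }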
diff --git a/debian/patches-rt/0002-x86-fpu-Remove-fpu__restore.patch b/debian/patches-rt/0002-x86-fpu-Remove-fpu__restore.patch
deleted file mode 100644
index 2dd0e9fad..000000000
--- a/debian/patches-rt/0002-x86-fpu-Remove-fpu__restore.patch
+++ /dev/null
@@ -1,98 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 17 Oct 2018 16:10:45 +0200
-Subject: [PATCH 02/27] x86/fpu: Remove fpu__restore()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-There are no users of fpu__restore() so it is time to remove it.
-The comment regarding fpu__restore() and TS bit is stale since commit
- b3b0870ef3ffe ("i387: do not preload FPU state at task switch time")
-and has no meaning since.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- Documentation/preempt-locking.txt | 1 -
- arch/x86/include/asm/fpu/internal.h | 1 -
- arch/x86/kernel/fpu/core.c | 24 ------------------------
- arch/x86/kernel/process_32.c | 4 +---
- arch/x86/kernel/process_64.c | 4 +---
- 5 files changed, 2 insertions(+), 32 deletions(-)
-
---- a/Documentation/preempt-locking.txt
-+++ b/Documentation/preempt-locking.txt
-@@ -52,7 +52,6 @@ preemption must be disabled around such
-
- Note, some FPU functions are already explicitly preempt safe. For example,
- kernel_fpu_begin and kernel_fpu_end will disable and enable preemption.
--However, fpu__restore() must be called with preemption disabled.
-
-
- RULE #3: Lock acquire and release must be performed by same task
---- a/arch/x86/include/asm/fpu/internal.h
-+++ b/arch/x86/include/asm/fpu/internal.h
-@@ -28,7 +28,6 @@ extern void fpu__initialize(struct fpu *
- extern void fpu__prepare_read(struct fpu *fpu);
- extern void fpu__prepare_write(struct fpu *fpu);
- extern void fpu__save(struct fpu *fpu);
--extern void fpu__restore(struct fpu *fpu);
- extern int fpu__restore_sig(void __user *buf, int ia32_frame);
- extern void fpu__drop(struct fpu *fpu);
- extern int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu);
---- a/arch/x86/kernel/fpu/core.c
-+++ b/arch/x86/kernel/fpu/core.c
-@@ -304,30 +304,6 @@ void fpu__prepare_write(struct fpu *fpu)
- }
-
- /*
-- * 'fpu__restore()' is called to copy FPU registers from
-- * the FPU fpstate to the live hw registers and to activate
-- * access to the hardware registers, so that FPU instructions
-- * can be used afterwards.
-- *
-- * Must be called with kernel preemption disabled (for example
-- * with local interrupts disabled, as it is in the case of
-- * do_device_not_available()).
-- */
--void fpu__restore(struct fpu *fpu)
--{
-- fpu__initialize(fpu);
--
-- /* Avoid __kernel_fpu_begin() right after fpregs_activate() */
-- kernel_fpu_disable();
-- trace_x86_fpu_before_restore(fpu);
-- fpregs_activate(fpu);
-- copy_kernel_to_fpregs(&fpu->state);
-- trace_x86_fpu_after_restore(fpu);
-- kernel_fpu_enable();
--}
--EXPORT_SYMBOL_GPL(fpu__restore);
--
--/*
- * Drops current FPU state: deactivates the fpregs and
- * the fpstate. NOTE: it still leaves previous contents
- * in the fpregs in the eager-FPU case.
---- a/arch/x86/kernel/process_32.c
-+++ b/arch/x86/kernel/process_32.c
-@@ -267,9 +267,7 @@ EXPORT_SYMBOL_GPL(start_thread);
- /*
- * Leave lazy mode, flushing any hypercalls made here.
- * This must be done before restoring TLS segments so
-- * the GDT and LDT are properly updated, and must be
-- * done before fpu__restore(), so the TS bit is up
-- * to date.
-+ * the GDT and LDT are properly updated.
- */
- arch_end_context_switch(next_p);
-
---- a/arch/x86/kernel/process_64.c
-+++ b/arch/x86/kernel/process_64.c
-@@ -538,9 +538,7 @@ void compat_start_thread(struct pt_regs
- /*
- * Leave lazy mode, flushing any hypercalls made here. This
- * must be done after loading TLS entries in the GDT but before
-- * loading segments that might reference them, and and it must
-- * be done before fpu__restore(), so the TS bit is up to
-- * date.
-+ * loading segments that might reference them.
- */
- arch_end_context_switch(next_p);
-
diff --git a/debian/patches-rt/0003-clocksource-drivers-tcb_clksrc-Use-tcb-as-sched_cloc.patch b/debian/patches-rt/0003-clocksource-drivers-tcb_clksrc-Use-tcb-as-sched_cloc.patch
deleted file mode 100644
index 890df637b..000000000
--- a/debian/patches-rt/0003-clocksource-drivers-tcb_clksrc-Use-tcb-as-sched_cloc.patch
+++ /dev/null
@@ -1,74 +0,0 @@
-From: Alexandre Belloni <alexandre.belloni@bootlin.com>
-Date: Fri, 26 Apr 2019 23:47:12 +0200
-Subject: [PATCH 03/10] clocksource/drivers/tcb_clksrc: Use tcb as sched_clock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-Now that the driver is registered early enough, use the TCB as the
-sched_clock which is much more accurate than the jiffies implementation.
-
-Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/clocksource/tcb_clksrc.c | 16 ++++++++++++++++
- 1 file changed, 16 insertions(+)
-
---- a/drivers/clocksource/tcb_clksrc.c
-+++ b/drivers/clocksource/tcb_clksrc.c
-@@ -11,6 +11,7 @@
- #include <linux/io.h>
- #include <linux/of_address.h>
- #include <linux/of_irq.h>
-+#include <linux/sched_clock.h>
- #include <linux/syscore_ops.h>
- #include <soc/at91/atmel_tcb.h>
-
-@@ -114,6 +115,16 @@ static struct clocksource clksrc = {
- .resume = tc_clksrc_resume,
- };
-
-+static u64 notrace tc_sched_clock_read(void)
-+{
-+ return tc_get_cycles(&clksrc);
-+}
-+
-+static u64 notrace tc_sched_clock_read32(void)
-+{
-+ return tc_get_cycles32(&clksrc);
-+}
-+
- #ifdef CONFIG_GENERIC_CLOCKEVENTS
-
- struct tc_clkevt_device {
-@@ -335,6 +346,7 @@ static int __init tcb_clksrc_init(struct
- struct atmel_tc tc;
- struct clk *t0_clk;
- const struct of_device_id *match;
-+ u64 (*tc_sched_clock)(void);
- u32 rate, divided_rate = 0;
- int best_divisor_idx = -1;
- int clk32k_divisor_idx = -1;
-@@ -419,6 +431,7 @@ static int __init tcb_clksrc_init(struct
- clksrc.read = tc_get_cycles32;
- /* setup ony channel 0 */
- tcb_setup_single_chan(&tc, best_divisor_idx);
-+ tc_sched_clock = tc_sched_clock_read32;
- } else {
- /* we have three clocks no matter what the
- * underlying platform supports.
-@@ -430,6 +443,7 @@ static int __init tcb_clksrc_init(struct
- }
- /* setup both channel 0 & 1 */
- tcb_setup_dual_chan(&tc, best_divisor_idx);
-+ tc_sched_clock = tc_sched_clock_read;
- }
-
- /* and away we go! */
-@@ -442,6 +456,8 @@ static int __init tcb_clksrc_init(struct
- if (ret)
- goto err_unregister_clksrc;
-
-+ sched_clock_register(tc_sched_clock, 32, divided_rate);
-+
- return 0;
-
- err_unregister_clksrc:
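
The essential addition in this dropped patch (also merged upstream in 5.2) is
the sched_clock registration; the hunks show the real tc_sched_clock_read()
variants. A generic sketch of the same hookup, with an assumed free-running
counter register:

    static void __iomem *counter_base;      /* assumed counter register */

    static u64 notrace my_sched_clock_read(void)
    {
            return readl_relaxed(counter_base);     /* raw 32-bit cycles */
    }

    static void my_timer_setup(u32 rate)
    {
            /* 32 bits wide, ticking at 'rate' Hz */
            sched_clock_register(my_sched_clock_read, 32, rate);
    }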
diff --git a/debian/patches-rt/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch b/debian/patches-rt/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch
index c10fcc141..6196f2b88 100644
--- a/debian/patches-rt/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch
+++ b/debian/patches-rt/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 28 May 2018 15:24:22 +0200
Subject: [PATCH 3/4] mm/SLxB: change list_lock to raw_spinlock_t
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
The list_lock is used with IRQs off on RT. Make it a raw_spinlock_t
otherwise the interrupts won't be disabled on -RT. The locking rules remain
file for the struct kmem_cache_node definition.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- mm/slab.c | 94 +++++++++++++++++++++++++++++++-------------------------------
+ mm/slab.c | 90 +++++++++++++++++++++++++++++++-------------------------------
mm/slab.h | 2 -
- mm/slub.c | 50 ++++++++++++++++----------------
- 3 files changed, 73 insertions(+), 73 deletions(-)
+ mm/slub.c | 50 +++++++++++++++++-----------------
+ 3 files changed, 71 insertions(+), 71 deletions(-)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
parent->free_objects = 0;
parent->free_touched = 0;
}
-@@ -587,9 +587,9 @@ static noinline void cache_free_pfmemall
+@@ -564,9 +564,9 @@ static noinline void cache_free_pfmemall
page_node = page_to_nid(page);
n = get_node(cachep, page_node);
@@ -40,7 +40,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slabs_destroy(cachep, &list);
}
-@@ -718,7 +718,7 @@ static void __drain_alien_cache(struct k
+@@ -694,7 +694,7 @@ static void __drain_alien_cache(struct k
struct kmem_cache_node *n = get_node(cachep, node);
if (ac->avail) {
@@ -49,7 +49,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Stuff objects into the remote nodes shared array first.
* That way we could avoid the overhead of putting the objects
-@@ -729,7 +729,7 @@ static void __drain_alien_cache(struct k
+@@ -705,7 +705,7 @@ static void __drain_alien_cache(struct k
free_block(cachep, ac->entry, ac->avail, node, list);
ac->avail = 0;
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -802,9 +802,9 @@ static int __cache_free_alien(struct kme
+@@ -778,9 +778,9 @@ static int __cache_free_alien(struct kme
slabs_destroy(cachep, &list);
} else {
n = get_node(cachep, page_node);
@@ -70,7 +70,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slabs_destroy(cachep, &list);
}
return 1;
-@@ -845,10 +845,10 @@ static int init_cache_node(struct kmem_c
+@@ -821,10 +821,10 @@ static int init_cache_node(struct kmem_c
*/
n = get_node(cachep, node);
if (n) {
@@ -83,7 +83,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
}
-@@ -927,7 +927,7 @@ static int setup_kmem_cache_node(struct
+@@ -903,7 +903,7 @@ static int setup_kmem_cache_node(struct
goto fail;
n = get_node(cachep, node);
@@ -92,7 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (n->shared && force_change) {
free_block(cachep, n->shared->entry,
n->shared->avail, node, &list);
-@@ -945,7 +945,7 @@ static int setup_kmem_cache_node(struct
+@@ -921,7 +921,7 @@ static int setup_kmem_cache_node(struct
new_alien = NULL;
}
@@ -101,7 +101,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slabs_destroy(cachep, &list);
/*
-@@ -984,7 +984,7 @@ static void cpuup_canceled(long cpu)
+@@ -960,7 +960,7 @@ static void cpuup_canceled(long cpu)
if (!n)
continue;
@@ -110,8 +110,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Free limit for this kmem_cache_node */
n->free_limit -= cachep->batchcount;
-@@ -997,7 +997,7 @@ static void cpuup_canceled(long cpu)
- }
+@@ -971,7 +971,7 @@ static void cpuup_canceled(long cpu)
+ nc->avail = 0;
if (!cpumask_empty(mask)) {
- spin_unlock_irq(&n->list_lock);
@@ -119,7 +119,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto free_slab;
}
-@@ -1011,7 +1011,7 @@ static void cpuup_canceled(long cpu)
+@@ -985,7 +985,7 @@ static void cpuup_canceled(long cpu)
alien = n->alien;
n->alien = NULL;
@@ -128,7 +128,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
kfree(shared);
if (alien) {
-@@ -1195,7 +1195,7 @@ static void __init init_list(struct kmem
+@@ -1169,7 +1169,7 @@ static void __init init_list(struct kmem
/*
* Do not assume that spinlocks can be initialized via memcpy:
*/
@@ -137,7 +137,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
MAKE_ALL_LISTS(cachep, ptr, nodeid);
cachep->node[nodeid] = ptr;
-@@ -1366,11 +1366,11 @@ slab_out_of_memory(struct kmem_cache *ca
+@@ -1340,11 +1340,11 @@ slab_out_of_memory(struct kmem_cache *ca
for_each_kmem_cache_node(cachep, node, n) {
unsigned long total_slabs, free_slabs, free_objs;
@@ -151,7 +151,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
node, total_slabs - free_slabs, total_slabs,
-@@ -2165,7 +2165,7 @@ static void check_spinlock_acquired(stru
+@@ -2107,7 +2107,7 @@ static void check_spinlock_acquired(stru
{
#ifdef CONFIG_SMP
check_irq_off();
@@ -160,7 +160,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
}
-@@ -2173,7 +2173,7 @@ static void check_spinlock_acquired_node
+@@ -2115,7 +2115,7 @@ static void check_spinlock_acquired_node
{
#ifdef CONFIG_SMP
check_irq_off();
@@ -169,7 +169,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
}
-@@ -2213,9 +2213,9 @@ static void do_drain(void *arg)
+@@ -2155,9 +2155,9 @@ static void do_drain(void *arg)
check_irq_off();
ac = cpu_cache_get(cachep);
n = get_node(cachep, node);
@@ -181,7 +181,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slabs_destroy(cachep, &list);
ac->avail = 0;
}
-@@ -2233,9 +2233,9 @@ static void drain_cpu_caches(struct kmem
+@@ -2175,9 +2175,9 @@ static void drain_cpu_caches(struct kmem
drain_alien_cache(cachep, n->alien);
for_each_kmem_cache_node(cachep, node, n) {
@@ -193,7 +193,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slabs_destroy(cachep, &list);
}
-@@ -2257,10 +2257,10 @@ static int drain_freelist(struct kmem_ca
+@@ -2199,10 +2199,10 @@ static int drain_freelist(struct kmem_ca
nr_freed = 0;
while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
@@ -206,7 +206,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto out;
}
-@@ -2273,7 +2273,7 @@ static int drain_freelist(struct kmem_ca
+@@ -2215,7 +2215,7 @@ static int drain_freelist(struct kmem_ca
* to the cache.
*/
n->free_objects -= cache->num;
@@ -215,16 +215,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slab_destroy(cache, page);
nr_freed++;
}
-@@ -2728,7 +2728,7 @@ static void cache_grow_end(struct kmem_c
- INIT_LIST_HEAD(&page->lru);
+@@ -2664,7 +2664,7 @@ static void cache_grow_end(struct kmem_c
+ INIT_LIST_HEAD(&page->slab_list);
n = get_node(cachep, page_to_nid(page));
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
n->total_slabs++;
if (!page->active) {
- list_add_tail(&page->lru, &(n->slabs_free));
-@@ -2738,7 +2738,7 @@ static void cache_grow_end(struct kmem_c
+ list_add_tail(&page->slab_list, &n->slabs_free);
+@@ -2674,7 +2674,7 @@ static void cache_grow_end(struct kmem_c
STATS_INC_GROWN(cachep);
n->free_objects += cachep->num - page->active;
@@ -233,16 +233,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
fixup_objfreelist_debug(cachep, &list);
}
-@@ -2906,7 +2906,7 @@ static struct page *get_first_slab(struc
+@@ -2840,7 +2840,7 @@ static struct page *get_first_slab(struc
{
struct page *page;
- assert_spin_locked(&n->list_lock);
+ assert_raw_spin_locked(&n->list_lock);
- page = list_first_entry_or_null(&n->slabs_partial, struct page, lru);
+ page = list_first_entry_or_null(&n->slabs_partial, struct page,
+ slab_list);
if (!page) {
- n->free_touched = 1;
-@@ -2932,10 +2932,10 @@ static noinline void *cache_alloc_pfmema
+@@ -2867,10 +2867,10 @@ static noinline void *cache_alloc_pfmema
if (!gfp_pfmemalloc_allowed(flags))
return NULL;
@@ -255,7 +255,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return NULL;
}
-@@ -2944,7 +2944,7 @@ static noinline void *cache_alloc_pfmema
+@@ -2879,7 +2879,7 @@ static noinline void *cache_alloc_pfmema
fixup_slab_list(cachep, n, page, &list);
@@ -264,7 +264,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
fixup_objfreelist_debug(cachep, &list);
return obj;
-@@ -3003,7 +3003,7 @@ static void *cache_alloc_refill(struct k
+@@ -2938,7 +2938,7 @@ static void *cache_alloc_refill(struct k
if (!n->free_objects && (!shared || !shared->avail))
goto direct_grow;
@@ -273,7 +273,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
shared = READ_ONCE(n->shared);
/* See if we can refill from the shared array */
-@@ -3027,7 +3027,7 @@ static void *cache_alloc_refill(struct k
+@@ -2962,7 +2962,7 @@ static void *cache_alloc_refill(struct k
must_grow:
n->free_objects -= ac->avail;
alloc_done:
@@ -282,7 +282,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
fixup_objfreelist_debug(cachep, &list);
direct_grow:
-@@ -3252,7 +3252,7 @@ static void *____cache_alloc_node(struct
+@@ -3187,7 +3187,7 @@ static void *____cache_alloc_node(struct
BUG_ON(!n);
check_irq_off();
@@ -291,7 +291,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
page = get_first_slab(n, false);
if (!page)
goto must_grow;
-@@ -3270,12 +3270,12 @@ static void *____cache_alloc_node(struct
+@@ -3205,12 +3205,12 @@ static void *____cache_alloc_node(struct
fixup_slab_list(cachep, n, page, &list);
@@ -306,7 +306,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
if (page) {
/* This slab isn't counted yet so don't update free_objects */
-@@ -3451,7 +3451,7 @@ static void cache_flusharray(struct kmem
+@@ -3386,7 +3386,7 @@ static void cache_flusharray(struct kmem
check_irq_off();
n = get_node(cachep, node);
@@ -315,7 +315,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (n->shared) {
struct array_cache *shared_array = n->shared;
int max = shared_array->limit - shared_array->avail;
-@@ -3480,7 +3480,7 @@ static void cache_flusharray(struct kmem
+@@ -3415,7 +3415,7 @@ static void cache_flusharray(struct kmem
STATS_SET_FREEABLE(cachep, i);
}
#endif
@@ -324,7 +324,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slabs_destroy(cachep, &list);
ac->avail -= batchcount;
memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
-@@ -3888,9 +3888,9 @@ static int __do_tune_cpucache(struct kme
+@@ -3829,9 +3829,9 @@ static int __do_tune_cpucache(struct kme
node = cpu_to_mem(cpu);
n = get_node(cachep, node);
@@ -336,7 +336,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slabs_destroy(cachep, &list);
}
free_percpu(prev);
-@@ -4015,9 +4015,9 @@ static void drain_array(struct kmem_cach
+@@ -3956,9 +3956,9 @@ static void drain_array(struct kmem_cach
return;
}
@@ -348,7 +348,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slabs_destroy(cachep, &list);
}
-@@ -4101,7 +4101,7 @@ void get_slabinfo(struct kmem_cache *cac
+@@ -4042,7 +4042,7 @@ void get_slabinfo(struct kmem_cache *cac
for_each_kmem_cache_node(cachep, node, n) {
check_irq_on();
@@ -357,7 +357,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
total_slabs += n->total_slabs;
free_slabs += n->free_slabs;
-@@ -4110,7 +4110,7 @@ void get_slabinfo(struct kmem_cache *cac
+@@ -4051,7 +4051,7 @@ void get_slabinfo(struct kmem_cache *cac
if (n->shared)
shared_avail += n->shared->avail;
@@ -366,25 +366,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
num_objs = total_slabs * cachep->num;
active_slabs = total_slabs - free_slabs;
-@@ -4325,13 +4325,13 @@ static int leaks_show(struct seq_file *m
- for_each_kmem_cache_node(cachep, node, n) {
-
- check_irq_on();
-- spin_lock_irq(&n->list_lock);
-+ raw_spin_lock_irq(&n->list_lock);
-
- list_for_each_entry(page, &n->slabs_full, lru)
- handle_slab(x, cachep, page);
- list_for_each_entry(page, &n->slabs_partial, lru)
- handle_slab(x, cachep, page);
-- spin_unlock_irq(&n->list_lock);
-+ raw_spin_unlock_irq(&n->list_lock);
- }
- } while (!is_store_user_clean(cachep));
-
--- a/mm/slab.h
+++ b/mm/slab.h
-@@ -453,7 +453,7 @@ static inline void slab_post_alloc_hook(
+@@ -449,7 +449,7 @@ static inline void slab_post_alloc_hook(
* The slab lists for all objects.
*/
struct kmem_cache_node {
@@ -395,7 +379,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct list_head slabs_partial; /* partial list first, better asm code */
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -1184,7 +1184,7 @@ static noinline int free_debug_processin
+@@ -1175,7 +1175,7 @@ static noinline int free_debug_processin
unsigned long uninitialized_var(flags);
int ret = 0;
@@ -404,7 +388,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slab_lock(page);
if (s->flags & SLAB_CONSISTENCY_CHECKS) {
-@@ -1219,7 +1219,7 @@ static noinline int free_debug_processin
+@@ -1210,7 +1210,7 @@ static noinline int free_debug_processin
bulk_cnt, cnt);
slab_unlock(page);
@@ -413,16 +397,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!ret)
slab_fix(s, "Object at 0x%p not freed", object);
return ret;
-@@ -1863,7 +1863,7 @@ static void *get_partial_node(struct kme
+@@ -1854,7 +1854,7 @@ static void *get_partial_node(struct kme
if (!n || !n->nr_partial)
return NULL;
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
- list_for_each_entry_safe(page, page2, &n->partial, lru) {
+ list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
void *t;
-@@ -1888,7 +1888,7 @@ static void *get_partial_node(struct kme
+@@ -1879,7 +1879,7 @@ static void *get_partial_node(struct kme
break;
}
@@ -431,7 +415,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return object;
}
-@@ -2134,7 +2134,7 @@ static void deactivate_slab(struct kmem_
+@@ -2125,7 +2125,7 @@ static void deactivate_slab(struct kmem_
* that acquire_slab() will see a slab page that
* is frozen
*/
@@ -440,7 +424,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
} else {
m = M_FULL;
-@@ -2145,7 +2145,7 @@ static void deactivate_slab(struct kmem_
+@@ -2136,7 +2136,7 @@ static void deactivate_slab(struct kmem_
* slabs from diagnostic functions will not see
* any frozen slabs.
*/
@@ -449,7 +433,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -2169,7 +2169,7 @@ static void deactivate_slab(struct kmem_
+@@ -2160,7 +2160,7 @@ static void deactivate_slab(struct kmem_
goto redo;
if (lock)
@@ -458,7 +442,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (m == M_PARTIAL)
stat(s, tail);
-@@ -2208,10 +2208,10 @@ static void unfreeze_partials(struct kme
+@@ -2199,10 +2199,10 @@ static void unfreeze_partials(struct kme
n2 = get_node(s, page_to_nid(page));
if (n != n2) {
if (n)
@@ -471,7 +455,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
do {
-@@ -2240,7 +2240,7 @@ static void unfreeze_partials(struct kme
+@@ -2231,7 +2231,7 @@ static void unfreeze_partials(struct kme
}
if (n)
@@ -480,20 +464,20 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
while (discard_page) {
page = discard_page;
-@@ -2407,10 +2407,10 @@ static unsigned long count_partial(struc
+@@ -2398,10 +2398,10 @@ static unsigned long count_partial(struc
unsigned long x = 0;
struct page *page;
- spin_lock_irqsave(&n->list_lock, flags);
+ raw_spin_lock_irqsave(&n->list_lock, flags);
- list_for_each_entry(page, &n->partial, lru)
+ list_for_each_entry(page, &n->partial, slab_list)
x += get_count(page);
- spin_unlock_irqrestore(&n->list_lock, flags);
+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
return x;
}
#endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
-@@ -2845,7 +2845,7 @@ static void __slab_free(struct kmem_cach
+@@ -2835,7 +2835,7 @@ static void __slab_free(struct kmem_cach
do {
if (unlikely(n)) {
@@ -502,7 +486,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
n = NULL;
}
prior = page->freelist;
-@@ -2877,7 +2877,7 @@ static void __slab_free(struct kmem_cach
+@@ -2867,7 +2867,7 @@ static void __slab_free(struct kmem_cach
* Otherwise the list_lock will synchronize with
* other processors updating the list of slabs.
*/
@@ -511,7 +495,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -2919,7 +2919,7 @@ static void __slab_free(struct kmem_cach
+@@ -2908,7 +2908,7 @@ static void __slab_free(struct kmem_cach
add_partial(n, page, DEACTIVATE_TO_TAIL);
stat(s, FREE_ADD_PARTIAL);
}
@@ -520,7 +504,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return;
slab_empty:
-@@ -2934,7 +2934,7 @@ static void __slab_free(struct kmem_cach
+@@ -2923,7 +2923,7 @@ static void __slab_free(struct kmem_cach
remove_full(s, n, page);
}
@@ -529,7 +513,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
stat(s, FREE_SLAB);
discard_slab(s, page);
}
-@@ -3321,7 +3321,7 @@ static void
+@@ -3310,7 +3310,7 @@ static void
init_kmem_cache_node(struct kmem_cache_node *n)
{
n->nr_partial = 0;
@@ -538,25 +522,25 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
INIT_LIST_HEAD(&n->partial);
#ifdef CONFIG_SLUB_DEBUG
atomic_long_set(&n->nr_slabs, 0);
-@@ -3706,7 +3706,7 @@ static void free_partial(struct kmem_cac
+@@ -3695,7 +3695,7 @@ static void free_partial(struct kmem_cac
struct page *page, *h;
BUG_ON(irqs_disabled());
- spin_lock_irq(&n->list_lock);
+ raw_spin_lock_irq(&n->list_lock);
- list_for_each_entry_safe(page, h, &n->partial, lru) {
+ list_for_each_entry_safe(page, h, &n->partial, slab_list) {
if (!page->inuse) {
remove_partial(n, page);
-@@ -3716,7 +3716,7 @@ static void free_partial(struct kmem_cac
+@@ -3705,7 +3705,7 @@ static void free_partial(struct kmem_cac
"Objects remaining in %s on __kmem_cache_shutdown()");
}
}
- spin_unlock_irq(&n->list_lock);
+ raw_spin_unlock_irq(&n->list_lock);
- list_for_each_entry_safe(page, h, &discard, lru)
+ list_for_each_entry_safe(page, h, &discard, slab_list)
discard_slab(s, page);
-@@ -3990,7 +3990,7 @@ int __kmem_cache_shrink(struct kmem_cach
+@@ -3979,7 +3979,7 @@ int __kmem_cache_shrink(struct kmem_cach
for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
INIT_LIST_HEAD(promote + i);
@@ -565,7 +549,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Build lists of slabs to discard or promote.
-@@ -4021,7 +4021,7 @@ int __kmem_cache_shrink(struct kmem_cach
+@@ -4010,7 +4010,7 @@ int __kmem_cache_shrink(struct kmem_cach
for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
list_splice(promote + i, &n->partial);
@@ -573,17 +557,17 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
/* Release empty slabs */
- list_for_each_entry_safe(page, t, &discard, lru)
-@@ -4435,7 +4435,7 @@ static int validate_slab_node(struct kme
+ list_for_each_entry_safe(page, t, &discard, slab_list)
+@@ -4424,7 +4424,7 @@ static int validate_slab_node(struct kme
struct page *page;
unsigned long flags;
- spin_lock_irqsave(&n->list_lock, flags);
+ raw_spin_lock_irqsave(&n->list_lock, flags);
- list_for_each_entry(page, &n->partial, lru) {
+ list_for_each_entry(page, &n->partial, slab_list) {
validate_slab_slab(s, page, map);
-@@ -4457,7 +4457,7 @@ static int validate_slab_node(struct kme
+@@ -4446,7 +4446,7 @@ static int validate_slab_node(struct kme
s->name, count, atomic_long_read(&n->nr_slabs));
out:
@@ -592,15 +576,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return count;
}
-@@ -4643,12 +4643,12 @@ static int list_locations(struct kmem_ca
+@@ -4632,12 +4632,12 @@ static int list_locations(struct kmem_ca
if (!atomic_long_read(&n->nr_slabs))
continue;
- spin_lock_irqsave(&n->list_lock, flags);
+ raw_spin_lock_irqsave(&n->list_lock, flags);
- list_for_each_entry(page, &n->partial, lru)
+ list_for_each_entry(page, &n->partial, slab_list)
process_slab(&t, s, page, alloc, map);
- list_for_each_entry(page, &n->full, lru)
+ list_for_each_entry(page, &n->full, slab_list)
process_slab(&t, s, page, alloc, map);
- spin_unlock_irqrestore(&n->list_lock, flags);
+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
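
The whole patch is one mechanical conversion, and the rule it applies is
worth spelling out once: on PREEMPT_RT a plain spinlock_t becomes a sleeping
lock, so a lock that genuinely has to be taken with interrupts hard-disabled,
like list_lock here, is declared raw_spinlock_t and every operation gains the
raw_ prefix. A minimal before/after sketch (struct and function names are
illustrative):

    struct node_sketch {
            raw_spinlock_t   list_lock;     /* was: spinlock_t list_lock; */
            struct list_head partial;
    };

    static void add_partial_sketch(struct node_sketch *n, struct list_head *e)
    {
            unsigned long flags;

            /* Hard IRQ-off critical section, even on PREEMPT_RT. */
            raw_spin_lock_irqsave(&n->list_lock, flags);
            list_add(e, &n->partial);
            raw_spin_unlock_irqrestore(&n->list_lock, flags);
    }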
diff --git a/debian/patches-rt/0003-printk-rb-define-ring-buffer-struct-and-initializer.patch b/debian/patches-rt/0003-printk-rb-define-ring-buffer-struct-and-initializer.patch
index c2da97cac..4b182c1a1 100644
--- a/debian/patches-rt/0003-printk-rb-define-ring-buffer-struct-and-initializer.patch
+++ b/debian/patches-rt/0003-printk-rb-define-ring-buffer-struct-and-initializer.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 12 Feb 2019 15:29:41 +0100
Subject: [PATCH 03/25] printk-rb: define ring buffer struct and initializer
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
See Documentation/printk-ringbuffer.txt for details about the
initializer arguments.
diff --git a/debian/patches-rt/0003-x86-fpu-Remove-preempt_disable-in-fpu__clear.patch b/debian/patches-rt/0003-x86-fpu-Remove-preempt_disable-in-fpu__clear.patch
deleted file mode 100644
index ba4cb872d..000000000
--- a/debian/patches-rt/0003-x86-fpu-Remove-preempt_disable-in-fpu__clear.patch
+++ /dev/null
@@ -1,43 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 17 Oct 2018 14:58:28 +0200
-Subject: [PATCH 03/27] x86/fpu: Remove preempt_disable() in fpu__clear()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-The preempt_disable() section was introduced in commit
-
- a10b6a16cdad8 ("x86/fpu: Make the fpu state change in fpu__clear() scheduler-atomic")
-
-and it was said to be temporary.
-
-fpu__initialize() initializes the FPU struct to its "init" value and
-then sets ->initialized to 1. The last part is the important one.
-The content of the `state' does not matter because it gets set via
-copy_init_fpstate_to_fpregs().
-A preemption here has little meaning because the registers will always be
-set to the same content after copy_init_fpstate_to_fpregs(). A softirq
-with a kernel_fpu_begin() could also force to save FPU's registers after
-fpu__initialize() without changing the outcome here.
-
-Remove the preempt_disable() section in fpu__clear(), preemption here
-does not hurt.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Reviewed-by: Borislav Petkov <bp@suse.de>
----
- arch/x86/kernel/fpu/core.c | 2 --
- 1 file changed, 2 deletions(-)
-
---- a/arch/x86/kernel/fpu/core.c
-+++ b/arch/x86/kernel/fpu/core.c
-@@ -366,11 +366,9 @@ void fpu__clear(struct fpu *fpu)
- * Make sure fpstate is cleared and initialized.
- */
- if (static_cpu_has(X86_FEATURE_FPU)) {
-- preempt_disable();
- fpu__initialize(fpu);
- user_fpu_begin();
- copy_init_fpstate_to_fpregs();
-- preempt_enable();
- }
- }
-
diff --git a/debian/patches-rt/0004-ARM-at91-Implement-clocksource-selection.patch b/debian/patches-rt/0004-ARM-at91-Implement-clocksource-selection.patch
deleted file mode 100644
index 4becaf254..000000000
--- a/debian/patches-rt/0004-ARM-at91-Implement-clocksource-selection.patch
+++ /dev/null
@@ -1,46 +0,0 @@
-From: Alexandre Belloni <alexandre.belloni@bootlin.com>
-Date: Fri, 26 Apr 2019 23:47:13 +0200
-Subject: [PATCH 04/10] ARM: at91: Implement clocksource selection
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-Allow selecting and unselecting the PIT clocksource driver so it doesn't
-have to be compiled when unused.
-
-Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/arm/mach-at91/Kconfig | 23 +++++++++++++++++++++++
- 1 file changed, 23 insertions(+)
-
---- a/arch/arm/mach-at91/Kconfig
-+++ b/arch/arm/mach-at91/Kconfig
-@@ -107,6 +107,29 @@ config SOC_AT91SAM9
- AT91SAM9X35
- AT91SAM9XE
-
-+comment "Clocksource driver selection"
-+
-+config ATMEL_CLOCKSOURCE_PIT
-+ bool "Periodic Interval Timer (PIT) support"
-+ depends on SOC_AT91SAM9 || SOC_SAMA5
-+ default SOC_AT91SAM9 || SOC_SAMA5
-+ select ATMEL_PIT
-+ help
-+ Select this to get a clocksource based on the Atmel Periodic Interval
-+ Timer. It has a relatively low resolution and the TC Block clocksource
-+ should be preferred.
-+
-+config ATMEL_CLOCKSOURCE_TCB
-+ bool "Timer Counter Blocks (TCB) support"
-+ default SOC_AT91RM9200 || SOC_AT91SAM9 || SOC_SAMA5
-+ select ATMEL_TCB_CLKSRC
-+ help
-+ Select this to get a high precision clocksource based on a
-+ TC block with a 5+ MHz base clock rate.
-+ On platforms with 16-bit counters, two timer channels are combined
-+ to make a single 32-bit timer.
-+ It can also be used as a clock event device supporting oneshot mode.
-+
- config HAVE_AT91_UTMI
- bool
-
diff --git a/debian/patches-rt/0004-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch b/debian/patches-rt/0004-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch
index 8d4248c37..a6d731fb1 100644
--- a/debian/patches-rt/0004-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch
+++ b/debian/patches-rt/0004-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch
@@ -2,7 +2,7 @@ From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 21 Jun 2018 17:29:19 +0200
Subject: [PATCH 4/4] mm/SLUB: delay giving back empty slubs to IRQ enabled
regions
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
__free_slab() is invoked with interrupts disabled, which increases the
IRQ-off time while __free_pages() is doing the work.
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -1387,6 +1387,12 @@ static inline void dec_slabs_node(struct
+@@ -1378,6 +1378,12 @@ static inline void dec_slabs_node(struct
#endif /* CONFIG_SLUB_DEBUG */
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Hooks for other subsystems that check memory allocations. In a typical
* production configuration these hooks all should produce no code at all.
-@@ -1745,6 +1751,16 @@ static void __free_slab(struct kmem_cach
+@@ -1736,6 +1742,16 @@ static void __free_slab(struct kmem_cach
__free_pages(page, order);
}
@@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void rcu_free_slab(struct rcu_head *h)
{
struct page *page = container_of(h, struct page, rcu_head);
-@@ -1756,6 +1772,12 @@ static void free_slab(struct kmem_cache
+@@ -1747,6 +1763,12 @@ static void free_slab(struct kmem_cache
{
if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
call_rcu(&page->rcu_head, rcu_free_slab);
@@ -61,7 +61,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
} else
__free_slab(s, page);
}
-@@ -2277,14 +2299,21 @@ static void put_cpu_partial(struct kmem_
+@@ -2268,14 +2290,21 @@ static void put_cpu_partial(struct kmem_
pobjects = oldpage->pobjects;
pages = oldpage->pages;
if (drain && pobjects > s->cpu_partial) {
@@ -83,7 +83,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
oldpage = NULL;
pobjects = 0;
pages = 0;
-@@ -2352,7 +2381,22 @@ static bool has_cpu_slab(int cpu, void *
+@@ -2343,7 +2372,22 @@ static bool has_cpu_slab(int cpu, void *
static void flush_all(struct kmem_cache *s)
{
@@ -106,7 +106,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -2550,8 +2594,10 @@ static inline void *get_freelist(struct
+@@ -2540,8 +2584,10 @@ static inline void *get_freelist(struct
* already disabled (which is the case for bulk allocation).
*/
static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
@@ -118,7 +118,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void *freelist;
struct page *page;
-@@ -2607,6 +2653,13 @@ static void *___slab_alloc(struct kmem_c
+@@ -2597,6 +2643,13 @@ static void *___slab_alloc(struct kmem_c
VM_BUG_ON(!c->page->frozen);
c->freelist = get_freepointer(s, freelist);
c->tid = next_tid(c->tid);
@@ -132,7 +132,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return freelist;
new_slab:
-@@ -2622,7 +2675,7 @@ static void *___slab_alloc(struct kmem_c
+@@ -2612,7 +2665,7 @@ static void *___slab_alloc(struct kmem_c
if (unlikely(!freelist)) {
slab_out_of_memory(s, gfpflags, node);
@@ -141,7 +141,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
page = c->page;
-@@ -2635,7 +2688,7 @@ static void *___slab_alloc(struct kmem_c
+@@ -2625,7 +2678,7 @@ static void *___slab_alloc(struct kmem_c
goto new_slab; /* Slab failed checks. Next slab needed */
deactivate_slab(s, page, get_freepointer(s, freelist), c);
@@ -150,7 +150,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -2647,6 +2700,7 @@ static void *__slab_alloc(struct kmem_ca
+@@ -2637,6 +2690,7 @@ static void *__slab_alloc(struct kmem_ca
{
void *p;
unsigned long flags;
@@ -158,7 +158,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
local_irq_save(flags);
#ifdef CONFIG_PREEMPT
-@@ -2658,8 +2712,9 @@ static void *__slab_alloc(struct kmem_ca
+@@ -2648,8 +2702,9 @@ static void *__slab_alloc(struct kmem_ca
c = this_cpu_ptr(s->cpu_slab);
#endif
@@ -169,7 +169,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return p;
}
-@@ -3137,6 +3192,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3126,6 +3181,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
void **p)
{
struct kmem_cache_cpu *c;
@@ -177,7 +177,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int i;
/* memcg and kmem_cache debug support */
-@@ -3160,7 +3216,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3149,7 +3205,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
* of re-populating per CPU c->freelist
*/
p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
@@ -186,7 +186,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (unlikely(!p[i]))
goto error;
-@@ -3172,6 +3228,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3161,6 +3217,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
}
c->tid = next_tid(c->tid);
local_irq_enable();
@@ -194,7 +194,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Clear memory outside IRQ disabled fastpath loop */
if (unlikely(flags & __GFP_ZERO)) {
-@@ -3186,6 +3243,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3175,6 +3232,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
return i;
error:
local_irq_enable();
@@ -202,7 +202,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slab_post_alloc_hook(s, flags, i, p);
__kmem_cache_free_bulk(s, i, p);
return 0;
-@@ -4234,6 +4292,12 @@ void __init kmem_cache_init(void)
+@@ -4223,6 +4281,12 @@ void __init kmem_cache_init(void)
{
static __initdata struct kmem_cache boot_kmem_cache,
boot_kmem_cache_node;
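
The mechanism in this patch, reduced to its core: while interrupts are off,
empty slabs are parked on a per-CPU list instead of being handed to
__free_slab(); the list is drained later from a context with IRQs enabled. A
minimal sketch under that assumption (the real patch also wires this into
flush_all() and an IPI, as the hunks show; names below are illustrative):

    /* assumed to be initialized with INIT_LIST_HEAD() for each CPU at boot */
    static DEFINE_PER_CPU(struct list_head, deferred_slabs);

    static void free_slab_deferred(struct kmem_cache *s, struct page *page)
    {
            if (irqs_disabled()) {
                    /* too expensive to free now: queue for later */
                    list_add(&page->lru, this_cpu_ptr(&deferred_slabs));
            } else {
                    __free_slab(s, page);
            }
    }

    static void drain_deferred_slabs(struct kmem_cache *s)
    {
            struct page *page, *next;
            LIST_HEAD(tofree);

            /* splice with IRQs off, free with IRQs on */
            local_irq_disable();
            list_splice_init(this_cpu_ptr(&deferred_slabs), &tofree);
            local_irq_enable();

            list_for_each_entry_safe(page, next, &tofree, lru)
                    __free_slab(s, page);
    }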
diff --git a/debian/patches-rt/0004-printk-rb-add-writer-interface.patch b/debian/patches-rt/0004-printk-rb-add-writer-interface.patch
index 7fdbcd4ee..c203e72f9 100644
--- a/debian/patches-rt/0004-printk-rb-add-writer-interface.patch
+++ b/debian/patches-rt/0004-printk-rb-add-writer-interface.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 12 Feb 2019 15:29:42 +0100
Subject: [PATCH 04/25] printk-rb: add writer interface
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Add the writer functions prb_reserve() and prb_commit(). These make
use of processor-reentrant spin locks to limit the number of possible
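
The reserve/commit writer pattern named here works roughly as follows; the
prb_reserve()/prb_commit() signatures in this sketch are assumed from the
series' description, not quoted from the patch:

    static void write_record(struct printk_ringbuffer *rb,
                             const char *text, unsigned int len)
    {
            struct prb_handle h;
            char *buf;

            buf = prb_reserve(&h, rb, len); /* takes the reentrant lock */
            if (!buf)
                    return;                 /* record too large to ever fit */
            memcpy(buf, text, len);
            prb_commit(&h);                 /* publish, drop the lock */
    }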
diff --git a/debian/patches-rt/0004-x86-fpu-Always-init-the-state-in-fpu__clear.patch b/debian/patches-rt/0004-x86-fpu-Always-init-the-state-in-fpu__clear.patch
deleted file mode 100644
index 3226a450b..000000000
--- a/debian/patches-rt/0004-x86-fpu-Always-init-the-state-in-fpu__clear.patch
+++ /dev/null
@@ -1,75 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 17 Oct 2018 15:27:34 +0200
-Subject: [PATCH 04/27] x86/fpu: Always init the `state' in fpu__clear()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-fpu__clear() only initializes the `state' if the FPU is present. This
-initialization is also required on FPU-less systems, where it takes place
-in math_emulate(). Since fpu__initialize() only performs the
-initialization if ->initialized is zero, it does not matter that it is
-invoked each time an opcode is emulated. Removing ->initialized becomes
-easier if the struct is also initialized in the FPU-less case at the
-same time.
-
-Move fpu__initialize() before the FPU check so it is also performed in
-the FPU-less case.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Reviewed-by: Borislav Petkov <bp@suse.de>
----
- arch/x86/include/asm/fpu/internal.h | 1 -
- arch/x86/kernel/fpu/core.c | 5 ++---
- arch/x86/math-emu/fpu_entry.c | 3 ---
- 3 files changed, 2 insertions(+), 7 deletions(-)
-
---- a/arch/x86/include/asm/fpu/internal.h
-+++ b/arch/x86/include/asm/fpu/internal.h
-@@ -24,7 +24,6 @@
- /*
- * High level FPU state handling functions:
- */
--extern void fpu__initialize(struct fpu *fpu);
- extern void fpu__prepare_read(struct fpu *fpu);
- extern void fpu__prepare_write(struct fpu *fpu);
- extern void fpu__save(struct fpu *fpu);
---- a/arch/x86/kernel/fpu/core.c
-+++ b/arch/x86/kernel/fpu/core.c
-@@ -223,7 +223,7 @@ int fpu__copy(struct fpu *dst_fpu, struc
- * Activate the current task's in-memory FPU context,
- * if it has not been used before:
- */
--void fpu__initialize(struct fpu *fpu)
-+static void fpu__initialize(struct fpu *fpu)
- {
- WARN_ON_FPU(fpu != &current->thread.fpu);
-
-@@ -236,7 +236,6 @@ void fpu__initialize(struct fpu *fpu)
- fpu->initialized = 1;
- }
- }
--EXPORT_SYMBOL_GPL(fpu__initialize);
-
- /*
- * This function must be called before we read a task's fpstate.
-@@ -365,8 +364,8 @@ void fpu__clear(struct fpu *fpu)
- /*
- * Make sure fpstate is cleared and initialized.
- */
-+ fpu__initialize(fpu);
- if (static_cpu_has(X86_FEATURE_FPU)) {
-- fpu__initialize(fpu);
- user_fpu_begin();
- copy_init_fpstate_to_fpregs();
- }
---- a/arch/x86/math-emu/fpu_entry.c
-+++ b/arch/x86/math-emu/fpu_entry.c
-@@ -113,9 +113,6 @@ void math_emulate(struct math_emu_info *
- unsigned long code_base = 0;
- unsigned long code_limit = 0; /* Initialized to stop compiler warnings */
- struct desc_struct code_descriptor;
-- struct fpu *fpu = &current->thread.fpu;
--
-- fpu__initialize(fpu);
-
- #ifdef RE_ENTRANT_CHECKING
- if (emulating) {
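
The hunk above moves fpu__initialize() ahead of the FPU feature check. Reduced to a sketch with stubbed helpers (not the real kernel routines), the resulting control flow is:

#include <stdbool.h>
#include <stdio.h>

static bool have_fpu;   /* stand-in for static_cpu_has(X86_FEATURE_FPU) */

static void init_fpstate(void)    { puts("init in-memory FPU state"); }
static void load_init_state(void) { puts("load init state into registers"); }

static void fpu_clear(void)
{
    init_fpstate();          /* now unconditional: also runs on FPU-less systems */
    if (have_fpu)
        load_init_state();   /* register work only with a real FPU */
}

int main(void)
{
    fpu_clear();   /* with have_fpu == false, only the memory init runs */
    return 0;
}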
diff --git a/debian/patches-rt/0005-clocksource-drivers-tcb_clksrc-move-Kconfig-option.patch b/debian/patches-rt/0005-clocksource-drivers-tcb_clksrc-move-Kconfig-option.patch
deleted file mode 100644
index 374831628..000000000
--- a/debian/patches-rt/0005-clocksource-drivers-tcb_clksrc-move-Kconfig-option.patch
+++ /dev/null
@@ -1,55 +0,0 @@
-From: Alexandre Belloni <alexandre.belloni@bootlin.com>
-Date: Fri, 26 Apr 2019 23:47:14 +0200
-Subject: [PATCH 05/10] clocksource/drivers/tcb_clksrc: move Kconfig option
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-Move the ATMEL_TCB_CLKSRC option to drivers/clocksource and make it silent
-if COMPILE_TEST is not selected.
-
-Cc: Arnd Bergmann <arnd@arndb.de>
-Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/clocksource/Kconfig | 7 +++++++
- drivers/misc/Kconfig | 14 --------------
- 2 files changed, 7 insertions(+), 14 deletions(-)
-
---- a/drivers/clocksource/Kconfig
-+++ b/drivers/clocksource/Kconfig
-@@ -409,6 +409,13 @@ config ATMEL_ST
- help
- Support for the Atmel ST timer.
-
-+config ATMEL_TCB_CLKSRC
-+ bool "Atmel TC Block timer driver" if COMPILE_TEST
-+ depends on HAS_IOMEM
-+ select TIMER_OF if OF
-+ help
-+ Support for Timer Counter Blocks on Atmel SoCs.
-+
- config CLKSRC_EXYNOS_MCT
- bool "Exynos multi core timer driver" if COMPILE_TEST
- depends on ARM || ARM64
---- a/drivers/misc/Kconfig
-+++ b/drivers/misc/Kconfig
-@@ -59,20 +59,6 @@ config ATMEL_TCLIB
- blocks found on many Atmel processors. This facilitates using
- these blocks by different drivers despite processor differences.
-
--config ATMEL_TCB_CLKSRC
-- bool "TC Block Clocksource"
-- depends on ARCH_AT91
-- select TIMER_OF if OF
-- default y
-- help
-- Select this to get a high precision clocksource based on a
-- TC block with a 5+ MHz base clock rate. Two timer channels
-- are combined to make a single 32-bit timer.
--
-- When GENERIC_CLOCKEVENTS is defined, the third timer channel
-- may be used as a clock event device supporting oneshot mode
-- (delays of up to two seconds) based on the 32 KiHz clock.
--
- config DUMMY_IRQ
- tristate "Dummy IRQ handler"
- default n
diff --git a/debian/patches-rt/0005-printk-rb-add-basic-non-blocking-reading-interface.patch b/debian/patches-rt/0005-printk-rb-add-basic-non-blocking-reading-interface.patch
index cd1d05181..3cfed83e2 100644
--- a/debian/patches-rt/0005-printk-rb-add-basic-non-blocking-reading-interface.patch
+++ b/debian/patches-rt/0005-printk-rb-add-basic-non-blocking-reading-interface.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 12 Feb 2019 15:29:43 +0100
Subject: [PATCH 05/25] printk-rb: add basic non-blocking reading interface
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Add reader iterator static declaration/initializer, dynamic
initializer, and functions to iterate and retrieve ring buffer data.
diff --git a/debian/patches-rt/0005-x86-fpu-Remove-fpu-initialized-usage-in-copy_fpstate.patch b/debian/patches-rt/0005-x86-fpu-Remove-fpu-initialized-usage-in-copy_fpstate.patch
deleted file mode 100644
index 5d6049d5e..000000000
--- a/debian/patches-rt/0005-x86-fpu-Remove-fpu-initialized-usage-in-copy_fpstate.patch
+++ /dev/null
@@ -1,90 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 19 Oct 2018 16:57:14 +0200
-Subject: [PATCH 05/27] x86/fpu: Remove fpu->initialized usage in
- copy_fpstate_to_sigframe()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-With lazy-FPU support, the variable now named ->initialized was set to true if
-the CPU's FPU registers held a valid state for the active process. If it was
-set to false, then the FPU state was saved in fpu->state and the FPU was
-deactivated.
-With lazy-FPU gone, ->initialized is always true for user threads, and kernel
-threads never invoke this function, so ->initialized is always true in
-copy_fpstate_to_sigframe().
-The using_compacted_format() check is also a leftover from the lazy-FPU time.
-In the `->initialized == false' case copy_to_user() would copy the compacted
-buffer while userland would expect the non-compacted format instead. So, in
-order to save the FPU state in the non-compacted form, the kernel issues the
-xsave opcode to save the *current* FPU state.
-The FPU is not enabled, so the attempt raises the FPU trap; the trap restores
-the FPU content and re-enables the FPU, and the xsave opcode is invoked again
-and succeeds. *This* no longer works since commit
-
- bef8b6da9522 ("x86/fpu: Handle #NM without FPU emulation as an error")
-
-Remove the check for ->initialized because it is always true, and remove the
-else branch. Update the comment to reflect that the "state is always live".
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/kernel/fpu/signal.c | 35 ++++++++---------------------------
- 1 file changed, 8 insertions(+), 27 deletions(-)
-
---- a/arch/x86/kernel/fpu/signal.c
-+++ b/arch/x86/kernel/fpu/signal.c
-@@ -144,9 +144,8 @@ static inline int copy_fpregs_to_sigfram
- * buf == buf_fx for 64-bit frames and 32-bit fsave frame.
- * buf != buf_fx for 32-bit frames with fxstate.
- *
-- * If the fpu, extended register state is live, save the state directly
-- * to the user frame pointed by the aligned pointer 'buf_fx'. Otherwise,
-- * copy the thread's fpu state to the user frame starting at 'buf_fx'.
-+ * Save the state directly to the user frame pointed by the aligned pointer
-+ * 'buf_fx'.
- *
- * If this is a 32-bit frame with fxstate, put a fsave header before
- * the aligned state at 'buf_fx'.
-@@ -157,7 +156,6 @@ static inline int copy_fpregs_to_sigfram
- int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
- {
- struct fpu *fpu = &current->thread.fpu;
-- struct xregs_state *xsave = &fpu->state.xsave;
- struct task_struct *tsk = current;
- int ia32_fxstate = (buf != buf_fx);
-
-@@ -172,29 +170,12 @@ int copy_fpstate_to_sigframe(void __user
- sizeof(struct user_i387_ia32_struct), NULL,
- (struct _fpstate_32 __user *) buf) ? -1 : 1;
-
-- if (fpu->initialized || using_compacted_format()) {
-- /* Save the live register state to the user directly. */
-- if (copy_fpregs_to_sigframe(buf_fx))
-- return -1;
-- /* Update the thread's fxstate to save the fsave header. */
-- if (ia32_fxstate)
-- copy_fxregs_to_kernel(fpu);
-- } else {
-- /*
-- * It is a *bug* if kernel uses compacted-format for xsave
-- * area and we copy it out directly to a signal frame. It
-- * should have been handled above by saving the registers
-- * directly.
-- */
-- if (boot_cpu_has(X86_FEATURE_XSAVES)) {
-- WARN_ONCE(1, "x86/fpu: saving compacted-format xsave area to a signal frame!\n");
-- return -1;
-- }
--
-- fpstate_sanitize_xstate(fpu);
-- if (__copy_to_user(buf_fx, xsave, fpu_user_xstate_size))
-- return -1;
-- }
-+ /* Save the live register state to the user directly. */
-+ if (copy_fpregs_to_sigframe(buf_fx))
-+ return -1;
-+ /* Update the thread's fxstate to save the fsave header. */
-+ if (ia32_fxstate)
-+ copy_fxregs_to_kernel(fpu);
-
- /* Save the fsave header for the 32-bit frames. */
- if ((ia32_fxstate || !use_fxsr()) && save_fsave_header(tsk, buf))
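
Reduced to a sketch with stubbed helpers (again, not the kernel functions), the save path this patch leaves behind looks like this: the live register state goes straight to the user frame, with one extra kernel-side copy only for 32-bit fxstate frames.

#include <stdbool.h>
#include <stdio.h>

/* Stubs for the real xsave/fxsave helpers. */
static int save_regs_to_user_frame(void *buf_fx)
{
    (void)buf_fx;
    puts("xsave: live registers -> user frame");
    return 0;
}

static void save_regs_to_kernel_state(void)
{
    puts("fxsave: live registers -> thread state (for the fsave header)");
}

static int copy_fpstate_to_frame(void *buf, void *buf_fx)
{
    bool ia32_fxstate = (buf != buf_fx);

    if (save_regs_to_user_frame(buf_fx))   /* state is always live now */
        return -1;
    if (ia32_fxstate)
        save_regs_to_kernel_state();
    return 0;
}

int main(void)
{
    char frame[16];
    return copy_fpstate_to_frame(frame, frame);   /* 64-bit style frame */
}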
diff --git a/debian/patches-rt/0006-clocksource-drivers-timer-atmel-pit-rework-Kconfig-o.patch b/debian/patches-rt/0006-clocksource-drivers-timer-atmel-pit-rework-Kconfig-o.patch
deleted file mode 100644
index fb589262d..000000000
--- a/debian/patches-rt/0006-clocksource-drivers-timer-atmel-pit-rework-Kconfig-o.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-From: Alexandre Belloni <alexandre.belloni@bootlin.com>
-Date: Fri, 26 Apr 2019 23:47:15 +0200
-Subject: [PATCH 06/10] clocksource/drivers/timer-atmel-pit: rework Kconfig
- option
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-Allow building the PIT driver when COMPILE_TEST is enabled. Also remove its
-default value so it can be disabled.
-
-Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/clocksource/Kconfig | 5 ++++-
- 1 file changed, 4 insertions(+), 1 deletion(-)
-
---- a/drivers/clocksource/Kconfig
-+++ b/drivers/clocksource/Kconfig
-@@ -398,8 +398,11 @@ config ARMV7M_SYSTICK
- This options enables support for the ARMv7M system timer unit
-
- config ATMEL_PIT
-+ bool "Atmel PIT support" if COMPILE_TEST
-+ depends on HAS_IOMEM
- select TIMER_OF if OF
-- def_bool SOC_AT91SAM9 || SOC_SAMA5
-+ help
-+ Support for the Periodic Interval Timer found on Atmel SoCs.
-
- config ATMEL_ST
- bool "Atmel ST timer support" if COMPILE_TEST
diff --git a/debian/patches-rt/0006-printk-rb-add-blocking-reader-support.patch b/debian/patches-rt/0006-printk-rb-add-blocking-reader-support.patch
index 13b219cc5..68f4ae27c 100644
--- a/debian/patches-rt/0006-printk-rb-add-blocking-reader-support.patch
+++ b/debian/patches-rt/0006-printk-rb-add-blocking-reader-support.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 12 Feb 2019 15:29:44 +0100
Subject: [PATCH 06/25] printk-rb: add blocking reader support
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Add a blocking read function for readers. An irq_work function is
used to signal the wait queue so that write notification can
diff --git a/debian/patches-rt/0006-x86-fpu-Don-t-save-fxregs-for-ia32-frames-in-copy_fp.patch b/debian/patches-rt/0006-x86-fpu-Don-t-save-fxregs-for-ia32-frames-in-copy_fp.patch
deleted file mode 100644
index 983c3cb79..000000000
--- a/debian/patches-rt/0006-x86-fpu-Don-t-save-fxregs-for-ia32-frames-in-copy_fp.patch
+++ /dev/null
@@ -1,83 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 27 Nov 2018 13:08:50 +0100
-Subject: [PATCH 06/27] x86/fpu: Don't save fxregs for ia32 frames in
- copy_fpstate_to_sigframe()
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-In commit
-
- 72a671ced66db ("x86, fpu: Unify signal handling code paths for x86 and x86_64 kernels")
-
-the 32bit and 64bit paths of the signal delivery code were merged. The 32bit version:
-|int save_i387_xstate_ia32(void __user *buf)
-|…
-| if (cpu_has_xsave)
-| return save_i387_xsave(fp);
-| if (cpu_has_fxsr)
-| return save_i387_fxsave(fp);
-
-The 64bit version:
-|int save_i387_xstate(void __user *buf)
-|…
-| if (user_has_fpu()) {
-| if (use_xsave())
-| err = xsave_user(buf);
-| else
-| err = fxsave_user(buf);
-|
-| if (unlikely(err)) {
-| __clear_user(buf, xstate_size);
-| return err;
-
-The merge:
-|int save_xstate_sig(void __user *buf, void __user *buf_fx, int size)
-|…
-| if (user_has_fpu()) {
-| /* Save the live register state to the user directly. */
-| if (save_user_xstate(buf_fx))
-| return -1;
-| /* Update the thread's fxstate to save the fsave header. */
-| if (ia32_fxstate)
-| fpu_fxsave(&tsk->thread.fpu);
-
-I don't think that we needed to save the FPU registers to ->thread.fpu, because
-the registers were stored in `buf_fx'. Today the state is restored from
-`buf_fx' after the signal has been handled (I assume that this was also the
-case with lazy-FPU). Since commit
-
- 66463db4fc560 ("x86, fpu: shift drop_init_fpu() from save_xstate_sig() to handle_signal()")
-
-it is ensured that the signal handler starts with a clear/fresh set of FPU
-registers, which means that the previous store is futile.
-
-Remove copy_fxregs_to_kernel() because the task's FPU state is cleared later in
-handle_signal() via fpu__clear().
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/kernel/fpu/signal.c | 4 ----
- 1 file changed, 4 deletions(-)
-
---- a/arch/x86/kernel/fpu/signal.c
-+++ b/arch/x86/kernel/fpu/signal.c
-@@ -155,7 +155,6 @@ static inline int copy_fpregs_to_sigfram
- */
- int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
- {
-- struct fpu *fpu = &current->thread.fpu;
- struct task_struct *tsk = current;
- int ia32_fxstate = (buf != buf_fx);
-
-@@ -173,9 +172,6 @@ int copy_fpstate_to_sigframe(void __user
- /* Save the live register state to the user directly. */
- if (copy_fpregs_to_sigframe(buf_fx))
- return -1;
-- /* Update the thread's fxstate to save the fsave header. */
-- if (ia32_fxstate)
-- copy_fxregs_to_kernel(fpu);
-
- /* Save the fsave header for the 32-bit frames. */
- if ((ia32_fxstate || !use_fxsr()) && save_fsave_header(tsk, buf))
diff --git a/debian/patches-rt/0007-clocksource-drivers-tcb_clksrc-Rename-the-file-for-c.patch b/debian/patches-rt/0007-clocksource-drivers-tcb_clksrc-Rename-the-file-for-c.patch
deleted file mode 100644
index 7e24c64c6..000000000
--- a/debian/patches-rt/0007-clocksource-drivers-tcb_clksrc-Rename-the-file-for-c.patch
+++ /dev/null
@@ -1,990 +0,0 @@
-From: Alexandre Belloni <alexandre.belloni@bootlin.com>
-Date: Fri, 26 Apr 2019 23:47:16 +0200
-Subject: [PATCH 07/10] clocksource/drivers/tcb_clksrc: Rename the file for
- consistency
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-For the sake of consistency, let's rename the file to a name similar
-to other file names in this directory.
-
-Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/clocksource/Makefile | 2
- drivers/clocksource/tcb_clksrc.c | 477 ----------------------------------
- drivers/clocksource/timer-atmel-tcb.c | 477 ++++++++++++++++++++++++++++++++++
- 3 files changed, 478 insertions(+), 478 deletions(-)
- delete mode 100644 drivers/clocksource/tcb_clksrc.c
- create mode 100644 drivers/clocksource/timer-atmel-tcb.c
-
---- a/drivers/clocksource/Makefile
-+++ b/drivers/clocksource/Makefile
-@@ -3,7 +3,7 @@ obj-$(CONFIG_TIMER_OF) += timer-of.o
- obj-$(CONFIG_TIMER_PROBE) += timer-probe.o
- obj-$(CONFIG_ATMEL_PIT) += timer-atmel-pit.o
- obj-$(CONFIG_ATMEL_ST) += timer-atmel-st.o
--obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o
-+obj-$(CONFIG_ATMEL_TCB_CLKSRC) += timer-atmel-tcb.o
- obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o
- obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o
- obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o
---- a/drivers/clocksource/tcb_clksrc.c
-+++ /dev/null
-@@ -1,477 +0,0 @@
--// SPDX-License-Identifier: GPL-2.0
--#include <linux/init.h>
--#include <linux/clocksource.h>
--#include <linux/clockchips.h>
--#include <linux/interrupt.h>
--#include <linux/irq.h>
--
--#include <linux/clk.h>
--#include <linux/err.h>
--#include <linux/ioport.h>
--#include <linux/io.h>
--#include <linux/of_address.h>
--#include <linux/of_irq.h>
--#include <linux/sched_clock.h>
--#include <linux/syscore_ops.h>
--#include <soc/at91/atmel_tcb.h>
--
--
--/*
-- * We're configured to use a specific TC block, one that's not hooked
-- * up to external hardware, to provide a time solution:
-- *
-- * - Two channels combine to create a free-running 32 bit counter
-- * with a base rate of 5+ MHz, packaged as a clocksource (with
-- * resolution better than 200 nsec).
-- * - Some chips support a 32 bit counter. A single channel is used for
-- * this 32 bit free-running counter. The second channel is not used.
-- *
-- * - The third channel may be used to provide a 16-bit clockevent
-- * source, used in either periodic or oneshot mode. This runs
-- * at 32 KiHz, and can handle delays of up to two seconds.
-- *
-- * REVISIT behavior during system suspend states... we should disable
-- * all clocks and save the power. Easily done for clockevent devices,
-- * but clocksources won't necessarily get the needed notifications.
-- * For deeper system sleep states, this will be mandatory...
-- */
--
--static void __iomem *tcaddr;
--static struct
--{
-- u32 cmr;
-- u32 imr;
-- u32 rc;
-- bool clken;
--} tcb_cache[3];
--static u32 bmr_cache;
--
--static u64 tc_get_cycles(struct clocksource *cs)
--{
-- unsigned long flags;
-- u32 lower, upper;
--
-- raw_local_irq_save(flags);
-- do {
-- upper = readl_relaxed(tcaddr + ATMEL_TC_REG(1, CV));
-- lower = readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
-- } while (upper != readl_relaxed(tcaddr + ATMEL_TC_REG(1, CV)));
--
-- raw_local_irq_restore(flags);
-- return (upper << 16) | lower;
--}
--
--static u64 tc_get_cycles32(struct clocksource *cs)
--{
-- return readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
--}
--
--void tc_clksrc_suspend(struct clocksource *cs)
--{
-- int i;
--
-- for (i = 0; i < ARRAY_SIZE(tcb_cache); i++) {
-- tcb_cache[i].cmr = readl(tcaddr + ATMEL_TC_REG(i, CMR));
-- tcb_cache[i].imr = readl(tcaddr + ATMEL_TC_REG(i, IMR));
-- tcb_cache[i].rc = readl(tcaddr + ATMEL_TC_REG(i, RC));
-- tcb_cache[i].clken = !!(readl(tcaddr + ATMEL_TC_REG(i, SR)) &
-- ATMEL_TC_CLKSTA);
-- }
--
-- bmr_cache = readl(tcaddr + ATMEL_TC_BMR);
--}
--
--void tc_clksrc_resume(struct clocksource *cs)
--{
-- int i;
--
-- for (i = 0; i < ARRAY_SIZE(tcb_cache); i++) {
-- /* Restore registers for the channel, RA and RB are not used */
-- writel(tcb_cache[i].cmr, tcaddr + ATMEL_TC_REG(i, CMR));
-- writel(tcb_cache[i].rc, tcaddr + ATMEL_TC_REG(i, RC));
-- writel(0, tcaddr + ATMEL_TC_REG(i, RA));
-- writel(0, tcaddr + ATMEL_TC_REG(i, RB));
-- /* Disable all the interrupts */
-- writel(0xff, tcaddr + ATMEL_TC_REG(i, IDR));
-- /* Reenable interrupts that were enabled before suspending */
-- writel(tcb_cache[i].imr, tcaddr + ATMEL_TC_REG(i, IER));
-- /* Start the clock if it was used */
-- if (tcb_cache[i].clken)
-- writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(i, CCR));
-- }
--
-- /* Dual channel, chain channels */
-- writel(bmr_cache, tcaddr + ATMEL_TC_BMR);
-- /* Finally, trigger all the channels */
-- writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
--}
--
--static struct clocksource clksrc = {
-- .rating = 200,
-- .read = tc_get_cycles,
-- .mask = CLOCKSOURCE_MASK(32),
-- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-- .suspend = tc_clksrc_suspend,
-- .resume = tc_clksrc_resume,
--};
--
--static u64 notrace tc_sched_clock_read(void)
--{
-- return tc_get_cycles(&clksrc);
--}
--
--static u64 notrace tc_sched_clock_read32(void)
--{
-- return tc_get_cycles32(&clksrc);
--}
--
--#ifdef CONFIG_GENERIC_CLOCKEVENTS
--
--struct tc_clkevt_device {
-- struct clock_event_device clkevt;
-- struct clk *clk;
-- void __iomem *regs;
--};
--
--static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
--{
-- return container_of(clkevt, struct tc_clkevt_device, clkevt);
--}
--
--/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
-- * because using one of the divided clocks would usually mean the
-- * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
-- *
-- * A divided clock could be good for high resolution timers, since
-- * 30.5 usec resolution can seem "low".
-- */
--static u32 timer_clock;
--
--static int tc_shutdown(struct clock_event_device *d)
--{
-- struct tc_clkevt_device *tcd = to_tc_clkevt(d);
-- void __iomem *regs = tcd->regs;
--
-- writel(0xff, regs + ATMEL_TC_REG(2, IDR));
-- writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
-- if (!clockevent_state_detached(d))
-- clk_disable(tcd->clk);
--
-- return 0;
--}
--
--static int tc_set_oneshot(struct clock_event_device *d)
--{
-- struct tc_clkevt_device *tcd = to_tc_clkevt(d);
-- void __iomem *regs = tcd->regs;
--
-- if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
-- tc_shutdown(d);
--
-- clk_enable(tcd->clk);
--
-- /* slow clock, count up to RC, then irq and stop */
-- writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
-- ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR));
-- writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
--
-- /* set_next_event() configures and starts the timer */
-- return 0;
--}
--
--static int tc_set_periodic(struct clock_event_device *d)
--{
-- struct tc_clkevt_device *tcd = to_tc_clkevt(d);
-- void __iomem *regs = tcd->regs;
--
-- if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
-- tc_shutdown(d);
--
-- /* By not making the gentime core emulate periodic mode on top
-- * of oneshot, we get lower overhead and improved accuracy.
-- */
-- clk_enable(tcd->clk);
--
-- /* slow clock, count up to RC, then irq and restart */
-- writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
-- regs + ATMEL_TC_REG(2, CMR));
-- writel((32768 + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
--
-- /* Enable clock and interrupts on RC compare */
-- writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
--
-- /* go go gadget! */
-- writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG, regs +
-- ATMEL_TC_REG(2, CCR));
-- return 0;
--}
--
--static int tc_next_event(unsigned long delta, struct clock_event_device *d)
--{
-- writel_relaxed(delta, tcaddr + ATMEL_TC_REG(2, RC));
--
-- /* go go gadget! */
-- writel_relaxed(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
-- tcaddr + ATMEL_TC_REG(2, CCR));
-- return 0;
--}
--
--static struct tc_clkevt_device clkevt = {
-- .clkevt = {
-- .features = CLOCK_EVT_FEAT_PERIODIC |
-- CLOCK_EVT_FEAT_ONESHOT,
-- /* Should be lower than at91rm9200's system timer */
-- .rating = 125,
-- .set_next_event = tc_next_event,
-- .set_state_shutdown = tc_shutdown,
-- .set_state_periodic = tc_set_periodic,
-- .set_state_oneshot = tc_set_oneshot,
-- },
--};
--
--static irqreturn_t ch2_irq(int irq, void *handle)
--{
-- struct tc_clkevt_device *dev = handle;
-- unsigned int sr;
--
-- sr = readl_relaxed(dev->regs + ATMEL_TC_REG(2, SR));
-- if (sr & ATMEL_TC_CPCS) {
-- dev->clkevt.event_handler(&dev->clkevt);
-- return IRQ_HANDLED;
-- }
--
-- return IRQ_NONE;
--}
--
--static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
--{
-- int ret;
-- struct clk *t2_clk = tc->clk[2];
-- int irq = tc->irq[2];
--
-- ret = clk_prepare_enable(tc->slow_clk);
-- if (ret)
-- return ret;
--
-- /* try to enable t2 clk to avoid future errors in mode change */
-- ret = clk_prepare_enable(t2_clk);
-- if (ret) {
-- clk_disable_unprepare(tc->slow_clk);
-- return ret;
-- }
--
-- clk_disable(t2_clk);
--
-- clkevt.regs = tc->regs;
-- clkevt.clk = t2_clk;
--
-- timer_clock = clk32k_divisor_idx;
--
-- clkevt.clkevt.cpumask = cpumask_of(0);
--
-- ret = request_irq(irq, ch2_irq, IRQF_TIMER, "tc_clkevt", &clkevt);
-- if (ret) {
-- clk_unprepare(t2_clk);
-- clk_disable_unprepare(tc->slow_clk);
-- return ret;
-- }
--
-- clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);
--
-- return ret;
--}
--
--#else /* !CONFIG_GENERIC_CLOCKEVENTS */
--
--static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
--{
-- /* NOTHING */
-- return 0;
--}
--
--#endif
--
--static void __init tcb_setup_dual_chan(struct atmel_tc *tc, int mck_divisor_idx)
--{
-- /* channel 0: waveform mode, input mclk/8, clock TIOA0 on overflow */
-- writel(mck_divisor_idx /* likely divide-by-8 */
-- | ATMEL_TC_WAVE
-- | ATMEL_TC_WAVESEL_UP /* free-run */
-- | ATMEL_TC_ACPA_SET /* TIOA0 rises at 0 */
-- | ATMEL_TC_ACPC_CLEAR, /* (duty cycle 50%) */
-- tcaddr + ATMEL_TC_REG(0, CMR));
-- writel(0x0000, tcaddr + ATMEL_TC_REG(0, RA));
-- writel(0x8000, tcaddr + ATMEL_TC_REG(0, RC));
-- writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR)); /* no irqs */
-- writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));
--
-- /* channel 1: waveform mode, input TIOA0 */
-- writel(ATMEL_TC_XC1 /* input: TIOA0 */
-- | ATMEL_TC_WAVE
-- | ATMEL_TC_WAVESEL_UP, /* free-run */
-- tcaddr + ATMEL_TC_REG(1, CMR));
-- writel(0xff, tcaddr + ATMEL_TC_REG(1, IDR)); /* no irqs */
-- writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(1, CCR));
--
-- /* chain channel 0 to channel 1 */
-- writel(ATMEL_TC_TC1XC1S_TIOA0, tcaddr + ATMEL_TC_BMR);
-- /* then reset all the timers */
-- writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
--}
--
--static void __init tcb_setup_single_chan(struct atmel_tc *tc, int mck_divisor_idx)
--{
-- /* channel 0: waveform mode, input mclk/8 */
-- writel(mck_divisor_idx /* likely divide-by-8 */
-- | ATMEL_TC_WAVE
-- | ATMEL_TC_WAVESEL_UP, /* free-run */
-- tcaddr + ATMEL_TC_REG(0, CMR));
-- writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR)); /* no irqs */
-- writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));
--
-- /* then reset all the timers */
-- writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
--}
--
--static const u8 atmel_tcb_divisors[5] = { 2, 8, 32, 128, 0, };
--
--static const struct of_device_id atmel_tcb_of_match[] = {
-- { .compatible = "atmel,at91rm9200-tcb", .data = (void *)16, },
-- { .compatible = "atmel,at91sam9x5-tcb", .data = (void *)32, },
-- { /* sentinel */ }
--};
--
--static int __init tcb_clksrc_init(struct device_node *node)
--{
-- struct atmel_tc tc;
-- struct clk *t0_clk;
-- const struct of_device_id *match;
-- u64 (*tc_sched_clock)(void);
-- u32 rate, divided_rate = 0;
-- int best_divisor_idx = -1;
-- int clk32k_divisor_idx = -1;
-- int bits;
-- int i;
-- int ret;
--
-- /* Protect against multiple calls */
-- if (tcaddr)
-- return 0;
--
-- tc.regs = of_iomap(node->parent, 0);
-- if (!tc.regs)
-- return -ENXIO;
--
-- t0_clk = of_clk_get_by_name(node->parent, "t0_clk");
-- if (IS_ERR(t0_clk))
-- return PTR_ERR(t0_clk);
--
-- tc.slow_clk = of_clk_get_by_name(node->parent, "slow_clk");
-- if (IS_ERR(tc.slow_clk))
-- return PTR_ERR(tc.slow_clk);
--
-- tc.clk[0] = t0_clk;
-- tc.clk[1] = of_clk_get_by_name(node->parent, "t1_clk");
-- if (IS_ERR(tc.clk[1]))
-- tc.clk[1] = t0_clk;
-- tc.clk[2] = of_clk_get_by_name(node->parent, "t2_clk");
-- if (IS_ERR(tc.clk[2]))
-- tc.clk[2] = t0_clk;
--
-- tc.irq[2] = of_irq_get(node->parent, 2);
-- if (tc.irq[2] <= 0) {
-- tc.irq[2] = of_irq_get(node->parent, 0);
-- if (tc.irq[2] <= 0)
-- return -EINVAL;
-- }
--
-- match = of_match_node(atmel_tcb_of_match, node->parent);
-- bits = (uintptr_t)match->data;
--
-- for (i = 0; i < ARRAY_SIZE(tc.irq); i++)
-- writel(ATMEL_TC_ALL_IRQ, tc.regs + ATMEL_TC_REG(i, IDR));
--
-- ret = clk_prepare_enable(t0_clk);
-- if (ret) {
-- pr_debug("can't enable T0 clk\n");
-- return ret;
-- }
--
-- /* How fast will we be counting? Pick something over 5 MHz. */
-- rate = (u32) clk_get_rate(t0_clk);
-- for (i = 0; i < ARRAY_SIZE(atmel_tcb_divisors); i++) {
-- unsigned divisor = atmel_tcb_divisors[i];
-- unsigned tmp;
--
-- /* remember 32 KiHz clock for later */
-- if (!divisor) {
-- clk32k_divisor_idx = i;
-- continue;
-- }
--
-- tmp = rate / divisor;
-- pr_debug("TC: %u / %-3u [%d] --> %u\n", rate, divisor, i, tmp);
-- if (best_divisor_idx > 0) {
-- if (tmp < 5 * 1000 * 1000)
-- continue;
-- }
-- divided_rate = tmp;
-- best_divisor_idx = i;
-- }
--
-- clksrc.name = kbasename(node->parent->full_name);
-- clkevt.clkevt.name = kbasename(node->parent->full_name);
-- pr_debug("%s at %d.%03d MHz\n", clksrc.name, divided_rate / 1000000,
-- ((divided_rate % 1000000) + 500) / 1000);
--
-- tcaddr = tc.regs;
--
-- if (bits == 32) {
-- /* use appropriate function to read 32 bit counter */
-- clksrc.read = tc_get_cycles32;
-- /* setup only channel 0 */
-- tcb_setup_single_chan(&tc, best_divisor_idx);
-- tc_sched_clock = tc_sched_clock_read32;
-- } else {
-- /* we have three clocks no matter what the
-- * underlying platform supports.
-- */
-- ret = clk_prepare_enable(tc.clk[1]);
-- if (ret) {
-- pr_debug("can't enable T1 clk\n");
-- goto err_disable_t0;
-- }
-- /* setup both channel 0 & 1 */
-- tcb_setup_dual_chan(&tc, best_divisor_idx);
-- tc_sched_clock = tc_sched_clock_read;
-- }
--
-- /* and away we go! */
-- ret = clocksource_register_hz(&clksrc, divided_rate);
-- if (ret)
-- goto err_disable_t1;
--
-- /* channel 2: periodic and oneshot timer support */
-- ret = setup_clkevents(&tc, clk32k_divisor_idx);
-- if (ret)
-- goto err_unregister_clksrc;
--
-- sched_clock_register(tc_sched_clock, 32, divided_rate);
--
-- return 0;
--
--err_unregister_clksrc:
-- clocksource_unregister(&clksrc);
--
--err_disable_t1:
-- if (bits != 32)
-- clk_disable_unprepare(tc.clk[1]);
--
--err_disable_t0:
-- clk_disable_unprepare(t0_clk);
--
-- tcaddr = NULL;
--
-- return ret;
--}
--TIMER_OF_DECLARE(atmel_tcb_clksrc, "atmel,tcb-timer", tcb_clksrc_init);
---- /dev/null
-+++ b/drivers/clocksource/timer-atmel-tcb.c
-@@ -0,0 +1,477 @@
-+// SPDX-License-Identifier: GPL-2.0
-+#include <linux/init.h>
-+#include <linux/clocksource.h>
-+#include <linux/clockchips.h>
-+#include <linux/interrupt.h>
-+#include <linux/irq.h>
-+
-+#include <linux/clk.h>
-+#include <linux/err.h>
-+#include <linux/ioport.h>
-+#include <linux/io.h>
-+#include <linux/of_address.h>
-+#include <linux/of_irq.h>
-+#include <linux/sched_clock.h>
-+#include <linux/syscore_ops.h>
-+#include <soc/at91/atmel_tcb.h>
-+
-+
-+/*
-+ * We're configured to use a specific TC block, one that's not hooked
-+ * up to external hardware, to provide a time solution:
-+ *
-+ * - Two channels combine to create a free-running 32 bit counter
-+ * with a base rate of 5+ MHz, packaged as a clocksource (with
-+ * resolution better than 200 nsec).
-+ * - Some chips support a 32 bit counter. A single channel is used for
-+ * this 32 bit free-running counter. The second channel is not used.
-+ *
-+ * - The third channel may be used to provide a 16-bit clockevent
-+ * source, used in either periodic or oneshot mode. This runs
-+ * at 32 KiHz, and can handle delays of up to two seconds.
-+ *
-+ * REVISIT behavior during system suspend states... we should disable
-+ * all clocks and save the power. Easily done for clockevent devices,
-+ * but clocksources won't necessarily get the needed notifications.
-+ * For deeper system sleep states, this will be mandatory...
-+ */
-+
-+static void __iomem *tcaddr;
-+static struct
-+{
-+ u32 cmr;
-+ u32 imr;
-+ u32 rc;
-+ bool clken;
-+} tcb_cache[3];
-+static u32 bmr_cache;
-+
-+static u64 tc_get_cycles(struct clocksource *cs)
-+{
-+ unsigned long flags;
-+ u32 lower, upper;
-+
-+ raw_local_irq_save(flags);
-+ do {
-+ upper = readl_relaxed(tcaddr + ATMEL_TC_REG(1, CV));
-+ lower = readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
-+ } while (upper != readl_relaxed(tcaddr + ATMEL_TC_REG(1, CV)));
-+
-+ raw_local_irq_restore(flags);
-+ return (upper << 16) | lower;
-+}
-+
-+static u64 tc_get_cycles32(struct clocksource *cs)
-+{
-+ return readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
-+}
-+
-+void tc_clksrc_suspend(struct clocksource *cs)
-+{
-+ int i;
-+
-+ for (i = 0; i < ARRAY_SIZE(tcb_cache); i++) {
-+ tcb_cache[i].cmr = readl(tcaddr + ATMEL_TC_REG(i, CMR));
-+ tcb_cache[i].imr = readl(tcaddr + ATMEL_TC_REG(i, IMR));
-+ tcb_cache[i].rc = readl(tcaddr + ATMEL_TC_REG(i, RC));
-+ tcb_cache[i].clken = !!(readl(tcaddr + ATMEL_TC_REG(i, SR)) &
-+ ATMEL_TC_CLKSTA);
-+ }
-+
-+ bmr_cache = readl(tcaddr + ATMEL_TC_BMR);
-+}
-+
-+void tc_clksrc_resume(struct clocksource *cs)
-+{
-+ int i;
-+
-+ for (i = 0; i < ARRAY_SIZE(tcb_cache); i++) {
-+ /* Restore registers for the channel, RA and RB are not used */
-+ writel(tcb_cache[i].cmr, tcaddr + ATMEL_TC_REG(i, CMR));
-+ writel(tcb_cache[i].rc, tcaddr + ATMEL_TC_REG(i, RC));
-+ writel(0, tcaddr + ATMEL_TC_REG(i, RA));
-+ writel(0, tcaddr + ATMEL_TC_REG(i, RB));
-+ /* Disable all the interrupts */
-+ writel(0xff, tcaddr + ATMEL_TC_REG(i, IDR));
-+ /* Reenable interrupts that were enabled before suspending */
-+ writel(tcb_cache[i].imr, tcaddr + ATMEL_TC_REG(i, IER));
-+ /* Start the clock if it was used */
-+ if (tcb_cache[i].clken)
-+ writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(i, CCR));
-+ }
-+
-+ /* Dual channel, chain channels */
-+ writel(bmr_cache, tcaddr + ATMEL_TC_BMR);
-+ /* Finally, trigger all the channels */
-+ writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
-+}
-+
-+static struct clocksource clksrc = {
-+ .rating = 200,
-+ .read = tc_get_cycles,
-+ .mask = CLOCKSOURCE_MASK(32),
-+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-+ .suspend = tc_clksrc_suspend,
-+ .resume = tc_clksrc_resume,
-+};
-+
-+static u64 notrace tc_sched_clock_read(void)
-+{
-+ return tc_get_cycles(&clksrc);
-+}
-+
-+static u64 notrace tc_sched_clock_read32(void)
-+{
-+ return tc_get_cycles32(&clksrc);
-+}
-+
-+#ifdef CONFIG_GENERIC_CLOCKEVENTS
-+
-+struct tc_clkevt_device {
-+ struct clock_event_device clkevt;
-+ struct clk *clk;
-+ void __iomem *regs;
-+};
-+
-+static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
-+{
-+ return container_of(clkevt, struct tc_clkevt_device, clkevt);
-+}
-+
-+/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
-+ * because using one of the divided clocks would usually mean the
-+ * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
-+ *
-+ * A divided clock could be good for high resolution timers, since
-+ * 30.5 usec resolution can seem "low".
-+ */
-+static u32 timer_clock;
-+
-+static int tc_shutdown(struct clock_event_device *d)
-+{
-+ struct tc_clkevt_device *tcd = to_tc_clkevt(d);
-+ void __iomem *regs = tcd->regs;
-+
-+ writel(0xff, regs + ATMEL_TC_REG(2, IDR));
-+ writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
-+ if (!clockevent_state_detached(d))
-+ clk_disable(tcd->clk);
-+
-+ return 0;
-+}
-+
-+static int tc_set_oneshot(struct clock_event_device *d)
-+{
-+ struct tc_clkevt_device *tcd = to_tc_clkevt(d);
-+ void __iomem *regs = tcd->regs;
-+
-+ if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
-+ tc_shutdown(d);
-+
-+ clk_enable(tcd->clk);
-+
-+ /* slow clock, count up to RC, then irq and stop */
-+ writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
-+ ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR));
-+ writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
-+
-+ /* set_next_event() configures and starts the timer */
-+ return 0;
-+}
-+
-+static int tc_set_periodic(struct clock_event_device *d)
-+{
-+ struct tc_clkevt_device *tcd = to_tc_clkevt(d);
-+ void __iomem *regs = tcd->regs;
-+
-+ if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
-+ tc_shutdown(d);
-+
-+ /* By not making the gentime core emulate periodic mode on top
-+ * of oneshot, we get lower overhead and improved accuracy.
-+ */
-+ clk_enable(tcd->clk);
-+
-+ /* slow clock, count up to RC, then irq and restart */
-+ writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
-+ regs + ATMEL_TC_REG(2, CMR));
-+ writel((32768 + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
-+
-+ /* Enable clock and interrupts on RC compare */
-+ writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
-+
-+ /* go go gadget! */
-+ writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG, regs +
-+ ATMEL_TC_REG(2, CCR));
-+ return 0;
-+}
-+
-+static int tc_next_event(unsigned long delta, struct clock_event_device *d)
-+{
-+ writel_relaxed(delta, tcaddr + ATMEL_TC_REG(2, RC));
-+
-+ /* go go gadget! */
-+ writel_relaxed(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
-+ tcaddr + ATMEL_TC_REG(2, CCR));
-+ return 0;
-+}
-+
-+static struct tc_clkevt_device clkevt = {
-+ .clkevt = {
-+ .features = CLOCK_EVT_FEAT_PERIODIC |
-+ CLOCK_EVT_FEAT_ONESHOT,
-+ /* Should be lower than at91rm9200's system timer */
-+ .rating = 125,
-+ .set_next_event = tc_next_event,
-+ .set_state_shutdown = tc_shutdown,
-+ .set_state_periodic = tc_set_periodic,
-+ .set_state_oneshot = tc_set_oneshot,
-+ },
-+};
-+
-+static irqreturn_t ch2_irq(int irq, void *handle)
-+{
-+ struct tc_clkevt_device *dev = handle;
-+ unsigned int sr;
-+
-+ sr = readl_relaxed(dev->regs + ATMEL_TC_REG(2, SR));
-+ if (sr & ATMEL_TC_CPCS) {
-+ dev->clkevt.event_handler(&dev->clkevt);
-+ return IRQ_HANDLED;
-+ }
-+
-+ return IRQ_NONE;
-+}
-+
-+static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
-+{
-+ int ret;
-+ struct clk *t2_clk = tc->clk[2];
-+ int irq = tc->irq[2];
-+
-+ ret = clk_prepare_enable(tc->slow_clk);
-+ if (ret)
-+ return ret;
-+
-+ /* try to enable t2 clk to avoid future errors in mode change */
-+ ret = clk_prepare_enable(t2_clk);
-+ if (ret) {
-+ clk_disable_unprepare(tc->slow_clk);
-+ return ret;
-+ }
-+
-+ clk_disable(t2_clk);
-+
-+ clkevt.regs = tc->regs;
-+ clkevt.clk = t2_clk;
-+
-+ timer_clock = clk32k_divisor_idx;
-+
-+ clkevt.clkevt.cpumask = cpumask_of(0);
-+
-+ ret = request_irq(irq, ch2_irq, IRQF_TIMER, "tc_clkevt", &clkevt);
-+ if (ret) {
-+ clk_unprepare(t2_clk);
-+ clk_disable_unprepare(tc->slow_clk);
-+ return ret;
-+ }
-+
-+ clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);
-+
-+ return ret;
-+}
-+
-+#else /* !CONFIG_GENERIC_CLOCKEVENTS */
-+
-+static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
-+{
-+ /* NOTHING */
-+ return 0;
-+}
-+
-+#endif
-+
-+static void __init tcb_setup_dual_chan(struct atmel_tc *tc, int mck_divisor_idx)
-+{
-+ /* channel 0: waveform mode, input mclk/8, clock TIOA0 on overflow */
-+ writel(mck_divisor_idx /* likely divide-by-8 */
-+ | ATMEL_TC_WAVE
-+ | ATMEL_TC_WAVESEL_UP /* free-run */
-+ | ATMEL_TC_ACPA_SET /* TIOA0 rises at 0 */
-+ | ATMEL_TC_ACPC_CLEAR, /* (duty cycle 50%) */
-+ tcaddr + ATMEL_TC_REG(0, CMR));
-+ writel(0x0000, tcaddr + ATMEL_TC_REG(0, RA));
-+ writel(0x8000, tcaddr + ATMEL_TC_REG(0, RC));
-+ writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR)); /* no irqs */
-+ writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));
-+
-+ /* channel 1: waveform mode, input TIOA0 */
-+ writel(ATMEL_TC_XC1 /* input: TIOA0 */
-+ | ATMEL_TC_WAVE
-+ | ATMEL_TC_WAVESEL_UP, /* free-run */
-+ tcaddr + ATMEL_TC_REG(1, CMR));
-+ writel(0xff, tcaddr + ATMEL_TC_REG(1, IDR)); /* no irqs */
-+ writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(1, CCR));
-+
-+ /* chain channel 0 to channel 1 */
-+ writel(ATMEL_TC_TC1XC1S_TIOA0, tcaddr + ATMEL_TC_BMR);
-+ /* then reset all the timers */
-+ writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
-+}
-+
-+static void __init tcb_setup_single_chan(struct atmel_tc *tc, int mck_divisor_idx)
-+{
-+ /* channel 0: waveform mode, input mclk/8 */
-+ writel(mck_divisor_idx /* likely divide-by-8 */
-+ | ATMEL_TC_WAVE
-+ | ATMEL_TC_WAVESEL_UP, /* free-run */
-+ tcaddr + ATMEL_TC_REG(0, CMR));
-+ writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR)); /* no irqs */
-+ writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));
-+
-+ /* then reset all the timers */
-+ writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
-+}
-+
-+static const u8 atmel_tcb_divisors[5] = { 2, 8, 32, 128, 0, };
-+
-+static const struct of_device_id atmel_tcb_of_match[] = {
-+ { .compatible = "atmel,at91rm9200-tcb", .data = (void *)16, },
-+ { .compatible = "atmel,at91sam9x5-tcb", .data = (void *)32, },
-+ { /* sentinel */ }
-+};
-+
-+static int __init tcb_clksrc_init(struct device_node *node)
-+{
-+ struct atmel_tc tc;
-+ struct clk *t0_clk;
-+ const struct of_device_id *match;
-+ u64 (*tc_sched_clock)(void);
-+ u32 rate, divided_rate = 0;
-+ int best_divisor_idx = -1;
-+ int clk32k_divisor_idx = -1;
-+ int bits;
-+ int i;
-+ int ret;
-+
-+ /* Protect against multiple calls */
-+ if (tcaddr)
-+ return 0;
-+
-+ tc.regs = of_iomap(node->parent, 0);
-+ if (!tc.regs)
-+ return -ENXIO;
-+
-+ t0_clk = of_clk_get_by_name(node->parent, "t0_clk");
-+ if (IS_ERR(t0_clk))
-+ return PTR_ERR(t0_clk);
-+
-+ tc.slow_clk = of_clk_get_by_name(node->parent, "slow_clk");
-+ if (IS_ERR(tc.slow_clk))
-+ return PTR_ERR(tc.slow_clk);
-+
-+ tc.clk[0] = t0_clk;
-+ tc.clk[1] = of_clk_get_by_name(node->parent, "t1_clk");
-+ if (IS_ERR(tc.clk[1]))
-+ tc.clk[1] = t0_clk;
-+ tc.clk[2] = of_clk_get_by_name(node->parent, "t2_clk");
-+ if (IS_ERR(tc.clk[2]))
-+ tc.clk[2] = t0_clk;
-+
-+ tc.irq[2] = of_irq_get(node->parent, 2);
-+ if (tc.irq[2] <= 0) {
-+ tc.irq[2] = of_irq_get(node->parent, 0);
-+ if (tc.irq[2] <= 0)
-+ return -EINVAL;
-+ }
-+
-+ match = of_match_node(atmel_tcb_of_match, node->parent);
-+ bits = (uintptr_t)match->data;
-+
-+ for (i = 0; i < ARRAY_SIZE(tc.irq); i++)
-+ writel(ATMEL_TC_ALL_IRQ, tc.regs + ATMEL_TC_REG(i, IDR));
-+
-+ ret = clk_prepare_enable(t0_clk);
-+ if (ret) {
-+ pr_debug("can't enable T0 clk\n");
-+ return ret;
-+ }
-+
-+ /* How fast will we be counting? Pick something over 5 MHz. */
-+ rate = (u32) clk_get_rate(t0_clk);
-+ for (i = 0; i < ARRAY_SIZE(atmel_tcb_divisors); i++) {
-+ unsigned divisor = atmel_tcb_divisors[i];
-+ unsigned tmp;
-+
-+ /* remember 32 KiHz clock for later */
-+ if (!divisor) {
-+ clk32k_divisor_idx = i;
-+ continue;
-+ }
-+
-+ tmp = rate / divisor;
-+ pr_debug("TC: %u / %-3u [%d] --> %u\n", rate, divisor, i, tmp);
-+ if (best_divisor_idx > 0) {
-+ if (tmp < 5 * 1000 * 1000)
-+ continue;
-+ }
-+ divided_rate = tmp;
-+ best_divisor_idx = i;
-+ }
-+
-+ clksrc.name = kbasename(node->parent->full_name);
-+ clkevt.clkevt.name = kbasename(node->parent->full_name);
-+ pr_debug("%s at %d.%03d MHz\n", clksrc.name, divided_rate / 1000000,
-+ ((divided_rate % 1000000) + 500) / 1000);
-+
-+ tcaddr = tc.regs;
-+
-+ if (bits == 32) {
-+ /* use appropriate function to read 32 bit counter */
-+ clksrc.read = tc_get_cycles32;
-+ /* setup only channel 0 */
-+ tcb_setup_single_chan(&tc, best_divisor_idx);
-+ tc_sched_clock = tc_sched_clock_read32;
-+ } else {
-+ /* we have three clocks no matter what the
-+ * underlying platform supports.
-+ */
-+ ret = clk_prepare_enable(tc.clk[1]);
-+ if (ret) {
-+ pr_debug("can't enable T1 clk\n");
-+ goto err_disable_t0;
-+ }
-+ /* setup both channel 0 & 1 */
-+ tcb_setup_dual_chan(&tc, best_divisor_idx);
-+ tc_sched_clock = tc_sched_clock_read;
-+ }
-+
-+ /* and away we go! */
-+ ret = clocksource_register_hz(&clksrc, divided_rate);
-+ if (ret)
-+ goto err_disable_t1;
-+
-+ /* channel 2: periodic and oneshot timer support */
-+ ret = setup_clkevents(&tc, clk32k_divisor_idx);
-+ if (ret)
-+ goto err_unregister_clksrc;
-+
-+ sched_clock_register(tc_sched_clock, 32, divided_rate);
-+
-+ return 0;
-+
-+err_unregister_clksrc:
-+ clocksource_unregister(&clksrc);
-+
-+err_disable_t1:
-+ if (bits != 32)
-+ clk_disable_unprepare(tc.clk[1]);
-+
-+err_disable_t0:
-+ clk_disable_unprepare(t0_clk);
-+
-+ tcaddr = NULL;
-+
-+ return ret;
-+}
-+TIMER_OF_DECLARE(atmel_tcb_clksrc, "atmel,tcb-timer", tcb_clksrc_init);
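
The tc_get_cycles() read in the file above is worth isolating: two chained 16-bit counters form one 32-bit value, and the high half is re-read after the low half so that a carry between the two reads can never produce a torn result. A standalone sketch with simulated hardware access:

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t fake_counter;   /* stand-in for the two TC channels */

static uint16_t read_low(void)  { return (uint16_t)(fake_counter & 0xffff); }
static uint16_t read_high(void) { return (uint16_t)(fake_counter >> 16); }

static uint32_t get_cycles(void)
{
    uint16_t upper, lower;

    do {
        upper = read_high();
        lower = read_low();
    } while (upper != read_high());   /* high half moved: retry */

    return ((uint32_t)upper << 16) | lower;
}

int main(void)
{
    fake_counter = 0x0001ffffu;   /* just before a carry into the high half */
    printf("0x%08x\n", (unsigned)get_cycles());
    return 0;
}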
diff --git a/debian/patches-rt/0007-printk-rb-add-functionality-required-by-printk.patch b/debian/patches-rt/0007-printk-rb-add-functionality-required-by-printk.patch
index 5be3c8977..51bf2cccf 100644
--- a/debian/patches-rt/0007-printk-rb-add-functionality-required-by-printk.patch
+++ b/debian/patches-rt/0007-printk-rb-add-functionality-required-by-printk.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 12 Feb 2019 15:29:45 +0100
Subject: [PATCH 07/25] printk-rb: add functionality required by printk
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
The printk subsystem needs to be able to query the size of the ring
buffer, seek to specific entries within the ring buffer, and track
diff --git a/debian/patches-rt/0007-x86-fpu-Remove-fpu-initialized.patch b/debian/patches-rt/0007-x86-fpu-Remove-fpu-initialized.patch
deleted file mode 100644
index 0073d26ff..000000000
--- a/debian/patches-rt/0007-x86-fpu-Remove-fpu-initialized.patch
+++ /dev/null
@@ -1,476 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 17 Oct 2018 18:08:35 +0200
-Subject: [PATCH 07/27] x86/fpu: Remove fpu->initialized
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-The `initialized' member of the fpu struct is always set to one for user
-tasks and zero for kernel tasks. This avoids saving/restoring the FPU
-registers for kernel threads.
-
-The ->initialized = 0 case for user tasks has been removed in previous changes,
-for instance by always performing an explicit init at fork() time on FPU-less
-systems, which was otherwise delayed until the first emulated opcode.
-
-The context switch code (switch_fpu_prepare() + switch_fpu_finish())
-can't unconditionally save/restore registers for kernel threads. Not only would
-it slow down the switch, it would also load a zeroed xcomp_bv for XSAVES.
-
-For kernel_fpu_begin() (+end) the situation is similar: EFI with runtime
-services uses this before alternatives_patched is true, which means that this
-function is now used too early; that wasn't the case before.
-
-For those two cases current->mm is used to distinguish between user and
-kernel threads. For kernel_fpu_begin() we skip the save/restore of the FPU
-registers.
-During the context switch into a kernel thread we don't do anything.
-There is no reason to save the FPU state of a kernel thread.
-The reordering in __switch_to() is important because the current() pointer
-needs to be valid before switch_fpu_finish() is invoked, so ->mm of the
-new task is seen instead of the old one's.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/ia32/ia32_signal.c | 17 +++-----
- arch/x86/include/asm/fpu/internal.h | 18 +++++----
- arch/x86/include/asm/fpu/types.h | 9 ----
- arch/x86/include/asm/trace/fpu.h | 5 --
- arch/x86/kernel/fpu/core.c | 72 ++++++++++--------------------------
- arch/x86/kernel/fpu/init.c | 2 -
- arch/x86/kernel/fpu/regset.c | 19 ++-------
- arch/x86/kernel/fpu/xstate.c | 2 -
- arch/x86/kernel/process_32.c | 4 +-
- arch/x86/kernel/process_64.c | 4 +-
- arch/x86/kernel/signal.c | 17 +++-----
- arch/x86/mm/pkeys.c | 7 ---
- 12 files changed, 54 insertions(+), 122 deletions(-)
-
---- a/arch/x86/ia32/ia32_signal.c
-+++ b/arch/x86/ia32/ia32_signal.c
-@@ -216,8 +216,7 @@ static void __user *get_sigframe(struct
- size_t frame_size,
- void __user **fpstate)
- {
-- struct fpu *fpu = &current->thread.fpu;
-- unsigned long sp;
-+ unsigned long sp, fx_aligned, math_size;
-
- /* Default to using normal stack */
- sp = regs->sp;
-@@ -231,15 +230,11 @@ static void __user *get_sigframe(struct
- ksig->ka.sa.sa_restorer)
- sp = (unsigned long) ksig->ka.sa.sa_restorer;
-
-- if (fpu->initialized) {
-- unsigned long fx_aligned, math_size;
--
-- sp = fpu__alloc_mathframe(sp, 1, &fx_aligned, &math_size);
-- *fpstate = (struct _fpstate_32 __user *) sp;
-- if (copy_fpstate_to_sigframe(*fpstate, (void __user *)fx_aligned,
-- math_size) < 0)
-- return (void __user *) -1L;
-- }
-+ sp = fpu__alloc_mathframe(sp, 1, &fx_aligned, &math_size);
-+ *fpstate = (struct _fpstate_32 __user *) sp;
-+ if (copy_fpstate_to_sigframe(*fpstate, (void __user *)fx_aligned,
-+ math_size) < 0)
-+ return (void __user *) -1L;
-
- sp -= frame_size;
- /* Align the stack pointer according to the i386 ABI,
---- a/arch/x86/include/asm/fpu/internal.h
-+++ b/arch/x86/include/asm/fpu/internal.h
-@@ -525,11 +525,14 @@ static inline void fpregs_activate(struc
- *
- * - switch_fpu_finish() restores the new state as
- * necessary.
-+ *
-+ * The FPU context is only stored/restored for user tasks, and ->mm is used to
-+ * distinguish between kernel and user threads.
- */
- static inline void
- switch_fpu_prepare(struct fpu *old_fpu, int cpu)
- {
-- if (static_cpu_has(X86_FEATURE_FPU) && old_fpu->initialized) {
-+ if (static_cpu_has(X86_FEATURE_FPU) && current->mm) {
- if (!copy_fpregs_to_fpstate(old_fpu))
- old_fpu->last_cpu = -1;
- else
-@@ -537,8 +540,7 @@ switch_fpu_prepare(struct fpu *old_fpu,
-
- /* But leave fpu_fpregs_owner_ctx! */
- trace_x86_fpu_regs_deactivated(old_fpu);
-- } else
-- old_fpu->last_cpu = -1;
-+ }
- }
-
- /*
-@@ -551,12 +553,12 @@ switch_fpu_prepare(struct fpu *old_fpu,
- */
- static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
- {
-- bool preload = static_cpu_has(X86_FEATURE_FPU) &&
-- new_fpu->initialized;
-+ if (static_cpu_has(X86_FEATURE_FPU)) {
-+ if (!fpregs_state_valid(new_fpu, cpu)) {
-+ if (current->mm)
-+ copy_kernel_to_fpregs(&new_fpu->state);
-+ }
-
-- if (preload) {
-- if (!fpregs_state_valid(new_fpu, cpu))
-- copy_kernel_to_fpregs(&new_fpu->state);
- fpregs_activate(new_fpu);
- }
- }
---- a/arch/x86/include/asm/fpu/types.h
-+++ b/arch/x86/include/asm/fpu/types.h
-@@ -294,15 +294,6 @@ struct fpu {
- unsigned int last_cpu;
-
- /*
-- * @initialized:
-- *
-- * This flag indicates whether this context is initialized: if the task
-- * is not running then we can restore from this context, if the task
-- * is running then we should save into this context.
-- */
-- unsigned char initialized;
--
-- /*
- * @state:
- *
- * In-memory copy of all FPU registers that we save/restore
---- a/arch/x86/include/asm/trace/fpu.h
-+++ b/arch/x86/include/asm/trace/fpu.h
-@@ -13,22 +13,19 @@ DECLARE_EVENT_CLASS(x86_fpu,
-
- TP_STRUCT__entry(
- __field(struct fpu *, fpu)
-- __field(bool, initialized)
- __field(u64, xfeatures)
- __field(u64, xcomp_bv)
- ),
-
- TP_fast_assign(
- __entry->fpu = fpu;
-- __entry->initialized = fpu->initialized;
- if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
- __entry->xfeatures = fpu->state.xsave.header.xfeatures;
- __entry->xcomp_bv = fpu->state.xsave.header.xcomp_bv;
- }
- ),
-- TP_printk("x86/fpu: %p initialized: %d xfeatures: %llx xcomp_bv: %llx",
-+ TP_printk("x86/fpu: %p xfeatures: %llx xcomp_bv: %llx",
- __entry->fpu,
-- __entry->initialized,
- __entry->xfeatures,
- __entry->xcomp_bv
- )
---- a/arch/x86/kernel/fpu/core.c
-+++ b/arch/x86/kernel/fpu/core.c
-@@ -101,7 +101,7 @@ static void __kernel_fpu_begin(void)
-
- kernel_fpu_disable();
-
-- if (fpu->initialized) {
-+ if (current->mm) {
- /*
- * Ignore return value -- we don't care if reg state
- * is clobbered.
-@@ -116,7 +116,7 @@ static void __kernel_fpu_end(void)
- {
- struct fpu *fpu = &current->thread.fpu;
-
-- if (fpu->initialized)
-+ if (current->mm)
- copy_kernel_to_fpregs(&fpu->state);
-
- kernel_fpu_enable();
-@@ -147,11 +147,10 @@ void fpu__save(struct fpu *fpu)
-
- preempt_disable();
- trace_x86_fpu_before_save(fpu);
-- if (fpu->initialized) {
-- if (!copy_fpregs_to_fpstate(fpu)) {
-- copy_kernel_to_fpregs(&fpu->state);
-- }
-- }
-+
-+ if (!copy_fpregs_to_fpstate(fpu))
-+ copy_kernel_to_fpregs(&fpu->state);
-+
- trace_x86_fpu_after_save(fpu);
- preempt_enable();
- }
-@@ -190,7 +189,7 @@ int fpu__copy(struct fpu *dst_fpu, struc
- {
- dst_fpu->last_cpu = -1;
-
-- if (!src_fpu->initialized || !static_cpu_has(X86_FEATURE_FPU))
-+ if (!static_cpu_has(X86_FEATURE_FPU))
- return 0;
-
- WARN_ON_FPU(src_fpu != &current->thread.fpu);
-@@ -227,14 +226,10 @@ static void fpu__initialize(struct fpu *
- {
- WARN_ON_FPU(fpu != &current->thread.fpu);
-
-- if (!fpu->initialized) {
-- fpstate_init(&fpu->state);
-- trace_x86_fpu_init_state(fpu);
--
-- trace_x86_fpu_activate_state(fpu);
-- /* Safe to do for the current task: */
-- fpu->initialized = 1;
-- }
-+ fpstate_init(&fpu->state);
-+ trace_x86_fpu_init_state(fpu);
-+
-+ trace_x86_fpu_activate_state(fpu);
- }
-
- /*
-@@ -247,32 +242,20 @@ static void fpu__initialize(struct fpu *
- *
- * - or it's called for stopped tasks (ptrace), in which case the
- * registers were already saved by the context-switch code when
-- * the task scheduled out - we only have to initialize the registers
-- * if they've never been initialized.
-+ * the task scheduled out.
- *
- * If the task has used the FPU before then save it.
- */
- void fpu__prepare_read(struct fpu *fpu)
- {
-- if (fpu == &current->thread.fpu) {
-+ if (fpu == &current->thread.fpu)
- fpu__save(fpu);
-- } else {
-- if (!fpu->initialized) {
-- fpstate_init(&fpu->state);
-- trace_x86_fpu_init_state(fpu);
--
-- trace_x86_fpu_activate_state(fpu);
-- /* Safe to do for current and for stopped child tasks: */
-- fpu->initialized = 1;
-- }
-- }
- }
-
- /*
- * This function must be called before we write a task's fpstate.
- *
-- * If the task has used the FPU before then invalidate any cached FPU registers.
-- * If the task has not used the FPU before then initialize its fpstate.
-+ * Invalidate any cached FPU registers.
- *
- * After this function call, after registers in the fpstate are
- * modified and the child task has woken up, the child task will
-@@ -289,17 +272,8 @@ void fpu__prepare_write(struct fpu *fpu)
- */
- WARN_ON_FPU(fpu == &current->thread.fpu);
-
-- if (fpu->initialized) {
-- /* Invalidate any cached state: */
-- __fpu_invalidate_fpregs_state(fpu);
-- } else {
-- fpstate_init(&fpu->state);
-- trace_x86_fpu_init_state(fpu);
--
-- trace_x86_fpu_activate_state(fpu);
-- /* Safe to do for stopped child tasks: */
-- fpu->initialized = 1;
-- }
-+ /* Invalidate any cached state: */
-+ __fpu_invalidate_fpregs_state(fpu);
- }
-
- /*
-@@ -316,17 +290,13 @@ void fpu__drop(struct fpu *fpu)
- preempt_disable();
-
- if (fpu == &current->thread.fpu) {
-- if (fpu->initialized) {
-- /* Ignore delayed exceptions from user space */
-- asm volatile("1: fwait\n"
-- "2:\n"
-- _ASM_EXTABLE(1b, 2b));
-- fpregs_deactivate(fpu);
-- }
-+ /* Ignore delayed exceptions from user space */
-+ asm volatile("1: fwait\n"
-+ "2:\n"
-+ _ASM_EXTABLE(1b, 2b));
-+ fpregs_deactivate(fpu);
- }
-
-- fpu->initialized = 0;
--
- trace_x86_fpu_dropped(fpu);
-
- preempt_enable();
---- a/arch/x86/kernel/fpu/init.c
-+++ b/arch/x86/kernel/fpu/init.c
-@@ -239,8 +239,6 @@ static void __init fpu__init_system_ctx_
-
- WARN_ON_FPU(!on_boot_cpu);
- on_boot_cpu = 0;
--
-- WARN_ON_FPU(current->thread.fpu.initialized);
- }
-
- /*
---- a/arch/x86/kernel/fpu/regset.c
-+++ b/arch/x86/kernel/fpu/regset.c
-@@ -15,16 +15,12 @@
- */
- int regset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
- {
-- struct fpu *target_fpu = &target->thread.fpu;
--
-- return target_fpu->initialized ? regset->n : 0;
-+ return regset->n;
- }
-
- int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
- {
-- struct fpu *target_fpu = &target->thread.fpu;
--
-- if (boot_cpu_has(X86_FEATURE_FXSR) && target_fpu->initialized)
-+ if (boot_cpu_has(X86_FEATURE_FXSR))
- return regset->n;
- else
- return 0;
-@@ -370,16 +366,9 @@ int fpregs_set(struct task_struct *targe
- int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu)
- {
- struct task_struct *tsk = current;
-- struct fpu *fpu = &tsk->thread.fpu;
-- int fpvalid;
--
-- fpvalid = fpu->initialized;
-- if (fpvalid)
-- fpvalid = !fpregs_get(tsk, NULL,
-- 0, sizeof(struct user_i387_ia32_struct),
-- ufpu, NULL);
-
-- return fpvalid;
-+ return !fpregs_get(tsk, NULL, 0, sizeof(struct user_i387_ia32_struct),
-+ ufpu, NULL);
- }
- EXPORT_SYMBOL(dump_fpu);
-
---- a/arch/x86/kernel/fpu/xstate.c
-+++ b/arch/x86/kernel/fpu/xstate.c
-@@ -892,8 +892,6 @@ const void *get_xsave_field_ptr(int xsav
- {
- struct fpu *fpu = &current->thread.fpu;
-
-- if (!fpu->initialized)
-- return NULL;
- /*
- * fpu__save() takes the CPU's xstate registers
- * and saves them off to the 'fpu memory buffer.
---- a/arch/x86/kernel/process_32.c
-+++ b/arch/x86/kernel/process_32.c
-@@ -288,10 +288,10 @@ EXPORT_SYMBOL_GPL(start_thread);
- if (prev->gs | next->gs)
- lazy_load_gs(next->gs);
-
-- switch_fpu_finish(next_fpu, cpu);
--
- this_cpu_write(current_task, next_p);
-
-+ switch_fpu_finish(next_fpu, cpu);
-+
- /* Load the Intel cache allocation PQR MSR. */
- resctrl_sched_in();
-
---- a/arch/x86/kernel/process_64.c
-+++ b/arch/x86/kernel/process_64.c
-@@ -566,14 +566,14 @@ void compat_start_thread(struct pt_regs
-
- x86_fsgsbase_load(prev, next);
-
-- switch_fpu_finish(next_fpu, cpu);
--
- /*
- * Switch the PDA and FPU contexts.
- */
- this_cpu_write(current_task, next_p);
- this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));
-
-+ switch_fpu_finish(next_fpu, cpu);
-+
- /* Reload sp0. */
- update_task_stack(next_p);
-
---- a/arch/x86/kernel/signal.c
-+++ b/arch/x86/kernel/signal.c
-@@ -246,7 +246,7 @@ get_sigframe(struct k_sigaction *ka, str
- unsigned long sp = regs->sp;
- unsigned long buf_fx = 0;
- int onsigstack = on_sig_stack(sp);
-- struct fpu *fpu = &current->thread.fpu;
-+ int ret;
-
- /* redzone */
- if (IS_ENABLED(CONFIG_X86_64))
-@@ -265,11 +265,9 @@ get_sigframe(struct k_sigaction *ka, str
- sp = (unsigned long) ka->sa.sa_restorer;
- }
-
-- if (fpu->initialized) {
-- sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32),
-- &buf_fx, &math_size);
-- *fpstate = (void __user *)sp;
-- }
-+ sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32),
-+ &buf_fx, &math_size);
-+ *fpstate = (void __user *)sp;
-
- sp = align_sigframe(sp - frame_size);
-
-@@ -281,8 +279,8 @@ get_sigframe(struct k_sigaction *ka, str
- return (void __user *)-1L;
-
- /* save i387 and extended state */
-- if (fpu->initialized &&
-- copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size) < 0)
-+ ret = copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size);
-+ if (ret < 0)
- return (void __user *)-1L;
-
- return (void __user *)sp;
-@@ -763,8 +761,7 @@ handle_signal(struct ksignal *ksig, stru
- /*
- * Ensure the signal handler starts with the new fpu state.
- */
-- if (fpu->initialized)
-- fpu__clear(fpu);
-+ fpu__clear(fpu);
- }
- signal_setup_done(failed, ksig, stepping);
- }
---- a/arch/x86/mm/pkeys.c
-+++ b/arch/x86/mm/pkeys.c
-@@ -39,17 +39,12 @@ int __execute_only_pkey(struct mm_struct
- * dance to set PKRU if we do not need to. Check it
- * first and assume that if the execute-only pkey is
- * write-disabled that we do not have to set it
-- * ourselves. We need preempt off so that nobody
-- * can make fpregs inactive.
-+ * ourselves.
- */
-- preempt_disable();
- if (!need_to_set_mm_pkey &&
-- current->thread.fpu.initialized &&
- !__pkru_allows_read(read_pkru(), execute_only_pkey)) {
-- preempt_enable();
- return execute_only_pkey;
- }
-- preempt_enable();
-
- /*
- * Set up PKRU so that it denies access for everything
diff --git a/debian/patches-rt/0008-clocksource-drivers-timer-atmel-tcb-tc_clksrc_suspen.patch b/debian/patches-rt/0008-clocksource-drivers-timer-atmel-tcb-tc_clksrc_suspen.patch
deleted file mode 100644
index 457838441..000000000
--- a/debian/patches-rt/0008-clocksource-drivers-timer-atmel-tcb-tc_clksrc_suspen.patch
+++ /dev/null
@@ -1,35 +0,0 @@
-From: kbuild test robot <lkp@intel.com>
-Date: Fri, 26 Apr 2019 23:47:17 +0200
-Subject: [PATCH 08/10] clocksource/drivers/timer-atmel-tcb:
- tc_clksrc_suspend() can be static
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-Make tc_clksrc_suspend() and tc_clksrc_resume() static.
-
-Signed-off-by: kbuild test robot <lkp@intel.com>
-Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/clocksource/timer-atmel-tcb.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
---- a/drivers/clocksource/timer-atmel-tcb.c
-+++ b/drivers/clocksource/timer-atmel-tcb.c
-@@ -66,7 +66,7 @@ static u64 tc_get_cycles32(struct clocks
- return readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
- }
-
--void tc_clksrc_suspend(struct clocksource *cs)
-+static void tc_clksrc_suspend(struct clocksource *cs)
- {
- int i;
-
-@@ -81,7 +81,7 @@ void tc_clksrc_suspend(struct clocksourc
- bmr_cache = readl(tcaddr + ATMEL_TC_BMR);
- }
-
--void tc_clksrc_resume(struct clocksource *cs)
-+static void tc_clksrc_resume(struct clocksource *cs)
- {
- int i;
-
diff --git a/debian/patches-rt/0008-printk-add-ring-buffer-and-kthread.patch b/debian/patches-rt/0008-printk-add-ring-buffer-and-kthread.patch
index f5557dd3c..60e3313b9 100644
--- a/debian/patches-rt/0008-printk-add-ring-buffer-and-kthread.patch
+++ b/debian/patches-rt/0008-printk-add-ring-buffer-and-kthread.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 12 Feb 2019 15:29:46 +0100
Subject: [PATCH 08/25] printk: add ring buffer and kthread
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
The printk ring buffer provides an NMI-safe interface for writing
messages to a ring buffer. Using such a buffer alleviates printk
@@ -26,7 +26,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -44,6 +44,8 @@
+@@ -45,6 +45,8 @@
#include <linux/irq_work.h>
#include <linux/ctype.h>
#include <linux/uio.h>
@@ -35,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
-@@ -397,7 +399,12 @@ DEFINE_RAW_SPINLOCK(logbuf_lock);
+@@ -407,7 +409,12 @@ DEFINE_RAW_SPINLOCK(logbuf_lock);
printk_safe_exit_irqrestore(flags); \
} while (0)
@@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
DECLARE_WAIT_QUEUE_HEAD(log_wait);
/* the next printk record to read by syslog(READ) or /proc/kmsg */
static u64 syslog_seq;
-@@ -744,6 +751,10 @@ static ssize_t msg_print_ext_body(char *
+@@ -770,6 +777,10 @@ static ssize_t msg_print_ext_body(char *
return p - buf;
}
@@ -59,7 +59,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* /dev/kmsg - userspace message inject/listen interface */
struct devkmsg_user {
u64 seq;
-@@ -1566,6 +1577,34 @@ SYSCALL_DEFINE3(syslog, int, type, char
+@@ -1610,6 +1621,34 @@ SYSCALL_DEFINE3(syslog, int, type, char
return do_syslog(type, buf, len, SYSLOG_FROM_READER);
}
@@ -94,7 +94,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Special console_lock variants that help to reduce the risk of soft-lockups.
* They allow to pass console_lock to another printk() call using a busy wait.
-@@ -2899,6 +2938,72 @@ void wake_up_klogd(void)
+@@ -2964,6 +3003,72 @@ void wake_up_klogd(void)
preempt_enable();
}
diff --git a/debian/patches-rt/0008-x86-fpu-Remove-user_fpu_begin.patch b/debian/patches-rt/0008-x86-fpu-Remove-user_fpu_begin.patch
deleted file mode 100644
index bcab836ce..000000000
--- a/debian/patches-rt/0008-x86-fpu-Remove-user_fpu_begin.patch
+++ /dev/null
@@ -1,83 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Thu, 18 Oct 2018 18:34:11 +0200
-Subject: [PATCH 08/27] x86/fpu: Remove user_fpu_begin()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-user_fpu_begin() sets fpu_fpregs_owner_ctx to the task's fpu struct. This is
-always the case since there is no lazy FPU anymore.
-
-fpu_fpregs_owner_ctx is used during context switch to decide if it needs
-to load the saved registers or if the currently loaded registers are
-valid. It could be skipped during
- taskA -> kernel thread -> taskA
-
-because the switch to kernel thread would not alter the CPU's FPU state.
-
-Since this field is always updated during context switch and never
-invalidated, setting it manually (in user context) makes no difference.
-A kernel thread with a kernel_fpu_begin() block could set
-fpu_fpregs_owner_ctx to NULL, but a kernel thread does not use
-user_fpu_begin().
-This is a leftover from the lazy-FPU era.
-
-Remove user_fpu_begin(); it does not change fpu_fpregs_owner_ctx's
-content.
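-
-For reference, the validity check that the context-switch path relies on
-looks roughly like this (a sketch modelled on the 5.0-era helper; the
-exact per-CPU accessor differs, illustrative only):
-
-  static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
-  {
-          /* valid iff this task's registers were loaded last, on this CPU */
-          return fpu == this_cpu_read(fpu_fpregs_owner_ctx) &&
-                 cpu == fpu->last_cpu;
-  }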
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Reviewed-by: Borislav Petkov <bp@suse.de>
----
- arch/x86/include/asm/fpu/internal.h | 17 -----------------
- arch/x86/kernel/fpu/core.c | 4 +---
- arch/x86/kernel/fpu/signal.c | 1 -
- 3 files changed, 1 insertion(+), 21 deletions(-)
-
---- a/arch/x86/include/asm/fpu/internal.h
-+++ b/arch/x86/include/asm/fpu/internal.h
-@@ -564,23 +564,6 @@ static inline void switch_fpu_finish(str
- }
-
- /*
-- * Needs to be preemption-safe.
-- *
-- * NOTE! user_fpu_begin() must be used only immediately before restoring
-- * the save state. It does not do any saving/restoring on its own. In
-- * lazy FPU mode, it is just an optimization to avoid a #NM exception,
-- * the task can lose the FPU right after preempt_enable().
-- */
--static inline void user_fpu_begin(void)
--{
-- struct fpu *fpu = &current->thread.fpu;
--
-- preempt_disable();
-- fpregs_activate(fpu);
-- preempt_enable();
--}
--
--/*
- * MXCSR and XCR definitions:
- */
-
---- a/arch/x86/kernel/fpu/core.c
-+++ b/arch/x86/kernel/fpu/core.c
-@@ -335,10 +335,8 @@ void fpu__clear(struct fpu *fpu)
- * Make sure fpstate is cleared and initialized.
- */
- fpu__initialize(fpu);
-- if (static_cpu_has(X86_FEATURE_FPU)) {
-- user_fpu_begin();
-+ if (static_cpu_has(X86_FEATURE_FPU))
- copy_init_fpstate_to_fpregs();
-- }
- }
-
- /*
---- a/arch/x86/kernel/fpu/signal.c
-+++ b/arch/x86/kernel/fpu/signal.c
-@@ -322,7 +322,6 @@ static int __fpu__restore_sig(void __use
- * For 64-bit frames and 32-bit fsave frames, restore the user
- * state to the registers directly (with exceptions handled).
- */
-- user_fpu_begin();
- if (copy_user_to_fpregs_zeroing(buf_fx, xfeatures, fx_only)) {
- fpu__clear(fpu);
- return -1;
diff --git a/debian/patches-rt/0009-misc-atmel_tclib-do-not-probe-already-used-TCBs.patch b/debian/patches-rt/0009-misc-atmel_tclib-do-not-probe-already-used-TCBs.patch
deleted file mode 100644
index 310baa7de..000000000
--- a/debian/patches-rt/0009-misc-atmel_tclib-do-not-probe-already-used-TCBs.patch
+++ /dev/null
@@ -1,27 +0,0 @@
-From: Alexandre Belloni <alexandre.belloni@bootlin.com>
-Date: Fri, 26 Apr 2019 23:47:18 +0200
-Subject: [PATCH 09/10] misc: atmel_tclib: do not probe already used TCBs
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-The TCBs that have children are using the proper DT bindings and don't need
-to be handled by tclib.
-
-Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/misc/atmel_tclib.c | 3 +++
- 1 file changed, 3 insertions(+)
-
---- a/drivers/misc/atmel_tclib.c
-+++ b/drivers/misc/atmel_tclib.c
-@@ -111,6 +111,9 @@ static int __init tc_probe(struct platfo
- struct resource *r;
- unsigned int i;
-
-+ if (of_get_child_count(pdev->dev.of_node))
-+ return -EBUSY;
-+
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return -EINVAL;
diff --git a/debian/patches-rt/0009-printk-remove-exclusive-console-hack.patch b/debian/patches-rt/0009-printk-remove-exclusive-console-hack.patch
index 5f6ffe1e9..343222ac6 100644
--- a/debian/patches-rt/0009-printk-remove-exclusive-console-hack.patch
+++ b/debian/patches-rt/0009-printk-remove-exclusive-console-hack.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 12 Feb 2019 15:29:47 +0100
Subject: [PATCH 09/25] printk: remove exclusive console hack
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
In order to support printing the printk log history when new
consoles are registered, a global exclusive_console variable is
@@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -251,11 +251,6 @@ static void __up_console_sem(unsigned lo
+@@ -259,11 +259,6 @@ static void __up_console_sem(unsigned lo
static int console_locked, console_suspended;
/*
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* Array of consoles built from command line options (console=)
*/
-@@ -423,7 +418,6 @@ static u32 log_next_idx;
+@@ -433,7 +428,6 @@ static u32 log_next_idx;
/* the next printk record to write to the console */
static u64 console_seq;
static u32 console_idx;
@@ -41,7 +41,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* the next printk record to read after the last 'clear' command */
static u64 clear_seq;
-@@ -1761,8 +1755,6 @@ static void call_console_drivers(const c
+@@ -1805,8 +1799,6 @@ static void call_console_drivers(const c
return;
for_each_console(con) {
@@ -50,7 +50,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!(con->flags & CON_ENABLED))
continue;
if (!con->write)
-@@ -2044,7 +2036,6 @@ static u64 syslog_seq;
+@@ -2099,7 +2091,6 @@ static u64 syslog_seq;
static u32 syslog_idx;
static u64 console_seq;
static u32 console_idx;
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static u64 log_first_seq;
static u32 log_first_idx;
static u64 log_next_seq;
-@@ -2413,12 +2404,6 @@ void console_unlock(void)
+@@ -2468,12 +2459,6 @@ void console_unlock(void)
goto skip;
}
@@ -71,7 +71,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
len += msg_print_text(msg,
console_msg_format & MSG_FORMAT_SYSLOG,
printk_time, text + len, sizeof(text) - len);
-@@ -2736,17 +2721,6 @@ void register_console(struct console *ne
+@@ -2801,17 +2786,6 @@ void register_console(struct console *ne
logbuf_lock_irqsave(flags);
console_seq = syslog_seq;
console_idx = syslog_idx;
@@ -89,7 +89,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
logbuf_unlock_irqrestore(flags);
}
console_unlock();
-@@ -2758,6 +2732,10 @@ void register_console(struct console *ne
+@@ -2823,6 +2797,10 @@ void register_console(struct console *ne
* boot consoles, real consoles, etc - this is to ensure that end
* users know there might be something in the kernel's log buffer that
* went to the bootconsole (that they do not see on the real console)
diff --git a/debian/patches-rt/0009-x86-fpu-Add-__-make_fpregs_active-helpers.patch b/debian/patches-rt/0009-x86-fpu-Add-__-make_fpregs_active-helpers.patch
deleted file mode 100644
index e0f829ae6..000000000
--- a/debian/patches-rt/0009-x86-fpu-Add-__-make_fpregs_active-helpers.patch
+++ /dev/null
@@ -1,77 +0,0 @@
-From: Rik van Riel <riel@surriel.com>
-Date: Sun, 9 Sep 2018 18:30:45 +0200
-Subject: [PATCH 09/27] x86/fpu: Add (__)make_fpregs_active helpers
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-Add helper function that ensures the floating point registers for
-the current task are active. Use with preemption disabled.
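-
-A sketch of how the new helpers are meant to be used together (the helper
-names are the ones added below; the actual call sites land later in the
-series):
-
-  fpregs_lock();                     /* wraps preempt_disable()        */
-  __fpregs_load_activate(fpu, cpu);  /* load fpregs if not still valid */
-  /* ... use the FPU registers ... */
-  fpregs_unlock();                   /* wraps preempt_enable()         */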
-
-Signed-off-by: Rik van Riel <riel@surriel.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/include/asm/fpu/api.h | 11 +++++++++++
- arch/x86/include/asm/fpu/internal.h | 19 +++++++++++--------
- 2 files changed, 22 insertions(+), 8 deletions(-)
-
---- a/arch/x86/include/asm/fpu/api.h
-+++ b/arch/x86/include/asm/fpu/api.h
-@@ -10,6 +10,7 @@
-
- #ifndef _ASM_X86_FPU_API_H
- #define _ASM_X86_FPU_API_H
-+#include <linux/preempt.h>
-
- /*
- * Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It
-@@ -22,6 +23,16 @@ extern void kernel_fpu_begin(void);
- extern void kernel_fpu_end(void);
- extern bool irq_fpu_usable(void);
-
-+static inline void fpregs_lock(void)
-+{
-+ preempt_disable();
-+}
-+
-+static inline void fpregs_unlock(void)
-+{
-+ preempt_enable();
-+}
-+
- /*
- * Query the presence of one or more xfeatures. Works on any legacy CPU as well.
- *
---- a/arch/x86/include/asm/fpu/internal.h
-+++ b/arch/x86/include/asm/fpu/internal.h
-@@ -515,6 +515,15 @@ static inline void fpregs_activate(struc
- trace_x86_fpu_regs_activated(fpu);
- }
-
-+static inline void __fpregs_load_activate(struct fpu *fpu, int cpu)
-+{
-+ if (!fpregs_state_valid(fpu, cpu)) {
-+ if (current->mm)
-+ copy_kernel_to_fpregs(&fpu->state);
-+ fpregs_activate(fpu);
-+ }
-+}
-+
- /*
- * FPU state switching for scheduling.
- *
-@@ -553,14 +562,8 @@ switch_fpu_prepare(struct fpu *old_fpu,
- */
- static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
- {
-- if (static_cpu_has(X86_FEATURE_FPU)) {
-- if (!fpregs_state_valid(new_fpu, cpu)) {
-- if (current->mm)
-- copy_kernel_to_fpregs(&new_fpu->state);
-- }
--
-- fpregs_activate(new_fpu);
-- }
-+ if (static_cpu_has(X86_FEATURE_FPU))
-+ __fpregs_load_activate(new_fpu, cpu);
- }
-
- /*
diff --git a/debian/patches-rt/0010-printk-redirect-emit-store-to-new-ringbuffer.patch b/debian/patches-rt/0010-printk-redirect-emit-store-to-new-ringbuffer.patch
index 397f8d152..ff1998c2c 100644
--- a/debian/patches-rt/0010-printk-redirect-emit-store-to-new-ringbuffer.patch
+++ b/debian/patches-rt/0010-printk-redirect-emit-store-to-new-ringbuffer.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 12 Feb 2019 15:29:48 +0100
Subject: [PATCH 10/25] printk: redirect emit/store to new ringbuffer
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
vprintk_emit and vprintk_store are the main functions that all printk
variants eventually go through. Change these to store the message in
@@ -21,12 +21,12 @@ NOTE: LOG_CONT is ignored for now and handled as individual messages.
Signed-off-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- kernel/printk/printk.c | 317 +++++++------------------------------------------
- 1 file changed, 50 insertions(+), 267 deletions(-)
+ kernel/printk/printk.c | 325 +++++++------------------------------------------
+ 1 file changed, 51 insertions(+), 274 deletions(-)
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -493,90 +493,6 @@ static u32 log_next(u32 idx)
+@@ -507,90 +507,6 @@ static u32 log_next(u32 idx)
return idx + msg->len;
}
@@ -115,9 +115,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-}
-
/* insert record into the buffer, discard old ones, update heads */
- static int log_store(int facility, int level,
+ static int log_store(u32 caller_id, int facility, int level,
enum log_flags flags, u64 ts_nsec,
-@@ -584,54 +500,36 @@ static int log_store(int facility, int l
+@@ -598,57 +514,39 @@ static int log_store(u32 caller_id, int
const char *text, u16 text_len)
{
struct printk_log *msg;
@@ -174,8 +174,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- msg->ts_nsec = ts_nsec;
- else
- msg->ts_nsec = local_clock();
-- memset(log_dict(msg) + dict_len, 0, pad_len);
+ msg->ts_nsec = ts_nsec;
+ #ifdef CONFIG_PRINTK_CALLER
+ msg->caller_id = caller_id;
+ #endif
+- memset(log_dict(msg) + dict_len, 0, pad_len);
msg->len = size;
/* insert message */
@@ -185,7 +188,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return msg->text_len;
}
-@@ -1675,70 +1573,6 @@ static int console_lock_spinning_disable
+@@ -1719,70 +1617,6 @@ static int console_lock_spinning_disable
return 1;
}
@@ -256,7 +259,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Call the console drivers, asking them to write out
* log_buf[start] to log_buf[end - 1].
-@@ -1759,7 +1593,7 @@ static void call_console_drivers(const c
+@@ -1803,7 +1637,7 @@ static void call_console_drivers(const c
continue;
if (!con->write)
continue;
@@ -265,8 +268,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
!(con->flags & CON_ANYTIME))
continue;
if (con->flags & CON_EXTENDED)
-@@ -1783,6 +1617,8 @@ static inline void printk_delay(void)
- }
+@@ -1833,6 +1667,8 @@ static inline u32 printk_caller_id(void)
+ 0x80000000 + raw_smp_processor_id();
}
+/* FIXME: no support for LOG_CONT */
@@ -274,7 +277,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Continuation lines are buffered, and not committed to the record buffer
* until the line is complete, or a race forces it. The line fragments
-@@ -1837,53 +1673,44 @@ static bool cont_add(int facility, int l
+@@ -1888,56 +1724,45 @@ static bool cont_add(u32 caller_id, int
return true;
}
@@ -282,13 +285,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-static size_t log_output(int facility, int level, enum log_flags lflags, const char *dict, size_t dictlen, char *text, size_t text_len)
-{
+- const u32 caller_id = printk_caller_id();
+-
- /*
- * If an earlier line was buffered, and we're a continuation
-- * write from the same process, try to add it to the buffer.
+- * write from the same context, try to add it to the buffer.
- */
- if (cont.len) {
-- if (cont.owner == current && (lflags & LOG_CONT)) {
-- if (cont_add(facility, level, lflags, text, text_len))
+- if (cont.caller_id == caller_id && (lflags & LOG_CONT)) {
+- if (cont_add(caller_id, facility, level, lflags, text, text_len))
- return text_len;
- }
- /* Otherwise, make sure it's flushed */
@@ -301,12 +306,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
- /* If it doesn't end in a newline, try to buffer the current line */
- if (!(lflags & LOG_NEWLINE)) {
-- if (cont_add(facility, level, lflags, text, text_len))
+- if (cont_add(caller_id, facility, level, lflags, text, text_len))
- return text_len;
- }
-
- /* Store it in the record log */
-- return log_store(facility, level, lflags, 0, dict, dictlen, text, text_len);
+- return log_store(caller_id, facility, level, lflags, 0,
+- dict, dictlen, text, text_len);
-}
-
-/* Must be called under logbuf_lock. */
@@ -329,6 +335,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ const char *dict, size_t dictlen,
+ const char *fmt, va_list args)
+{
++ const u32 caller_id = printk_caller_id();
enum log_flags lflags = 0;
+ int printed_len = 0;
+ struct prb_handle h;
@@ -349,18 +356,18 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ prb_inc_lost(&printk_rb);
+ return printed_len;
+ }
-
-- /* mark and strip a trailing newline */
++
+ text = rbuf;
+ text_len = vscnprintf(text, PRINTK_SPRINT_MAX, fmt, args);
-+
+
+- /* mark and strip a trailing newline */
+ /* strip and flag a trailing newline */
if (text_len && text[text_len-1] == '\n') {
text_len--;
lflags |= LOG_NEWLINE;
-@@ -1917,54 +1744,10 @@ int vprintk_store(int facility, int leve
+@@ -1968,58 +1793,10 @@ int vprintk_store(int facility, int leve
if (dict)
- lflags |= LOG_PREFIX|LOG_NEWLINE;
+ lflags |= LOG_NEWLINE;
- return log_output(facility, level, lflags,
- dict, dictlen, text, text_len);
@@ -375,6 +382,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- unsigned long flags;
- u64 curr_log_seq;
-
+- /* Suppress unimportant messages after panic happens */
+- if (unlikely(suppress_printk))
+- return 0;
+-
- if (level == LOGLEVEL_SCHED) {
- level = LOGLEVEL_DEFAULT;
- in_sched = true;
@@ -407,7 +418,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- console_unlock();
- preempt_enable();
- }
-+ printed_len = log_store(facility, level, lflags, ts_nsec,
++ printed_len = log_store(caller_id, facility, level, lflags, ts_nsec,
+ dict, dictlen, text, text_len);
- if (pending_output)
@@ -416,7 +427,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return printed_len;
}
EXPORT_SYMBOL(vprintk_emit);
-@@ -2429,7 +2212,7 @@ void console_unlock(void)
+@@ -2484,7 +2261,7 @@ void console_unlock(void)
console_lock_spinning_enable();
stop_critical_timings(); /* don't trace print latency */
diff --git a/debian/patches-rt/0010-x86-fpu-Make-__raw_xsave_addr-use-feature-number-ins.patch b/debian/patches-rt/0010-x86-fpu-Make-__raw_xsave_addr-use-feature-number-ins.patch
deleted file mode 100644
index 7e4c8ec88..000000000
--- a/debian/patches-rt/0010-x86-fpu-Make-__raw_xsave_addr-use-feature-number-ins.patch
+++ /dev/null
@@ -1,98 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 2 Oct 2018 10:28:15 +0200
-Subject: [PATCH 10/27] x86/fpu: Make __raw_xsave_addr() use feature number
- instead of mask
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-Most users of __raw_xsave_addr() use a feature number, shift it to a
-mask and then __raw_xsave_addr() shifts it back to the feature number.
-
-Make __raw_xsave_addr() use the feature number as an argument.
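-
-The two representations are related by a simple shift, which is why the
-feature number is the cheaper argument to pass around (illustrative only):
-
-  int feature_nr = fls64(xstate_feature_mask) - 1; /* mask -> number */
-  u64 mask = BIT_ULL(feature_nr);                  /* number -> mask */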
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Reviewed-by: Borislav Petkov <bp@suse.de>
----
- arch/x86/kernel/fpu/xstate.c | 22 +++++++++++-----------
- 1 file changed, 11 insertions(+), 11 deletions(-)
-
---- a/arch/x86/kernel/fpu/xstate.c
-+++ b/arch/x86/kernel/fpu/xstate.c
-@@ -805,20 +805,18 @@ void fpu__resume_cpu(void)
- }
-
- /*
-- * Given an xstate feature mask, calculate where in the xsave
-+ * Given an xstate feature nr, calculate where in the xsave
- * buffer the state is. Callers should ensure that the buffer
- * is valid.
- */
--static void *__raw_xsave_addr(struct xregs_state *xsave, int xstate_feature_mask)
-+static void *__raw_xsave_addr(struct xregs_state *xsave, int xfeature_nr)
- {
-- int feature_nr = fls64(xstate_feature_mask) - 1;
--
-- if (!xfeature_enabled(feature_nr)) {
-+ if (!xfeature_enabled(xfeature_nr)) {
- WARN_ON_FPU(1);
- return NULL;
- }
-
-- return (void *)xsave + xstate_comp_offsets[feature_nr];
-+ return (void *)xsave + xstate_comp_offsets[xfeature_nr];
- }
- /*
- * Given the xsave area and a state inside, this function returns the
-@@ -840,6 +838,7 @@ static void *__raw_xsave_addr(struct xre
- */
- void *get_xsave_addr(struct xregs_state *xsave, int xstate_feature)
- {
-+ int xfeature_nr;
- /*
- * Do we even *have* xsave state?
- */
-@@ -867,7 +866,8 @@ void *get_xsave_addr(struct xregs_state
- if (!(xsave->header.xfeatures & xstate_feature))
- return NULL;
-
-- return __raw_xsave_addr(xsave, xstate_feature);
-+ xfeature_nr = fls64(xstate_feature) - 1;
-+ return __raw_xsave_addr(xsave, xfeature_nr);
- }
- EXPORT_SYMBOL_GPL(get_xsave_addr);
-
-@@ -1014,7 +1014,7 @@ int copy_xstate_to_kernel(void *kbuf, st
- * Copy only in-use xstates:
- */
- if ((header.xfeatures >> i) & 1) {
-- void *src = __raw_xsave_addr(xsave, 1 << i);
-+ void *src = __raw_xsave_addr(xsave, i);
-
- offset = xstate_offsets[i];
- size = xstate_sizes[i];
-@@ -1100,7 +1100,7 @@ int copy_xstate_to_user(void __user *ubu
- * Copy only in-use xstates:
- */
- if ((header.xfeatures >> i) & 1) {
-- void *src = __raw_xsave_addr(xsave, 1 << i);
-+ void *src = __raw_xsave_addr(xsave, i);
-
- offset = xstate_offsets[i];
- size = xstate_sizes[i];
-@@ -1157,7 +1157,7 @@ int copy_kernel_to_xstate(struct xregs_s
- u64 mask = ((u64)1 << i);
-
- if (hdr.xfeatures & mask) {
-- void *dst = __raw_xsave_addr(xsave, 1 << i);
-+ void *dst = __raw_xsave_addr(xsave, i);
-
- offset = xstate_offsets[i];
- size = xstate_sizes[i];
-@@ -1211,7 +1211,7 @@ int copy_user_to_xstate(struct xregs_sta
- u64 mask = ((u64)1 << i);
-
- if (hdr.xfeatures & mask) {
-- void *dst = __raw_xsave_addr(xsave, 1 << i);
-+ void *dst = __raw_xsave_addr(xsave, i);
-
- offset = xstate_offsets[i];
- size = xstate_sizes[i];
diff --git a/debian/patches-rt/0011-printk_safe-remove-printk-safe-code.patch b/debian/patches-rt/0011-printk_safe-remove-printk-safe-code.patch
index 8edbecc4e..5c3719fd3 100644
--- a/debian/patches-rt/0011-printk_safe-remove-printk-safe-code.patch
+++ b/debian/patches-rt/0011-printk_safe-remove-printk-safe-code.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 12 Feb 2019 15:29:49 +0100
Subject: [PATCH 11/25] printk_safe: remove printk safe code
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
vprintk variants are now NMI-safe so there is no longer a need for
the "safe" calls.
@@ -21,15 +21,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
kernel/printk/Makefile | 1
kernel/printk/internal.h | 30 --
kernel/printk/printk.c | 13 -
- kernel/printk/printk_safe.c | 427 -----------------------------------------
+ kernel/printk/printk_safe.c | 415 -----------------------------------------
kernel/trace/trace.c | 2
lib/nmi_backtrace.c | 6
- 13 files changed, 7 insertions(+), 512 deletions(-)
+ 13 files changed, 7 insertions(+), 500 deletions(-)
delete mode 100644 kernel/printk/printk_safe.c
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
-@@ -174,7 +174,6 @@ extern void panic_flush_kmsg_start(void)
+@@ -171,7 +171,6 @@ extern void panic_flush_kmsg_start(void)
extern void panic_flush_kmsg_end(void)
{
@@ -71,7 +71,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
-@@ -145,18 +145,6 @@ static inline __printf(1, 2) __cold
+@@ -146,18 +146,6 @@ static inline __printf(1, 2) __cold
void early_printk(const char *s, ...) { }
#endif
@@ -90,7 +90,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_PRINTK
asmlinkage __printf(5, 0)
int vprintk_emit(int facility, int level,
-@@ -201,9 +189,6 @@ void __init setup_log_buf(int early);
+@@ -202,9 +190,6 @@ void __init setup_log_buf(int early);
void dump_stack_print_info(const char *log_lvl);
void show_regs_print_info(const char *log_lvl);
extern asmlinkage void dump_stack(void) __cold;
@@ -100,7 +100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#else
static inline __printf(1, 0)
int vprintk(const char *s, va_list args)
-@@ -267,18 +252,6 @@ static inline void show_regs_print_info(
+@@ -268,18 +253,6 @@ static inline void show_regs_print_info(
static inline void dump_stack(void)
{
}
@@ -121,9 +121,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern int kptr_restrict;
--- a/init/main.c
+++ b/init/main.c
-@@ -648,7 +648,6 @@ asmlinkage __visible void __init start_k
- softirq_init();
- timekeeping_init();
+@@ -669,7 +669,6 @@ asmlinkage __visible void __init start_k
+ boot_init_stack_canary();
+
time_init();
- printk_safe_init();
perf_event_init();
@@ -131,7 +131,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
call_function_init();
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
-@@ -972,7 +972,6 @@ void crash_kexec(struct pt_regs *regs)
+@@ -970,7 +970,6 @@ void crash_kexec(struct pt_regs *regs)
old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
if (old_cpu == PANIC_CPU_INVALID) {
/* This is the 1st CPU which comes here, so go ahead. */
@@ -141,7 +141,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/kernel/panic.c
+++ b/kernel/panic.c
-@@ -223,7 +223,6 @@ void panic(const char *fmt, ...)
+@@ -228,7 +228,6 @@ void panic(const char *fmt, ...)
* Bypass the panic_cpu check and call __crash_kexec directly.
*/
if (!_crash_kexec_post_notifiers) {
@@ -149,7 +149,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
__crash_kexec(NULL);
/*
-@@ -247,8 +246,6 @@ void panic(const char *fmt, ...)
+@@ -252,8 +251,6 @@ void panic(const char *fmt, ...)
*/
atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
@@ -160,13 +160,14 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/kernel/printk/Makefile
+++ b/kernel/printk/Makefile
-@@ -1,3 +1,2 @@
+@@ -1,4 +1,3 @@
+ # SPDX-License-Identifier: GPL-2.0-only
obj-y = printk.o
-obj-$(CONFIG_PRINTK) += printk_safe.o
obj-$(CONFIG_A11Y_BRAILLE_CONSOLE) += braille.o
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
-@@ -32,32 +32,6 @@ int vprintk_store(int facility, int leve
+@@ -20,32 +20,6 @@ int vprintk_store(int facility, int leve
__printf(1, 0) int vprintk_default(const char *fmt, va_list args);
__printf(1, 0) int vprintk_deferred(const char *fmt, va_list args);
__printf(1, 0) int vprintk_func(const char *fmt, va_list args);
@@ -199,7 +200,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void defer_console_output(void);
-@@ -70,10 +44,10 @@ void defer_console_output(void);
+@@ -58,10 +32,10 @@ void defer_console_output(void);
* semaphore and some of console functions (console_unlock()/etc.), so
* printk-safe must preserve the existing local IRQ guarantees.
*/
@@ -214,7 +215,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-#endif /* CONFIG_PRINTK */
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1675,13 +1675,6 @@ static bool cont_add(int facility, int l
+@@ -1726,13 +1726,6 @@ static bool cont_add(u32 caller_id, int
}
#endif /* 0 */
@@ -228,7 +229,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* ring buffer used as memory allocator for temporary sprint buffers */
DECLARE_STATIC_PRINTKRB(sprint_rb,
ilog2(PRINTK_RECORD_MAX + sizeof(struct prb_entry) +
-@@ -1752,6 +1745,11 @@ asmlinkage int vprintk_emit(int facility
+@@ -1801,6 +1794,11 @@ asmlinkage int vprintk_emit(int facility
}
EXPORT_SYMBOL(vprintk_emit);
@@ -240,7 +241,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
asmlinkage int vprintk(const char *fmt, va_list args)
{
return vprintk_func(fmt, args);
-@@ -3142,5 +3140,4 @@ void kmsg_dump_rewind(struct kmsg_dumper
+@@ -3201,5 +3199,4 @@ void kmsg_dump_rewind(struct kmsg_dumper
logbuf_unlock_irqrestore(flags);
}
EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
@@ -248,22 +249,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
--- a/kernel/printk/printk_safe.c
+++ /dev/null
-@@ -1,427 +0,0 @@
+@@ -1,415 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * printk_safe.c - Safe printk for printk-deadlock-prone contexts
-- *
-- * This program is free software; you can redistribute it and/or
-- * modify it under the terms of the GNU General Public License
-- * as published by the Free Software Foundation; either version 2
-- * of the License, or (at your option) any later version.
-- *
-- * This program is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- * GNU General Public License for more details.
-- *
-- * You should have received a copy of the GNU General Public License
-- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/preempt.h>
@@ -678,7 +667,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-}
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -8366,7 +8366,6 @@ void ftrace_dump(enum ftrace_dump_mode o
+@@ -8874,7 +8874,6 @@ void ftrace_dump(enum ftrace_dump_mode o
tracing_off();
local_irq_save(flags);
@@ -686,7 +675,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Simulate the iterator */
trace_init_global_iter(&iter);
-@@ -8447,7 +8446,6 @@ void ftrace_dump(enum ftrace_dump_mode o
+@@ -8951,7 +8950,6 @@ void ftrace_dump(enum ftrace_dump_mode o
atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
}
atomic_dec(&dump_running);
diff --git a/debian/patches-rt/0011-x86-fpu-Make-get_xsave_field_ptr-and-get_xsave_addr-.patch b/debian/patches-rt/0011-x86-fpu-Make-get_xsave_field_ptr-and-get_xsave_addr-.patch
deleted file mode 100644
index 5dc7b4dbc..000000000
--- a/debian/patches-rt/0011-x86-fpu-Make-get_xsave_field_ptr-and-get_xsave_addr-.patch
+++ /dev/null
@@ -1,232 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Thu, 18 Oct 2018 12:58:06 +0200
-Subject: [PATCH 11/27] x86/fpu: Make get_xsave_field_ptr() and
- get_xsave_addr() use feature number instead of mask
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-After changing the argument of __raw_xsave_addr() from a mask to a
-number, Dave suggested checking whether it makes sense to do the same
-for get_xsave_addr(). As it turns out, it does. Only get_xsave_addr()
-needs the mask to check if the requested feature is part of what is
-supported/saved, and then uses the number again. The shift operation is
-cheaper compared to "find last bit set". Also, the feature number uses
-less opcode space compared to the mask :)
-
-Make the get_xsave_addr() argument an xfeature number instead of a mask
-and fix up its callers.
-As part of this use xfeature_nr and xfeature_mask consistently.
-This results in changes to the kvm code as:
- feature -> xfeature_mask
- index -> xfeature_nr
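-
-A before/after sketch of a typical caller (mirroring the traps.c hunk
-below; illustrative only):
-
-  /* old: callers passed a single-bit mask */
-  bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
-  /* new: callers pass the feature number */
-  bndcsr = get_xsave_field_ptr(XFEATURE_BNDCSR);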
-
-Suggested-by: Dave Hansen <dave.hansen@linux.intel.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/include/asm/fpu/xstate.h | 4 ++--
- arch/x86/kernel/fpu/xstate.c | 22 ++++++++++------------
- arch/x86/kernel/traps.c | 2 +-
- arch/x86/kvm/x86.c | 28 ++++++++++++++--------------
- arch/x86/mm/mpx.c | 6 +++---
- 5 files changed, 30 insertions(+), 32 deletions(-)
-
---- a/arch/x86/include/asm/fpu/xstate.h
-+++ b/arch/x86/include/asm/fpu/xstate.h
-@@ -46,8 +46,8 @@ extern void __init update_regset_xstate_
- u64 xstate_mask);
-
- void fpu__xstate_clear_all_cpu_caps(void);
--void *get_xsave_addr(struct xregs_state *xsave, int xstate);
--const void *get_xsave_field_ptr(int xstate_field);
-+void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr);
-+const void *get_xsave_field_ptr(int xfeature_nr);
- int using_compacted_format(void);
- int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset, unsigned int size);
- int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int offset, unsigned int size);
---- a/arch/x86/kernel/fpu/xstate.c
-+++ b/arch/x86/kernel/fpu/xstate.c
-@@ -830,15 +830,14 @@ static void *__raw_xsave_addr(struct xre
- *
- * Inputs:
- * xstate: the thread's storage area for all FPU data
-- * xstate_feature: state which is defined in xsave.h (e.g.
-- * XFEATURE_MASK_FP, XFEATURE_MASK_SSE, etc...)
-+ * xfeature_nr: state which is defined in xsave.h (e.g. XFEATURE_FP,
-+ * XFEATURE_SSE, etc...)
- * Output:
- * address of the state in the xsave area, or NULL if the
- * field is not present in the xsave buffer.
- */
--void *get_xsave_addr(struct xregs_state *xsave, int xstate_feature)
-+void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr)
- {
-- int xfeature_nr;
- /*
- * Do we even *have* xsave state?
- */
-@@ -850,11 +849,11 @@ void *get_xsave_addr(struct xregs_state
- * have not enabled. Remember that pcntxt_mask is
- * what we write to the XCR0 register.
- */
-- WARN_ONCE(!(xfeatures_mask & xstate_feature),
-+ WARN_ONCE(!(xfeatures_mask & BIT_ULL(xfeature_nr)),
- "get of unsupported state");
- /*
- * This assumes the last 'xsave*' instruction to
-- * have requested that 'xstate_feature' be saved.
-+ * have requested that 'xfeature_nr' be saved.
- * If it did not, we might be seeing an old value
- * of the field in the buffer.
- *
-@@ -863,10 +862,9 @@ void *get_xsave_addr(struct xregs_state
- * or because the "init optimization" caused it
- * to not be saved.
- */
-- if (!(xsave->header.xfeatures & xstate_feature))
-+ if (!(xsave->header.xfeatures & BIT_ULL(xfeature_nr)))
- return NULL;
-
-- xfeature_nr = fls64(xstate_feature) - 1;
- return __raw_xsave_addr(xsave, xfeature_nr);
- }
- EXPORT_SYMBOL_GPL(get_xsave_addr);
-@@ -882,13 +880,13 @@ EXPORT_SYMBOL_GPL(get_xsave_addr);
- * Note that this only works on the current task.
- *
- * Inputs:
-- * @xsave_state: state which is defined in xsave.h (e.g. XFEATURE_MASK_FP,
-- * XFEATURE_MASK_SSE, etc...)
-+ * @xfeature_nr: state which is defined in xsave.h (e.g. XFEATURE_FP,
-+ * XFEATURE_SSE, etc...)
- * Output:
- * address of the state in the xsave area or NULL if the state
- * is not present or is in its 'init state'.
- */
--const void *get_xsave_field_ptr(int xsave_state)
-+const void *get_xsave_field_ptr(int xfeature_nr)
- {
- struct fpu *fpu = &current->thread.fpu;
-
-@@ -898,7 +896,7 @@ const void *get_xsave_field_ptr(int xsav
- */
- fpu__save(fpu);
-
-- return get_xsave_addr(&fpu->state.xsave, xsave_state);
-+ return get_xsave_addr(&fpu->state.xsave, xfeature_nr);
- }
-
- #ifdef CONFIG_ARCH_HAS_PKEYS
---- a/arch/x86/kernel/traps.c
-+++ b/arch/x86/kernel/traps.c
-@@ -455,7 +455,7 @@ dotraplinkage void do_bounds(struct pt_r
- * which is all zeros which indicates MPX was not
- * responsible for the exception.
- */
-- bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
-+ bndcsr = get_xsave_field_ptr(XFEATURE_BNDCSR);
- if (!bndcsr)
- goto exit_trap;
-
---- a/arch/x86/kvm/x86.c
-+++ b/arch/x86/kvm/x86.c
-@@ -3675,15 +3675,15 @@ static void fill_xsave(u8 *dest, struct
- */
- valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
- while (valid) {
-- u64 feature = valid & -valid;
-- int index = fls64(feature) - 1;
-- void *src = get_xsave_addr(xsave, feature);
-+ u64 xfeature_mask = valid & -valid;
-+ int xfeature_nr = fls64(xfeature_mask) - 1;
-+ void *src = get_xsave_addr(xsave, xfeature_nr);
-
- if (src) {
- u32 size, offset, ecx, edx;
-- cpuid_count(XSTATE_CPUID, index,
-+ cpuid_count(XSTATE_CPUID, xfeature_nr,
- &size, &offset, &ecx, &edx);
-- if (feature == XFEATURE_MASK_PKRU)
-+ if (xfeature_nr == XFEATURE_PKRU)
- memcpy(dest + offset, &vcpu->arch.pkru,
- sizeof(vcpu->arch.pkru));
- else
-@@ -3691,7 +3691,7 @@ static void fill_xsave(u8 *dest, struct
-
- }
-
-- valid -= feature;
-+ valid -= xfeature_mask;
- }
- }
-
-@@ -3718,22 +3718,22 @@ static void load_xsave(struct kvm_vcpu *
- */
- valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
- while (valid) {
-- u64 feature = valid & -valid;
-- int index = fls64(feature) - 1;
-- void *dest = get_xsave_addr(xsave, feature);
-+ u64 xfeature_mask = valid & -valid;
-+ int xfeature_nr = fls64(xfeature_mask) - 1;
-+ void *dest = get_xsave_addr(xsave, xfeature_nr);
-
- if (dest) {
- u32 size, offset, ecx, edx;
-- cpuid_count(XSTATE_CPUID, index,
-+ cpuid_count(XSTATE_CPUID, xfeature_nr,
- &size, &offset, &ecx, &edx);
-- if (feature == XFEATURE_MASK_PKRU)
-+ if (xfeature_nr == XFEATURE_PKRU)
- memcpy(&vcpu->arch.pkru, src + offset,
- sizeof(vcpu->arch.pkru));
- else
- memcpy(dest, src + offset, size);
- }
-
-- valid -= feature;
-+ valid -= xfeature_mask;
- }
- }
-
-@@ -8839,11 +8839,11 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcp
- if (init_event)
- kvm_put_guest_fpu(vcpu);
- mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu->state.xsave,
-- XFEATURE_MASK_BNDREGS);
-+ XFEATURE_BNDREGS);
- if (mpx_state_buffer)
- memset(mpx_state_buffer, 0, sizeof(struct mpx_bndreg_state));
- mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu->state.xsave,
-- XFEATURE_MASK_BNDCSR);
-+ XFEATURE_BNDCSR);
- if (mpx_state_buffer)
- memset(mpx_state_buffer, 0, sizeof(struct mpx_bndcsr));
- if (init_event)
---- a/arch/x86/mm/mpx.c
-+++ b/arch/x86/mm/mpx.c
-@@ -142,7 +142,7 @@ int mpx_fault_info(struct mpx_fault_info
- goto err_out;
- }
- /* get bndregs field from current task's xsave area */
-- bndregs = get_xsave_field_ptr(XFEATURE_MASK_BNDREGS);
-+ bndregs = get_xsave_field_ptr(XFEATURE_BNDREGS);
- if (!bndregs) {
- err = -EINVAL;
- goto err_out;
-@@ -190,7 +190,7 @@ static __user void *mpx_get_bounds_dir(v
- * The bounds directory pointer is stored in a register
- * only accessible if we first do an xsave.
- */
-- bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
-+ bndcsr = get_xsave_field_ptr(XFEATURE_BNDCSR);
- if (!bndcsr)
- return MPX_INVALID_BOUNDS_DIR;
-
-@@ -376,7 +376,7 @@ static int do_mpx_bt_fault(void)
- const struct mpx_bndcsr *bndcsr;
- struct mm_struct *mm = current->mm;
-
-- bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
-+ bndcsr = get_xsave_field_ptr(XFEATURE_BNDCSR);
- if (!bndcsr)
- return -EINVAL;
- /*
diff --git a/debian/patches-rt/0012-printk-minimize-console-locking-implementation.patch b/debian/patches-rt/0012-printk-minimize-console-locking-implementation.patch
index e518fe03a..e94a9c468 100644
--- a/debian/patches-rt/0012-printk-minimize-console-locking-implementation.patch
+++ b/debian/patches-rt/0012-printk-minimize-console-locking-implementation.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 12 Feb 2019 15:29:50 +0100
Subject: [PATCH 12/25] printk: minimize console locking implementation
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Since printing of the printk buffer is now handled by the printk
kthread, minimize the console locking functions to just handle
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -209,19 +209,7 @@ static int nr_ext_console_drivers;
+@@ -217,19 +217,7 @@ static int nr_ext_console_drivers;
static int __down_trylock_console_sem(unsigned long ip)
{
@@ -38,7 +38,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 1;
mutex_acquire(&console_lock_dep_map, 0, 1, ip);
return 0;
-@@ -230,13 +218,9 @@ static int __down_trylock_console_sem(un
+@@ -238,13 +226,9 @@ static int __down_trylock_console_sem(un
static void __up_console_sem(unsigned long ip)
{
@@ -52,7 +52,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#define up_console_sem() __up_console_sem(_RET_IP_)
-@@ -1498,82 +1482,6 @@ static void format_text(struct printk_lo
+@@ -1542,82 +1526,6 @@ static void format_text(struct printk_lo
}
/*
@@ -135,7 +135,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* Call the console drivers, asking them to write out
* log_buf[start] to log_buf[end - 1].
* The console_lock must be held.
-@@ -1830,8 +1738,6 @@ static ssize_t msg_print_ext_header(char
+@@ -1879,8 +1787,6 @@ static ssize_t msg_print_ext_header(char
static ssize_t msg_print_ext_body(char *buf, size_t size,
char *dict, size_t dict_len,
char *text, size_t text_len) { return 0; }
@@ -144,7 +144,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void call_console_drivers(const char *ext_text, size_t ext_len,
const char *text, size_t len) {}
static size_t msg_print_text(const struct printk_log *msg, bool syslog,
-@@ -2066,35 +1972,6 @@ int is_console_locked(void)
+@@ -2115,35 +2021,6 @@ int is_console_locked(void)
{
return console_locked;
}
@@ -180,7 +180,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* console_unlock - unlock the console system
-@@ -2102,147 +1979,17 @@ static inline int can_use_console(void)
+@@ -2151,147 +2028,17 @@ static inline int can_use_console(void)
* Releases the console_lock which the caller holds on the console system
* and the console driver list.
*
diff --git a/debian/patches-rt/0012-x86-pkru-Provide-.-_pkru_ins-functions.patch b/debian/patches-rt/0012-x86-pkru-Provide-.-_pkru_ins-functions.patch
deleted file mode 100644
index 13b427d23..000000000
--- a/debian/patches-rt/0012-x86-pkru-Provide-.-_pkru_ins-functions.patch
+++ /dev/null
@@ -1,78 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Thu, 21 Mar 2019 13:15:32 +0100
-Subject: [PATCH 12/27] x86/pkru: Provide .*_pkru_ins() functions
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-Dave Hansen has asked for __read_pkru() and __write_pkru() to be symmetrical.
-As part of the series __write_pkru() will read back the value and only write it
-if it is different.
-In order to make both functions symmetrical, move the function containing
-only the opcode into a function with an _ins() suffix. __write_pkru() will
-just invoke __write_pkru_ins() but in a follow-up patch will also read back
-the value.
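-
-The intended end state, once the follow-up (patch 13/27 below) lands, is
-roughly:
-
-  static inline void __write_pkru(u32 pkru)
-  {
-          /* skip the expensive WRPKRU when the value would not change */
-          if (pkru == __read_pkru_ins())
-                  return;
-          __write_pkru_ins(pkru);
-  }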
-
-Suggested-by: Dave Hansen <dave.hansen@intel.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/include/asm/pgtable.h | 2 +-
- arch/x86/include/asm/special_insns.h | 12 +++++++++---
- arch/x86/kvm/vmx/vmx.c | 2 +-
- 3 files changed, 11 insertions(+), 5 deletions(-)
-
---- a/arch/x86/include/asm/pgtable.h
-+++ b/arch/x86/include/asm/pgtable.h
-@@ -127,7 +127,7 @@ static inline int pte_dirty(pte_t pte)
- static inline u32 read_pkru(void)
- {
- if (boot_cpu_has(X86_FEATURE_OSPKE))
-- return __read_pkru();
-+ return __read_pkru_ins();
- return 0;
- }
-
---- a/arch/x86/include/asm/special_insns.h
-+++ b/arch/x86/include/asm/special_insns.h
-@@ -92,7 +92,7 @@ static inline void native_write_cr8(unsi
- #endif
-
- #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
--static inline u32 __read_pkru(void)
-+static inline u32 __read_pkru_ins(void)
- {
- u32 ecx = 0;
- u32 edx, pkru;
-@@ -107,7 +107,7 @@ static inline u32 __read_pkru(void)
- return pkru;
- }
-
--static inline void __write_pkru(u32 pkru)
-+static inline void __write_pkru_ins(u32 pkru)
- {
- u32 ecx = 0, edx = 0;
-
-@@ -118,8 +118,14 @@ static inline void __write_pkru(u32 pkru
- asm volatile(".byte 0x0f,0x01,0xef\n\t"
- : : "a" (pkru), "c"(ecx), "d"(edx));
- }
-+
-+static inline void __write_pkru(u32 pkru)
-+{
-+ __write_pkru_ins(pkru);
-+}
-+
- #else
--static inline u32 __read_pkru(void)
-+static inline u32 __read_pkru_ins(void)
- {
- return 0;
- }
---- a/arch/x86/kvm/vmx/vmx.c
-+++ b/arch/x86/kvm/vmx/vmx.c
-@@ -6632,7 +6632,7 @@ static void vmx_vcpu_run(struct kvm_vcpu
- */
- if (static_cpu_has(X86_FEATURE_PKU) &&
- kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) {
-- vcpu->arch.pkru = __read_pkru();
-+ vcpu->arch.pkru = __read_pkru_ins();
- if (vcpu->arch.pkru != vmx->host_pkru)
- __write_pkru(vmx->host_pkru);
- }
diff --git a/debian/patches-rt/0013-printk-track-seq-per-console.patch b/debian/patches-rt/0013-printk-track-seq-per-console.patch
index c7622d2d6..fed77ce4c 100644
--- a/debian/patches-rt/0013-printk-track-seq-per-console.patch
+++ b/debian/patches-rt/0013-printk-track-seq-per-console.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 12 Feb 2019 15:29:51 +0100
Subject: [PATCH 13/25] printk: track seq per console
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Allow each console to track which seq record was last printed. This
simplifies identifying dropped records.
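
A sketch of the per-console bookkeeping this enables in
call_console_drivers() (illustrative only; it assumes the new struct
console field is named printk_seq, and the dropped-record reporting via
print_console_dropped() arrives with a later patch):

  if (con->printk_seq >= seq)
          continue;                /* record already printed    */
  if (con->printk_seq + 1 < seq)   /* gap: records were dropped */
          print_console_dropped(con, seq - (con->printk_seq + 1));
  /* ... write the record, then remember it ... */
  con->printk_seq = seq;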
@@ -25,7 +25,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
};
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1453,6 +1453,16 @@ SYSCALL_DEFINE3(syslog, int, type, char
+@@ -1497,6 +1497,16 @@ SYSCALL_DEFINE3(syslog, int, type, char
return do_syslog(type, buf, len, SYSLOG_FROM_READER);
}
@@ -42,7 +42,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void format_text(struct printk_log *msg, u64 seq,
char *ext_text, size_t *ext_len,
char *text, size_t *len, bool time)
-@@ -1486,7 +1496,7 @@ static void format_text(struct printk_lo
+@@ -1530,7 +1540,7 @@ static void format_text(struct printk_lo
* log_buf[start] to log_buf[end - 1].
* The console_lock must be held.
*/
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
const char *text, size_t len)
{
struct console *con;
-@@ -1504,6 +1514,19 @@ static void call_console_drivers(const c
+@@ -1548,6 +1558,19 @@ static void call_console_drivers(const c
if (!cpu_online(raw_smp_processor_id()) &&
!(con->flags & CON_ANYTIME))
continue;
@@ -71,7 +71,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (con->flags & CON_EXTENDED)
con->write(con, ext_text, ext_len);
else
-@@ -1738,7 +1761,7 @@ static ssize_t msg_print_ext_header(char
+@@ -1787,7 +1810,7 @@ static ssize_t msg_print_ext_header(char
static ssize_t msg_print_ext_body(char *buf, size_t size,
char *dict, size_t dict_len,
char *text, size_t text_len) { return 0; }
@@ -80,7 +80,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
const char *text, size_t len) {}
static size_t msg_print_text(const struct printk_log *msg, bool syslog,
bool time, char *buf, size_t size) { return 0; }
-@@ -2481,8 +2504,9 @@ static int printk_kthread_func(void *dat
+@@ -2540,8 +2563,9 @@ static int printk_kthread_func(void *dat
&len, printk_time);
console_lock();
diff --git a/debian/patches-rt/0013-x86-fpu-Only-write-PKRU-if-it-is-different-from-curr.patch b/debian/patches-rt/0013-x86-fpu-Only-write-PKRU-if-it-is-different-from-curr.patch
deleted file mode 100644
index b94b29e2c..000000000
--- a/debian/patches-rt/0013-x86-fpu-Only-write-PKRU-if-it-is-different-from-curr.patch
+++ /dev/null
@@ -1,33 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 19 Oct 2018 12:46:53 +0200
-Subject: [PATCH 13/27] x86/fpu: Only write PKRU if it is different from
- current
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-Dave Hansen says that the `wrpkru' is more expensive than `rdpkru'. It
-has a higher cycle cost and it's also practically a (light) speculation
-barrier.
-
-As an optimisation, read the current PKRU value and only write the new
-one if it is different.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/include/asm/special_insns.h | 6 ++++++
- 1 file changed, 6 insertions(+)
-
---- a/arch/x86/include/asm/special_insns.h
-+++ b/arch/x86/include/asm/special_insns.h
-@@ -121,6 +121,12 @@ static inline void __write_pkru_ins(u32
-
- static inline void __write_pkru(u32 pkru)
- {
-+ /*
-+ * WRPKRU is relatively expensive compared to RDPKRU.
-+ * Avoid WRPKRU when it would not change the value.
-+ */
-+ if (pkru == __read_pkru_ins())
-+ return;
- __write_pkru_ins(pkru);
- }
-
diff --git a/debian/patches-rt/0014-printk-do-boot_delay_msec-inside-printk_delay.patch b/debian/patches-rt/0014-printk-do-boot_delay_msec-inside-printk_delay.patch
index 5e9543a2c..f6e138816 100644
--- a/debian/patches-rt/0014-printk-do-boot_delay_msec-inside-printk_delay.patch
+++ b/debian/patches-rt/0014-printk-do-boot_delay_msec-inside-printk_delay.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 12 Feb 2019 15:29:52 +0100
Subject: [PATCH 14/25] printk: do boot_delay_msec inside printk_delay
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Both functions needed to be called one after the other, so just
integrate boot_delay_msec into printk_delay for simplification.
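
The combined helper presumably ends up shaped like this (a sketch: the
5.0-era body being dropped from the hunk below, with boot_delay_msec()
folded in and the level argument passed through):

  static inline void printk_delay(int level)
  {
          boot_delay_msec(level);
          if (unlikely(printk_delay_msec)) {
                  int m = printk_delay_msec;

                  while (m--) {
                          mdelay(1);
                          touch_nmi_watchdog();
                  }
          }
  }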
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1453,6 +1453,21 @@ SYSCALL_DEFINE3(syslog, int, type, char
+@@ -1497,6 +1497,21 @@ SYSCALL_DEFINE3(syslog, int, type, char
return do_syslog(type, buf, len, SYSLOG_FROM_READER);
}
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void print_console_dropped(struct console *con, u64 count)
{
char text[64];
-@@ -1534,20 +1549,6 @@ static void call_console_drivers(u64 seq
+@@ -1578,20 +1593,6 @@ static void call_console_drivers(u64 seq
}
}
@@ -54,10 +54,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- }
-}
-
- /* FIXME: no support for LOG_CONT */
- #if 0
- /*
-@@ -2506,10 +2507,8 @@ static int printk_kthread_func(void *dat
+ static inline u32 printk_caller_id(void)
+ {
+ return in_task() ? task_pid_nr(current) :
+@@ -2565,10 +2566,8 @@ static int printk_kthread_func(void *dat
console_lock();
call_console_drivers(master_seq, ext_text,
ext_len, text, len);
diff --git a/debian/patches-rt/0014-x86-pkeys-Don-t-check-if-PKRU-is-zero-before-writtin.patch b/debian/patches-rt/0014-x86-pkeys-Don-t-check-if-PKRU-is-zero-before-writtin.patch
deleted file mode 100644
index 4e932e097..000000000
--- a/debian/patches-rt/0014-x86-pkeys-Don-t-check-if-PKRU-is-zero-before-writtin.patch
+++ /dev/null
@@ -1,33 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 19 Oct 2018 13:59:26 +0200
-Subject: [PATCH 14/27] x86/pkeys: Don't check if PKRU is zero before writting
- it
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-write_pkru() checks whether the current value is the same as the expected
-value. So instead of merely checking whether the current and new values
-are zero (and skipping the write in that case), we can benefit from that.
-
-Remove the zero check of PKRU; write_pkru() provides a similar check.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/mm/pkeys.c | 7 -------
- 1 file changed, 7 deletions(-)
-
---- a/arch/x86/mm/pkeys.c
-+++ b/arch/x86/mm/pkeys.c
-@@ -143,13 +143,6 @@ void copy_init_pkru_to_fpregs(void)
- {
- u32 init_pkru_value_snapshot = READ_ONCE(init_pkru_value);
- /*
-- * Any write to PKRU takes it out of the XSAVE 'init
-- * state' which increases context switch cost. Avoid
-- * writing 0 when PKRU was already 0.
-- */
-- if (!init_pkru_value_snapshot && !read_pkru())
-- return;
-- /*
- * Override the PKRU state that came from 'init_fpstate'
- * with the baseline from the process.
- */
diff --git a/debian/patches-rt/0015-printk-print-history-for-new-consoles.patch b/debian/patches-rt/0015-printk-print-history-for-new-consoles.patch
index fe4b6b3db..7036f3c32 100644
--- a/debian/patches-rt/0015-printk-print-history-for-new-consoles.patch
+++ b/debian/patches-rt/0015-printk-print-history-for-new-consoles.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 12 Feb 2019 15:29:53 +0100
Subject: [PATCH 15/25] printk: print history for new consoles
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
When new consoles register, they currently print how many messages
they have missed. However, many (or all) of those messages may still
@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
};
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1506,6 +1506,77 @@ static void format_text(struct printk_lo
+@@ -1550,6 +1550,77 @@ static void format_text(struct printk_lo
}
}
@@ -106,7 +106,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Call the console drivers, asking them to write out
* log_buf[start] to log_buf[end - 1].
-@@ -1524,6 +1595,10 @@ static void call_console_drivers(u64 seq
+@@ -1568,6 +1639,10 @@ static void call_console_drivers(u64 seq
for_each_console(con) {
if (!(con->flags & CON_ENABLED))
continue;
diff --git a/debian/patches-rt/0015-x86-fpu-Eager-switch-PKRU-state.patch b/debian/patches-rt/0015-x86-fpu-Eager-switch-PKRU-state.patch
deleted file mode 100644
index 4b74f9936..000000000
--- a/debian/patches-rt/0015-x86-fpu-Eager-switch-PKRU-state.patch
+++ /dev/null
@@ -1,111 +0,0 @@
-From: Rik van Riel <riel@surriel.com>
-Date: Sun, 9 Sep 2018 18:30:47 +0200
-Subject: [PATCH 15/27] x86/fpu: Eager switch PKRU state
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-While most of a task's FPU state is only needed in user space, the
-protection keys need to be in place immediately after a context switch.
-
-The reason is that any access to userspace memory while running in
-kernel mode also need to abide by the memory permissions specified in
-the protection keys.
-
-The "eager switch" is a preparation for loading the FPU state on return
-to userland. Instead of decoupling PKRU state from xstate I update PKRU
-within xstate on write operations by the kernel.
-
-The read/write_pkru() helpers are moved to another header file so they can
-be easily accessed from pgtable.h and fpu/internal.h.
-
-For user tasks we should always get the PKRU from the xsave area and it
-should not change anything because the PKRU value was loaded as part of
-FPU restore.
-For kernel threads we now will have the default "init_pkru_value"
-written. Before this commit the kernel thread would end up with a
-random value which it inherited from the previous user task.
-
-Signed-off-by: Rik van Riel <riel@surriel.com>
-[bigeasy: save pkru to xstate, no cache, don't use __raw_xsave_addr()]
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/include/asm/fpu/internal.h | 24 ++++++++++++++++++++++--
- arch/x86/include/asm/fpu/xstate.h | 1 +
- arch/x86/include/asm/pgtable.h | 6 ++++++
- arch/x86/mm/pkeys.c | 1 -
- 4 files changed, 29 insertions(+), 3 deletions(-)
-
---- a/arch/x86/include/asm/fpu/internal.h
-+++ b/arch/x86/include/asm/fpu/internal.h
-@@ -14,6 +14,7 @@
- #include <linux/compat.h>
- #include <linux/sched.h>
- #include <linux/slab.h>
-+#include <linux/mm.h>
-
- #include <asm/user.h>
- #include <asm/fpu/api.h>
-@@ -562,8 +563,27 @@ switch_fpu_prepare(struct fpu *old_fpu,
- */
- static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
- {
-- if (static_cpu_has(X86_FEATURE_FPU))
-- __fpregs_load_activate(new_fpu, cpu);
-+ struct pkru_state *pk;
-+ u32 pkru_val = init_pkru_value;
-+
-+ if (!static_cpu_has(X86_FEATURE_FPU))
-+ return;
-+
-+ __fpregs_load_activate(new_fpu, cpu);
-+
-+ if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
-+ return;
-+
-+ /*
-+ * PKRU state is switched eagerly because it needs to be valid before we
-+ * return to userland e.g. for a copy_to_user() operation.
-+ */
-+ if (current->mm) {
-+ pk = get_xsave_addr(&new_fpu->state.xsave, XFEATURE_PKRU);
-+ if (pk)
-+ pkru_val = pk->pkru;
-+ }
-+ __write_pkru(pkru_val);
- }
-
- /*
---- a/arch/x86/include/asm/fpu/xstate.h
-+++ b/arch/x86/include/asm/fpu/xstate.h
-@@ -5,6 +5,7 @@
- #include <linux/types.h>
- #include <asm/processor.h>
- #include <linux/uaccess.h>
-+#include <asm/user.h>
-
- /* Bit 63 of XCR0 is reserved for future expansion */
- #define XFEATURE_MASK_EXTEND (~(XFEATURE_MASK_FPSSE | (1ULL << 63)))
---- a/arch/x86/include/asm/pgtable.h
-+++ b/arch/x86/include/asm/pgtable.h
-@@ -1355,6 +1355,12 @@ static inline pmd_t pmd_swp_clear_soft_d
- #define PKRU_WD_BIT 0x2
- #define PKRU_BITS_PER_PKEY 2
-
-+#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
-+extern u32 init_pkru_value;
-+#else
-+#define init_pkru_value 0
-+#endif
-+
- static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
- {
- int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
---- a/arch/x86/mm/pkeys.c
-+++ b/arch/x86/mm/pkeys.c
-@@ -126,7 +126,6 @@ int __arch_override_mprotect_pkey(struct
- * in the process's lifetime will not accidentally get access
- * to data which is pkey-protected later on.
- */
--static
- u32 init_pkru_value = PKRU_AD_KEY( 1) | PKRU_AD_KEY( 2) | PKRU_AD_KEY( 3) |
- PKRU_AD_KEY( 4) | PKRU_AD_KEY( 5) | PKRU_AD_KEY( 6) |
- PKRU_AD_KEY( 7) | PKRU_AD_KEY( 8) | PKRU_AD_KEY( 9) |
diff --git a/debian/patches-rt/0016-printk-implement-CON_PRINTBUFFER.patch b/debian/patches-rt/0016-printk-implement-CON_PRINTBUFFER.patch
index 7990eaef7..513e89c5e 100644
--- a/debian/patches-rt/0016-printk-implement-CON_PRINTBUFFER.patch
+++ b/debian/patches-rt/0016-printk-implement-CON_PRINTBUFFER.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 12 Feb 2019 15:29:54 +0100
Subject: [PATCH 16/25] printk: implement CON_PRINTBUFFER
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
If the CON_PRINTBUFFER flag is not set, do not replay the history
for that console.
@@ -9,12 +9,12 @@ for that console.
Signed-off-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- kernel/printk/printk.c | 25 ++++++-------------------
- 1 file changed, 6 insertions(+), 19 deletions(-)
+ kernel/printk/printk.c | 34 ++++++----------------------------
+ 1 file changed, 6 insertions(+), 28 deletions(-)
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -399,10 +399,6 @@ static u32 log_first_idx;
+@@ -409,10 +409,6 @@ static u32 log_first_idx;
static u64 log_next_seq;
static u32 log_next_idx;
@@ -25,7 +25,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* the next printk record to read after the last 'clear' command */
static u64 clear_seq;
static u32 clear_idx;
-@@ -1596,8 +1592,12 @@ static void call_console_drivers(u64 seq
+@@ -1640,8 +1636,12 @@ static void call_console_drivers(u64 seq
if (!(con->flags & CON_ENABLED))
continue;
if (!con->wrote_history) {
@@ -40,7 +40,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
if (!con->write)
continue;
-@@ -1822,8 +1822,6 @@ EXPORT_SYMBOL(printk);
+@@ -1871,8 +1871,6 @@ EXPORT_SYMBOL(printk);
static u64 syslog_seq;
static u32 syslog_idx;
@@ -49,7 +49,23 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static u64 log_first_seq;
static u32 log_first_idx;
static u64 log_next_seq;
-@@ -2224,7 +2222,6 @@ early_param("keep_bootcon", keep_bootcon
+@@ -2196,15 +2194,6 @@ void console_flush_on_panic(enum con_flu
+ */
+ console_trylock();
+ console_may_schedule = 0;
+-
+- if (mode == CONSOLE_REPLAY_ALL) {
+- unsigned long flags;
+-
+- logbuf_lock_irqsave(flags);
+- console_seq = log_first_seq;
+- console_idx = log_first_idx;
+- logbuf_unlock_irqrestore(flags);
+- }
+ console_unlock();
+ }
+
+@@ -2283,7 +2272,6 @@ early_param("keep_bootcon", keep_bootcon
void register_console(struct console *newcon)
{
int i;
@@ -57,7 +73,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct console *bcon = NULL;
struct console_cmdline *c;
static bool has_preferred;
-@@ -2340,16 +2337,6 @@ void register_console(struct console *ne
+@@ -2399,16 +2387,6 @@ void register_console(struct console *ne
if (newcon->flags & CON_EXTENDED)
nr_ext_console_drivers++;
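The replay decision this patch installs (the added lines are elided in the hunk above) comes down to a small guard; a hedged reconstruction using the field and helper names visible elsewhere in this series:

    /* Sketch only: replay stored history just once per console, and only
     * when the console asked for it via CON_PRINTBUFFER. */
    if (!con->wrote_history) {
        if (con->flags & CON_PRINTBUFFER)
            printk_write_history(con, seq);
        con->wrote_history = 1;
    }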
diff --git a/debian/patches-rt/0016-x86-entry-Add-TIF_NEED_FPU_LOAD.patch b/debian/patches-rt/0016-x86-entry-Add-TIF_NEED_FPU_LOAD.patch
deleted file mode 100644
index 6d48413dd..000000000
--- a/debian/patches-rt/0016-x86-entry-Add-TIF_NEED_FPU_LOAD.patch
+++ /dev/null
@@ -1,57 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 5 Sep 2018 18:34:47 +0200
-Subject: [PATCH 16/27] x86/entry: Add TIF_NEED_FPU_LOAD
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-Add TIF_NEED_FPU_LOAD. This is reserved for loading the FPU registers
-before returning to userland. This flag must not be set for systems
-without an FPU.
-If this flag is cleared, the CPU's FPU registers hold the current content
-of current()'s FPU registers. The in-memory copy (union fpregs_state) is
-not valid.
-If this flag is set, then all of the CPU's FPU registers may hold random
-values (except for PKRU) and the content of the FPU registers must be
-loaded on return to userland.
-
-It is introduced now so that the code handling it can be added before
-the main feature.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/include/asm/fpu/internal.h | 6 ++++++
- arch/x86/include/asm/thread_info.h | 2 ++
- 2 files changed, 8 insertions(+)
-
---- a/arch/x86/include/asm/fpu/internal.h
-+++ b/arch/x86/include/asm/fpu/internal.h
-@@ -538,6 +538,12 @@ static inline void __fpregs_load_activat
- *
- * The FPU context is only stored/restore for user task and ->mm is used to
- * distinguish between kernel and user threads.
-+ *
-+ * If TIF_NEED_FPU_LOAD is cleared then the CPU's FPU registers are saved in
-+ * the current thread's FPU registers state.
-+ * If TIF_NEED_FPU_LOAD is set then CPU's FPU registers may not hold current()'s
-+ * FPU registers. It is required to load the registers before returning to
-+ * userland or using the content otherwise.
- */
- static inline void
- switch_fpu_prepare(struct fpu *old_fpu, int cpu)
---- a/arch/x86/include/asm/thread_info.h
-+++ b/arch/x86/include/asm/thread_info.h
-@@ -88,6 +88,7 @@ struct thread_info {
- #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
- #define TIF_UPROBE 12 /* breakpointed or singlestepping */
- #define TIF_PATCH_PENDING 13 /* pending live patching update */
-+#define TIF_NEED_FPU_LOAD 14 /* load FPU on return to userspace */
- #define TIF_NOCPUID 15 /* CPUID is not accessible in userland */
- #define TIF_NOTSC 16 /* TSC is not accessible in userland */
- #define TIF_IA32 17 /* IA32 compatibility process */
-@@ -117,6 +118,7 @@ struct thread_info {
- #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
- #define _TIF_UPROBE (1 << TIF_UPROBE)
- #define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING)
-+#define _TIF_NEED_FPU_LOAD (1 << TIF_NEED_FPU_LOAD)
- #define _TIF_NOCPUID (1 << TIF_NOCPUID)
- #define _TIF_NOTSC (1 << TIF_NOTSC)
- #define _TIF_IA32 (1 << TIF_IA32)
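The contract is easiest to see at a use site. Patch 0018 of the x86/fpu series (shown further below) applies exactly this pattern in copy_fpstate_to_sigframe():

    fpregs_lock();
    if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
        /* The CPU registers are the authoritative copy: save them ... */
        copy_fpregs_to_fpstate(fpu);
        /* ... and mark them as needing a reload before userland runs. */
        set_thread_flag(TIF_NEED_FPU_LOAD);
    }
    fpregs_unlock();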
diff --git a/debian/patches-rt/0017-printk-add-processor-number-to-output.patch b/debian/patches-rt/0017-printk-add-processor-number-to-output.patch
index 3c17a6dda..805335aca 100644
--- a/debian/patches-rt/0017-printk-add-processor-number-to-output.patch
+++ b/debian/patches-rt/0017-printk-add-processor-number-to-output.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 12 Feb 2019 15:29:55 +0100
Subject: [PATCH 17/25] printk: add processor number to output
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
It can be difficult to sort printk out if multiple processors are
printing simultaneously. Add the processor number to the printk
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -331,6 +331,7 @@ enum log_flags {
+@@ -338,6 +338,7 @@ enum log_flags {
struct printk_log {
u64 ts_nsec; /* timestamp in nanoseconds */
@@ -23,36 +23,36 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
u16 len; /* length of entire record */
u16 text_len; /* length of text buffer */
u16 dict_len; /* length of dictionary buffer */
-@@ -475,7 +476,7 @@ static u32 log_next(u32 idx)
+@@ -489,7 +490,7 @@ static u32 log_next(u32 idx)
/* insert record into the buffer, discard old ones, update heads */
- static int log_store(int facility, int level,
+ static int log_store(u32 caller_id, int facility, int level,
- enum log_flags flags, u64 ts_nsec,
+ enum log_flags flags, u64 ts_nsec, u16 cpu,
const char *dict, u16 dict_len,
const char *text, u16 text_len)
{
-@@ -506,6 +507,7 @@ static int log_store(int facility, int l
- msg->level = level & 7;
- msg->flags = flags & 0x1f;
- msg->ts_nsec = ts_nsec;
+@@ -523,6 +524,7 @@ static int log_store(u32 caller_id, int
+ #ifdef CONFIG_PRINTK_CALLER
+ msg->caller_id = caller_id;
+ #endif
+ msg->cpu = cpu;
msg->len = size;
/* insert message */
-@@ -570,9 +572,9 @@ static ssize_t msg_print_ext_header(char
+@@ -596,9 +598,9 @@ static ssize_t msg_print_ext_header(char
do_div(ts_usec, 1000);
-- return scnprintf(buf, size, "%u,%llu,%llu,%c;",
-+ return scnprintf(buf, size, "%u,%llu,%llu,%c,%hu;",
- (msg->facility << 3) | msg->level, seq, ts_usec,
-- msg->flags & LOG_CONT ? 'c' : '-');
-+ msg->flags & LOG_CONT ? 'c' : '-', msg->cpu);
+- return scnprintf(buf, size, "%u,%llu,%llu,%c%s;",
++ return scnprintf(buf, size, "%u,%llu,%llu,%c%s,%hu;",
+ (msg->facility << 3) | msg->level, seq, ts_usec,
+- msg->flags & LOG_CONT ? 'c' : '-', caller);
++ msg->flags & LOG_CONT ? 'c' : '-', caller, msg->cpu);
}
static ssize_t msg_print_ext_body(char *buf, size_t size,
-@@ -1110,6 +1112,11 @@ static inline void boot_delay_msec(int l
+@@ -1132,6 +1134,11 @@ static inline void boot_delay_msec(int l
static bool printk_time = IS_ENABLED(CONFIG_PRINTK_TIME);
module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
@@ -64,15 +64,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static size_t print_syslog(unsigned int level, char *buf)
{
return sprintf(buf, "<%u>", level);
-@@ -1132,6 +1139,7 @@ static size_t print_prefix(const struct
- len = print_syslog((msg->facility << 3) | msg->level, buf);
- if (time)
- len += print_time(msg->ts_nsec, buf + len);
+@@ -1175,6 +1182,7 @@ static size_t print_prefix(const struct
+ buf[len++] = ' ';
+ buf[len] = '\0';
+ }
+ len += print_cpu(msg->cpu, buf + len);
+
return len;
}
-
-@@ -1698,6 +1706,7 @@ asmlinkage int vprintk_emit(int facility
+@@ -1750,6 +1758,7 @@ asmlinkage int vprintk_emit(int facility
u64 ts_nsec;
char *text;
char *rbuf;
@@ -80,7 +80,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
ts_nsec = local_clock();
-@@ -1707,6 +1716,8 @@ asmlinkage int vprintk_emit(int facility
+@@ -1759,6 +1768,8 @@ asmlinkage int vprintk_emit(int facility
return printed_len;
}
@@ -89,12 +89,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
text = rbuf;
text_len = vscnprintf(text, PRINTK_SPRINT_MAX, fmt, args);
-@@ -1744,7 +1755,7 @@ asmlinkage int vprintk_emit(int facility
+@@ -1793,7 +1804,7 @@ asmlinkage int vprintk_emit(int facility
if (dict)
- lflags |= LOG_PREFIX|LOG_NEWLINE;
+ lflags |= LOG_NEWLINE;
-- printed_len = log_store(facility, level, lflags, ts_nsec,
-+ printed_len = log_store(facility, level, lflags, ts_nsec, cpu,
+- printed_len = log_store(caller_id, facility, level, lflags, ts_nsec,
++ printed_len = log_store(caller_id, facility, level, lflags, ts_nsec, cpu,
dict, dictlen, text, text_len);
prb_commit(&h);
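For reference, the extended record header gains a trailing CPU field. A tiny userspace illustration of the resulting format, with invented values:

    #include <stdio.h>

    int main(void)
    {
        /* "%u,%llu,%llu,%c%s,%hu;" from the hunk above: prio, seq, ts_usec,
         * the continuation flag, an optional caller id, and now the CPU. */
        printf("%u,%llu,%llu,%c%s,%hu;\n",
               (6u << 3) | 4, 123ULL, 456789ULL, '-', "", (unsigned short)2);
        return 0;    /* prints: 52,123,456789,-,2; */
    }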
diff --git a/debian/patches-rt/0017-x86-fpu-Always-store-the-registers-in-copy_fpstate_t.patch b/debian/patches-rt/0017-x86-fpu-Always-store-the-registers-in-copy_fpstate_t.patch
deleted file mode 100644
index bf990ae4b..000000000
--- a/debian/patches-rt/0017-x86-fpu-Always-store-the-registers-in-copy_fpstate_t.patch
+++ /dev/null
@@ -1,71 +0,0 @@
-From: Rik van Riel <riel@surriel.com>
-Date: Sun, 9 Sep 2018 18:30:50 +0200
-Subject: [PATCH 17/27] x86/fpu: Always store the registers in
- copy_fpstate_to_sigframe()
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-copy_fpstate_to_sigframe() stores the registers directly to user space.
-This is okay because the FPU registers are valid and saving them directly
-avoids saving them into kernel memory and making a copy.
-However, we can't keep doing this if we are going to restore the FPU
-registers on the return to userland. It is possible that the FPU
-registers will be invalidated in the middle of the save operation, so
-the save should be done with preemption / BH disabled.
-
-Save the FPU registers to the task's FPU struct and copy them to the user
-memory later on.
-
-This code is extracted from an earlier version of the patchset while
-there still was lazy-FPU on x86.
-
-Signed-off-by: Rik van Riel <riel@surriel.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/kernel/fpu/signal.c | 19 ++++++++++++++-----
- 1 file changed, 14 insertions(+), 5 deletions(-)
-
---- a/arch/x86/kernel/fpu/signal.c
-+++ b/arch/x86/kernel/fpu/signal.c
-@@ -144,8 +144,8 @@ static inline int copy_fpregs_to_sigfram
- * buf == buf_fx for 64-bit frames and 32-bit fsave frame.
- * buf != buf_fx for 32-bit frames with fxstate.
- *
-- * Save the state directly to the user frame pointed by the aligned pointer
-- * 'buf_fx'.
-+ * Save the state to task's fpu->state and then copy it to the user frame
-+ * pointed by the aligned pointer 'buf_fx'.
- *
- * If this is a 32-bit frame with fxstate, put a fsave header before
- * the aligned state at 'buf_fx'.
-@@ -155,6 +155,8 @@ static inline int copy_fpregs_to_sigfram
- */
- int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
- {
-+ struct fpu *fpu = &current->thread.fpu;
-+ struct xregs_state *xsave = &fpu->state.xsave;
- struct task_struct *tsk = current;
- int ia32_fxstate = (buf != buf_fx);
-
-@@ -169,9 +171,16 @@ int copy_fpstate_to_sigframe(void __user
- sizeof(struct user_i387_ia32_struct), NULL,
- (struct _fpstate_32 __user *) buf) ? -1 : 1;
-
-- /* Save the live register state to the user directly. */
-- if (copy_fpregs_to_sigframe(buf_fx))
-- return -1;
-+ copy_fpregs_to_fpstate(fpu);
-+
-+ if (using_compacted_format()) {
-+ if (copy_xstate_to_user(buf_fx, xsave, 0, size))
-+ return -1;
-+ } else {
-+ fpstate_sanitize_xstate(fpu);
-+ if (__copy_to_user(buf_fx, xsave, fpu_user_xstate_size))
-+ return -1;
-+ }
-
- /* Save the fsave header for the 32-bit frames. */
- if ((ia32_fxstate || !use_fxsr()) && save_fsave_header(tsk, buf))
diff --git a/debian/patches-rt/0018-console-add-write_atomic-interface.patch b/debian/patches-rt/0018-console-add-write_atomic-interface.patch
index 4cc9210d3..529d5f754 100644
--- a/debian/patches-rt/0018-console-add-write_atomic-interface.patch
+++ b/debian/patches-rt/0018-console-add-write_atomic-interface.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 12 Feb 2019 15:29:56 +0100
Subject: [PATCH 18/25] console: add write_atomic interface
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Add a write_atomic callback to the console. This is an optional
function for console drivers. The function must be atomic (including
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int (*read)(struct console *, char *, unsigned);
struct tty_driver *(*device)(struct console *, int *);
void (*unblank)(void);
-@@ -231,4 +232,7 @@ extern void console_init(void);
+@@ -236,4 +237,7 @@ extern void console_init(void);
void dummycon_register_output_notifier(struct notifier_block *nb);
void dummycon_unregister_output_notifier(struct notifier_block *nb);
@@ -47,7 +47,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* _LINUX_CONSOLE_H */
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -2984,3 +2984,15 @@ void kmsg_dump_rewind(struct kmsg_dumper
+@@ -3034,3 +3034,15 @@ void kmsg_dump_rewind(struct kmsg_dumper
}
EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
#endif
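A hedged sketch of what a driver-side implementation could look like, assuming write_atomic() mirrors the existing ->write() signature; mycon_* and my_hw_putc_polled() are invented for illustration:

    static void mycon_write_atomic(struct console *con,
                                   const char *s, unsigned int count)
    {
        unsigned int i;

        /* Polled, busy-waiting output: no sleeping, no interrupts, no
         * locks other than whatever the printk caller already holds. */
        for (i = 0; i < count; i++)
            my_hw_putc_polled(s[i]);
    }

    static struct console mycon = {
        .name         = "mycon",
        .write_atomic = mycon_write_atomic,
        .flags        = CON_PRINTBUFFER,
    };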
diff --git a/debian/patches-rt/0018-x86-fpu-Prepare-copy_fpstate_to_sigframe-for-TIF_NEE.patch b/debian/patches-rt/0018-x86-fpu-Prepare-copy_fpstate_to_sigframe-for-TIF_NEE.patch
deleted file mode 100644
index 775aed89a..000000000
--- a/debian/patches-rt/0018-x86-fpu-Prepare-copy_fpstate_to_sigframe-for-TIF_NEE.patch
+++ /dev/null
@@ -1,36 +0,0 @@
-From: Rik van Riel <riel@surriel.com>
-Date: Sun, 9 Sep 2018 18:30:51 +0200
-Subject: [PATCH 18/27] x86/fpu: Prepare copy_fpstate_to_sigframe() for
- TIF_NEED_FPU_LOAD
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-The FPU registers need only to be saved if TIF_NEED_FPU_LOAD is not set.
-Otherwise this has already been done and can be skipped.
-
-Signed-off-by: Rik van Riel <riel@surriel.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/kernel/fpu/signal.c | 12 +++++++++++-
- 1 file changed, 11 insertions(+), 1 deletion(-)
-
---- a/arch/x86/kernel/fpu/signal.c
-+++ b/arch/x86/kernel/fpu/signal.c
-@@ -171,7 +171,17 @@ int copy_fpstate_to_sigframe(void __user
- sizeof(struct user_i387_ia32_struct), NULL,
- (struct _fpstate_32 __user *) buf) ? -1 : 1;
-
-- copy_fpregs_to_fpstate(fpu);
-+ fpregs_lock();
-+ /*
-+ * If we do not need to load the FPU registers at return to userspace
-+ * then the CPU has the current state and we need to save it. Otherwise
-+ * it is already done and we can skip it.
-+ */
-+ if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
-+ copy_fpregs_to_fpstate(fpu);
-+ set_thread_flag(TIF_NEED_FPU_LOAD);
-+ }
-+ fpregs_unlock();
-
- if (using_compacted_format()) {
- if (copy_xstate_to_user(buf_fx, xsave, 0, size))
diff --git a/debian/patches-rt/0019-printk-introduce-emergency-messages.patch b/debian/patches-rt/0019-printk-introduce-emergency-messages.patch
index cb44efdfe..c151b9128 100644
--- a/debian/patches-rt/0019-printk-introduce-emergency-messages.patch
+++ b/debian/patches-rt/0019-printk-introduce-emergency-messages.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 12 Feb 2019 15:29:57 +0100
Subject: [PATCH 19/25] printk: introduce emergency messages
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Console messages are generally either critical or non-critical.
Critical messages are messages such as crashes or sysrq output.
@@ -42,7 +42,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
-@@ -59,6 +59,7 @@ static inline const char *printk_skip_he
+@@ -58,6 +58,7 @@ static inline const char *printk_skip_he
*/
#define CONSOLE_LOGLEVEL_DEFAULT CONFIG_CONSOLE_LOGLEVEL_DEFAULT
#define CONSOLE_LOGLEVEL_QUIET CONFIG_CONSOLE_LOGLEVEL_QUIET
@@ -50,7 +50,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern int console_printk[];
-@@ -66,6 +67,7 @@ extern int console_printk[];
+@@ -65,6 +66,7 @@ extern int console_printk[];
#define default_message_loglevel (console_printk[1])
#define minimum_console_loglevel (console_printk[2])
#define default_console_loglevel (console_printk[3])
@@ -60,7 +60,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -45,6 +45,7 @@
+@@ -46,6 +46,7 @@
#include <linux/ctype.h>
#include <linux/uio.h>
#include <linux/kthread.h>
@@ -68,7 +68,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <linux/printk_ringbuffer.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
-@@ -61,11 +62,12 @@
+@@ -62,11 +63,12 @@
#include "braille.h"
#include "internal.h"
@@ -80,9 +80,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
CONSOLE_LOGLEVEL_DEFAULT, /* default_console_loglevel */
+ CONSOLE_LOGLEVEL_EMERGENCY, /* emergency_console_loglevel */
};
+ EXPORT_SYMBOL_GPL(console_printk);
- atomic_t ignore_console_lock_warning __read_mostly = ATOMIC_INIT(0);
-@@ -474,6 +476,9 @@ static u32 log_next(u32 idx)
+@@ -488,6 +490,9 @@ static u32 log_next(u32 idx)
return idx + msg->len;
}
@@ -90,9 +90,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ char *text, u16 text_len);
+
/* insert record into the buffer, discard old ones, update heads */
- static int log_store(int facility, int level,
+ static int log_store(u32 caller_id, int facility, int level,
enum log_flags flags, u64 ts_nsec, u16 cpu,
-@@ -1587,7 +1592,7 @@ static void printk_write_history(struct
+@@ -1631,7 +1636,7 @@ static void printk_write_history(struct
* The console_lock must be held.
*/
static void call_console_drivers(u64 seq, const char *ext_text, size_t ext_len,
@@ -101,7 +101,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct console *con;
-@@ -1607,6 +1612,18 @@ static void call_console_drivers(u64 seq
+@@ -1651,6 +1656,18 @@ static void call_console_drivers(u64 seq
con->wrote_history = 1;
con->printk_seq = seq - 1;
}
@@ -120,7 +120,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!con->write)
continue;
if (!cpu_online(raw_smp_processor_id()) &&
-@@ -1718,8 +1735,12 @@ asmlinkage int vprintk_emit(int facility
+@@ -1770,8 +1787,12 @@ asmlinkage int vprintk_emit(int facility
cpu = raw_smp_processor_id();
@@ -135,9 +135,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* strip and flag a trailing newline */
if (text_len && text[text_len-1] == '\n') {
-@@ -1755,6 +1776,14 @@ asmlinkage int vprintk_emit(int facility
+@@ -1804,6 +1825,14 @@ asmlinkage int vprintk_emit(int facility
if (dict)
- lflags |= LOG_PREFIX|LOG_NEWLINE;
+ lflags |= LOG_NEWLINE;
+ /*
+ * NOTE:
@@ -147,10 +147,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ */
+ printk_emergency(rbuf, level, ts_nsec, cpu, text, text_len);
+
- printed_len = log_store(facility, level, lflags, ts_nsec, cpu,
+ printed_len = log_store(caller_id, facility, level, lflags, ts_nsec, cpu,
dict, dictlen, text, text_len);
-@@ -1847,7 +1876,7 @@ static ssize_t msg_print_ext_body(char *
+@@ -1896,7 +1925,7 @@ static ssize_t msg_print_ext_body(char *
char *dict, size_t dict_len,
char *text, size_t text_len) { return 0; }
static void call_console_drivers(u64 seq, const char *ext_text, size_t ext_len,
@@ -159,7 +159,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static size_t msg_print_text(const struct printk_log *msg, bool syslog,
bool time, char *buf, size_t size) { return 0; }
static bool suppress_message_printing(int level) { return false; }
-@@ -2579,7 +2608,7 @@ static int printk_kthread_func(void *dat
+@@ -2629,7 +2658,7 @@ static int printk_kthread_func(void *dat
console_lock();
call_console_drivers(master_seq, ext_text,
@@ -168,7 +168,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (len > 0 || ext_len > 0)
printk_delay(msg->level);
console_unlock();
-@@ -2983,6 +3012,76 @@ void kmsg_dump_rewind(struct kmsg_dumper
+@@ -3033,6 +3062,76 @@ void kmsg_dump_rewind(struct kmsg_dumper
logbuf_unlock_irqrestore(flags);
}
EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
@@ -247,7 +247,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void console_atomic_lock(unsigned int *flags)
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
-@@ -43,6 +43,23 @@ config CONSOLE_LOGLEVEL_QUIET
+@@ -61,6 +61,23 @@ config CONSOLE_LOGLEVEL_QUIET
will be used as the loglevel. IOW passing "quiet" will be the
equivalent of passing "loglevel=<CONSOLE_LOGLEVEL_QUIET>"
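The dispatch rule the description implies, as a hedged sketch (the threshold comparison and loop shape are assumptions; emergency_console_loglevel is the new console_printk[] slot added above, and is_emergency_level()/emergency_write() are invented names):

    static bool is_emergency_level(int level)
    {
        /* Lower numbers are more severe in printk. */
        return level < emergency_console_loglevel;
    }

    static void emergency_write(const char *text, unsigned int text_len)
    {
        struct console *con;

        /* Bypass the printk kthread: push the message out synchronously
         * through every console that can do atomic writes. */
        for_each_console(con) {
            if (con->write_atomic)
                con->write_atomic(con, text, text_len);
        }
    }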
diff --git a/debian/patches-rt/0019-x86-fpu-Update-xstate-s-PKRU-value-on-write_pkru.patch b/debian/patches-rt/0019-x86-fpu-Update-xstate-s-PKRU-value-on-write_pkru.patch
deleted file mode 100644
index 024381676..000000000
--- a/debian/patches-rt/0019-x86-fpu-Update-xstate-s-PKRU-value-on-write_pkru.patch
+++ /dev/null
@@ -1,53 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 19 Oct 2018 23:50:27 +0200
-Subject: [PATCH 19/27] x86/fpu: Update xstate's PKRU value on write_pkru()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-During the context switch the xstate is loaded which also includes the
-PKRU value.
-If xstate is restored on return to userland it is required that the
-PKRU value in xstate is the same as the one in the CPU.
-
-Save the PKRU in xstate during modification.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/include/asm/pgtable.h | 20 ++++++++++++++++++--
- 1 file changed, 18 insertions(+), 2 deletions(-)
-
---- a/arch/x86/include/asm/pgtable.h
-+++ b/arch/x86/include/asm/pgtable.h
-@@ -23,6 +23,8 @@
-
- #ifndef __ASSEMBLY__
- #include <asm/x86_init.h>
-+#include <asm/fpu/xstate.h>
-+#include <asm/fpu/api.h>
-
- extern pgd_t early_top_pgt[PTRS_PER_PGD];
- int __init __early_make_pgtable(unsigned long address, pmdval_t pmd);
-@@ -133,8 +135,22 @@ static inline u32 read_pkru(void)
-
- static inline void write_pkru(u32 pkru)
- {
-- if (boot_cpu_has(X86_FEATURE_OSPKE))
-- __write_pkru(pkru);
-+ struct pkru_state *pk;
-+
-+ if (!boot_cpu_has(X86_FEATURE_OSPKE))
-+ return;
-+
-+ pk = get_xsave_addr(&current->thread.fpu.state.xsave, XFEATURE_PKRU);
-+ /*
-+ * The PKRU value in xstate needs to be in sync with the value that is
-+ * written to the CPU. The FPU restore on return to userland would
-+ * otherwise load the previous value again.
-+ */
-+ fpregs_lock();
-+ if (pk)
-+ pk->pkru = pkru;
-+ __write_pkru(pkru);
-+ fpregs_unlock();
- }
-
- static inline int pte_young(pte_t pte)
diff --git a/debian/patches-rt/0020-serial-8250-implement-write_atomic.patch b/debian/patches-rt/0020-serial-8250-implement-write_atomic.patch
index 0a153748f..92ae5adda 100644
--- a/debian/patches-rt/0020-serial-8250-implement-write_atomic.patch
+++ b/debian/patches-rt/0020-serial-8250-implement-write_atomic.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 12 Feb 2019 15:29:58 +0100
Subject: [PATCH 20/25] serial: 8250: implement write_atomic
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Implement a non-sleeping NMI-safe write_atomic console function in
order to support emergency printk messages.
@@ -423,7 +423,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
else if (probe)
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
-@@ -11,6 +11,7 @@
+@@ -7,6 +7,7 @@
#ifndef _LINUX_SERIAL_8250_H
#define _LINUX_SERIAL_8250_H
@@ -431,7 +431,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/platform_device.h>
-@@ -126,6 +127,8 @@ struct uart_8250_port {
+@@ -122,6 +123,8 @@ struct uart_8250_port {
#define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA
unsigned char msr_saved_flags;
@@ -440,7 +440,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct uart_8250_dma *dma;
const struct uart_8250_ops *ops;
-@@ -177,6 +180,8 @@ void serial8250_init_port(struct uart_82
+@@ -173,6 +176,8 @@ void serial8250_init_port(struct uart_82
void serial8250_set_defaults(struct uart_8250_port *up);
void serial8250_console_write(struct uart_8250_port *up, const char *s,
unsigned int count);
diff --git a/debian/patches-rt/0020-x86-fpu-Inline-copy_user_to_fpregs_zeroing.patch b/debian/patches-rt/0020-x86-fpu-Inline-copy_user_to_fpregs_zeroing.patch
deleted file mode 100644
index bab5d6bab..000000000
--- a/debian/patches-rt/0020-x86-fpu-Inline-copy_user_to_fpregs_zeroing.patch
+++ /dev/null
@@ -1,47 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 7 Nov 2018 15:06:06 +0100
-Subject: [PATCH 20/27] x86/fpu: Inline copy_user_to_fpregs_zeroing()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-Start refactoring __fpu__restore_sig() by inlining
-copy_user_to_fpregs_zeroing(). The original function remains and will be
-used to restore from userland memory if possible.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/kernel/fpu/signal.c | 20 +++++++++++++++++++-
- 1 file changed, 19 insertions(+), 1 deletion(-)
-
---- a/arch/x86/kernel/fpu/signal.c
-+++ b/arch/x86/kernel/fpu/signal.c
-@@ -337,11 +337,29 @@ static int __fpu__restore_sig(void __use
- kfree(tmp);
- return err;
- } else {
-+ int ret;
-+
- /*
- * For 64-bit frames and 32-bit fsave frames, restore the user
- * state to the registers directly (with exceptions handled).
- */
-- if (copy_user_to_fpregs_zeroing(buf_fx, xfeatures, fx_only)) {
-+ if (use_xsave()) {
-+ if ((unsigned long)buf_fx % 64 || fx_only) {
-+ u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;
-+ copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
-+ ret = copy_user_to_fxregs(buf_fx);
-+ } else {
-+ u64 init_bv = xfeatures_mask & ~xfeatures;
-+ if (unlikely(init_bv))
-+ copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
-+ ret = copy_user_to_xregs(buf_fx, xfeatures);
-+ }
-+ } else if (use_fxsr()) {
-+ ret = copy_user_to_fxregs(buf_fx);
-+ } else
-+ ret = copy_user_to_fregs(buf_fx);
-+
-+ if (ret) {
- fpu__clear(fpu);
- return -1;
- }
diff --git a/debian/patches-rt/0021-printk-implement-KERN_CONT.patch b/debian/patches-rt/0021-printk-implement-KERN_CONT.patch
index fe2be0051..6aa653532 100644
--- a/debian/patches-rt/0021-printk-implement-KERN_CONT.patch
+++ b/debian/patches-rt/0021-printk-implement-KERN_CONT.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 12 Feb 2019 15:29:59 +0100
Subject: [PATCH 21/25] printk: implement KERN_CONT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Implement KERN_CONT based on the printing CPU rather than on the
printing task. As long as the KERN_CONT messages are coming from the
same CPU, the messages are assumed
to belong to each other.
Signed-off-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- kernel/printk/printk.c | 73 ++++++++++++++++++++++++++++---------------------
- 1 file changed, 42 insertions(+), 31 deletions(-)
+ kernel/printk/printk.c | 65 +++++++++++++++++++++++++++----------------------
+ 1 file changed, 37 insertions(+), 28 deletions(-)
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1649,8 +1649,6 @@ static void call_console_drivers(u64 seq
- }
+@@ -1699,8 +1699,6 @@ static inline u32 printk_caller_id(void)
+ 0x80000000 + raw_smp_processor_id();
}
-/* FIXME: no support for LOG_CONT */
@@ -25,12 +25,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Continuation lines are buffered, and not committed to the record buffer
* until the line is complete, or a race forces it. The line fragments
-@@ -1660,52 +1658,57 @@ static void call_console_drivers(u64 seq
- static struct cont {
+@@ -1711,52 +1709,55 @@ static struct cont {
char buf[LOG_LINE_MAX];
size_t len; /* length == 0 means unused buffer */
-- struct task_struct *owner; /* task of first print*/
-+ int cpu_owner; /* cpu of first print*/
+ u32 caller_id; /* printk_caller_id() of first print */
++ int cpu_owner; /* cpu of first print */
u64 ts_nsec; /* time of first print */
u8 level; /* log level of first message */
u8 facility; /* log facility of first message */
@@ -47,17 +46,17 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ if (c->len == 0)
return;
-- log_store(cont.facility, cont.level, cont.flags, cont.ts_nsec,
-- NULL, 0, cont.buf, cont.len);
+- log_store(cont.caller_id, cont.facility, cont.level, cont.flags,
+- cont.ts_nsec, NULL, 0, cont.buf, cont.len);
- cont.len = 0;
-+ log_store(c->facility, c->level, c->flags, c->ts_nsec, c->cpu_owner,
-+ NULL, 0, c->buf, c->len);
++ log_store(c->caller_id, c->facility, c->level, c->flags,
++ c->ts_nsec, c->cpu_owner, NULL, 0, c->buf, c->len);
+ c->len = 0;
}
--static bool cont_add(int facility, int level, enum log_flags flags, const char *text, size_t len)
-+static void cont_add(int ctx, int cpu, int facility, int level,
-+ enum log_flags flags, const char *text, size_t len)
+-static bool cont_add(u32 caller_id, int facility, int level,
++static void cont_add(int ctx, int cpu, u32 caller_id, int facility, int level,
+ enum log_flags flags, const char *text, size_t len)
{
+ struct cont *c = &cont[ctx];
+
@@ -75,15 +74,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- if (!cont.len) {
- cont.facility = facility;
- cont.level = level;
-- cont.owner = current;
+- cont.caller_id = caller_id;
- cont.ts_nsec = local_clock();
- cont.flags = flags;
+ if (!c->len) {
+ c->facility = facility;
+ c->level = level;
-+ c->cpu_owner = cpu;
++ c->caller_id = caller_id;
+ c->ts_nsec = local_clock();
+ c->flags = flags;
++ c->cpu_owner = cpu;
}
- memcpy(cont.buf + cont.len, text, len);
@@ -91,17 +91,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ memcpy(c->buf + c->len, text, len);
+ c->len += len;
-- // The original flags come from the first line,
-- // but later continuations can add a newline.
-+ /*
-+ * The original flags come from the first line,
-+ * but later continuations can add a newline.
-+ */
+ // The original flags come from the first line,
+ // but later continuations can add a newline.
if (flags & LOG_NEWLINE) {
- cont.flags |= LOG_NEWLINE;
- cont_flush();
+ c->flags |= LOG_NEWLINE;
-+ cont_flush(ctx);
}
-
- return true;
@@ -110,27 +105,27 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* ring buffer used as memory allocator for temporary sprint buffers */
DECLARE_STATIC_PRINTKRB(sprint_rb,
-@@ -1717,6 +1720,7 @@ asmlinkage int vprintk_emit(int facility
+@@ -1768,6 +1769,7 @@ asmlinkage int vprintk_emit(int facility
const char *fmt, va_list args)
{
- enum log_flags lflags = 0;
+ const u32 caller_id = printk_caller_id();
+ int ctx = !!in_nmi();
+ enum log_flags lflags = 0;
int printed_len = 0;
struct prb_handle h;
- size_t text_len;
-@@ -1784,8 +1788,15 @@ asmlinkage int vprintk_emit(int facility
+@@ -1833,8 +1835,15 @@ asmlinkage int vprintk_emit(int facility
*/
printk_emergency(rbuf, level, ts_nsec, cpu, text, text_len);
-- printed_len = log_store(facility, level, lflags, ts_nsec, cpu,
+- printed_len = log_store(caller_id, facility, level, lflags, ts_nsec, cpu,
- dict, dictlen, text, text_len);
+ if ((lflags & LOG_CONT) || !(lflags & LOG_NEWLINE)) {
-+ cont_add(ctx, cpu, facility, level, lflags, text, text_len);
-+ printed_len = text_len;
++ cont_add(ctx, cpu, caller_id, facility, level, lflags, text, text_len);
++ printed_len = text_len;
+ } else {
+ if (cpu == cont[ctx].cpu_owner)
+ cont_flush(ctx);
-+ printed_len = log_store(facility, level, lflags, ts_nsec, cpu,
++ printed_len = log_store(caller_id, facility, level, lflags, ts_nsec, cpu,
+ dict, dictlen, text, text_len);
+ }
diff --git a/debian/patches-rt/0021-x86-fpu-Let-__fpu__restore_sig-restore-the-32bit-fxs.patch b/debian/patches-rt/0021-x86-fpu-Let-__fpu__restore_sig-restore-the-32bit-fxs.patch
deleted file mode 100644
index 8f43994f5..000000000
--- a/debian/patches-rt/0021-x86-fpu-Let-__fpu__restore_sig-restore-the-32bit-fxs.patch
+++ /dev/null
@@ -1,196 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 27 Nov 2018 17:48:32 +0100
-Subject: [PATCH 21/27] x86/fpu: Let __fpu__restore_sig() restore the
- !32bit+fxsr frame from kernel memory
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-The !32bit+fxsr case loads the new state from user memory. In case we
-restore the FPU state on return to userland we can't do this. It would
-be required to disable preemption in order to avoid a context switch
-which would set TIF_NEED_FPU_LOAD. If this happens before the "restore"
-operation then the loaded registers would become volatile.
-
-Disabling preemption while accessing user memory requires to disable the
-pagefault handler. An error during XRSTOR would then mean that either a
-page fault occured (and we have to retry with enabled page fault
-handler) or a #GP occured because the xstate is bogus (after all the
-sig-handler can modify it).
-
-In order to avoid that mess, copy the FPU state from userland, validate
-it and then load it. The copy_users_…() helper are basically the old
-helper except that they operate on kernel memory and the fault handler
-just sets the error value and the caller handles it.
-
-copy_user_to_fpregs_zeroing() and its helpers remain and will be used
-later for a fastpath optimisation.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/include/asm/fpu/internal.h | 43 ++++++++++++++++++++++++
- arch/x86/kernel/fpu/signal.c | 62 ++++++++++++++++++++++++++++--------
- 2 files changed, 92 insertions(+), 13 deletions(-)
-
---- a/arch/x86/include/asm/fpu/internal.h
-+++ b/arch/x86/include/asm/fpu/internal.h
-@@ -121,6 +121,21 @@ extern void fpstate_sanitize_xstate(stru
- err; \
- })
-
-+#define kernel_insn_err(insn, output, input...) \
-+({ \
-+ int err; \
-+ asm volatile("1:" #insn "\n\t" \
-+ "2:\n" \
-+ ".section .fixup,\"ax\"\n" \
-+ "3: movl $-1,%[err]\n" \
-+ " jmp 2b\n" \
-+ ".previous\n" \
-+ _ASM_EXTABLE(1b, 3b) \
-+ : [err] "=r" (err), output \
-+ : "0"(0), input); \
-+ err; \
-+})
-+
- #define kernel_insn(insn, output, input...) \
- asm volatile("1:" #insn "\n\t" \
- "2:\n" \
-@@ -157,6 +172,14 @@ static inline void copy_kernel_to_fxregs
- }
- }
-
-+static inline int copy_kernel_to_fxregs_err(struct fxregs_state *fx)
-+{
-+ if (IS_ENABLED(CONFIG_X86_32))
-+ return kernel_insn_err(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
-+ else
-+ return kernel_insn_err(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
-+}
-+
- static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
- {
- if (IS_ENABLED(CONFIG_X86_32))
-@@ -174,6 +197,11 @@ static inline void copy_kernel_to_fregs(
- kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
- }
-
-+static inline int copy_kernel_to_fregs_err(struct fregs_state *fx)
-+{
-+ return kernel_insn_err(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
-+}
-+
- static inline int copy_user_to_fregs(struct fregs_state __user *fx)
- {
- return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
-@@ -398,6 +426,21 @@ static inline int copy_user_to_xregs(str
-
- return err;
- }
-+
-+/*
-+ * Restore xstate from kernel space xsave area, return an error code instead an
-+ * exception.
-+ */
-+static inline int copy_kernel_to_xregs_err(struct xregs_state *xstate, u64 mask)
-+{
-+ u32 lmask = mask;
-+ u32 hmask = mask >> 32;
-+ int err;
-+
-+ XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
-+
-+ return err;
-+}
-
- /*
- * These must be called with preempt disabled. Returns
---- a/arch/x86/kernel/fpu/signal.c
-+++ b/arch/x86/kernel/fpu/signal.c
-@@ -234,7 +234,8 @@ sanitize_restored_xstate(union fpregs_st
- */
- xsave->i387.mxcsr &= mxcsr_feature_mask;
-
-- convert_to_fxsr(&state->fxsave, ia32_env);
-+ if (ia32_env)
-+ convert_to_fxsr(&state->fxsave, ia32_env);
- }
- }
-
-@@ -337,28 +338,63 @@ static int __fpu__restore_sig(void __use
- kfree(tmp);
- return err;
- } else {
-+ union fpregs_state *state;
-+ void *tmp;
- int ret;
-
-+ tmp = kzalloc(sizeof(*state) + fpu_kernel_xstate_size + 64, GFP_KERNEL);
-+ if (!tmp)
-+ return -ENOMEM;
-+ state = PTR_ALIGN(tmp, 64);
-+
- /*
- * For 64-bit frames and 32-bit fsave frames, restore the user
- * state to the registers directly (with exceptions handled).
- */
-- if (use_xsave()) {
-- if ((unsigned long)buf_fx % 64 || fx_only) {
-- u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;
-- copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
-- ret = copy_user_to_fxregs(buf_fx);
-+ if ((unsigned long)buf_fx % 64)
-+ fx_only = 1;
-+
-+ if (use_xsave() && !fx_only) {
-+ u64 init_bv = xfeatures_mask & ~xfeatures;
-+
-+ if (using_compacted_format()) {
-+ ret = copy_user_to_xstate(&state->xsave, buf_fx);
- } else {
-- u64 init_bv = xfeatures_mask & ~xfeatures;
-- if (unlikely(init_bv))
-- copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
-- ret = copy_user_to_xregs(buf_fx, xfeatures);
-+ ret = __copy_from_user(&state->xsave, buf_fx, state_size);
-+
-+ if (!ret && state_size > offsetof(struct xregs_state, header))
-+ ret = validate_xstate_header(&state->xsave.header);
- }
-+ if (ret)
-+ goto err_out;
-+ sanitize_restored_xstate(state, NULL, xfeatures,
-+ fx_only);
-+
-+ if (unlikely(init_bv))
-+ copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
-+ ret = copy_kernel_to_xregs_err(&state->xsave, xfeatures);
-+
- } else if (use_fxsr()) {
-- ret = copy_user_to_fxregs(buf_fx);
-- } else
-- ret = copy_user_to_fregs(buf_fx);
-+ ret = __copy_from_user(&state->fxsave, buf_fx, state_size);
-+ if (ret)
-+ goto err_out;
-+
-+ if (use_xsave()) {
-+ u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;
-+ copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
-+ }
-+ state->fxsave.mxcsr &= mxcsr_feature_mask;
-+
-+ ret = copy_kernel_to_fxregs_err(&state->fxsave);
-+ } else {
-+ ret = __copy_from_user(&state->fsave, buf_fx, state_size);
-+ if (ret)
-+ goto err_out;
-+ ret = copy_kernel_to_fregs_err(&state->fsave);
-+ }
-
-+err_out:
-+ kfree(tmp);
- if (ret) {
- fpu__clear(fpu);
- return -1;
diff --git a/debian/patches-rt/0022-printk-implement-dev-kmsg.patch b/debian/patches-rt/0022-printk-implement-dev-kmsg.patch
index 09b21ff4e..145ac922e 100644
--- a/debian/patches-rt/0022-printk-implement-dev-kmsg.patch
+++ b/debian/patches-rt/0022-printk-implement-dev-kmsg.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 12 Feb 2019 15:30:00 +0100
Subject: [PATCH 22/25] printk: implement /dev/kmsg
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Since printk messages are now logged to a new ring buffer, update
the /dev/kmsg functions to pull the messages from there.
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
-@@ -191,6 +191,7 @@ void __init setup_log_buf(int early);
+@@ -192,6 +192,7 @@ void __init setup_log_buf(int early);
void dump_stack_print_info(const char *log_lvl);
void show_regs_print_info(const char *log_lvl);
extern asmlinkage void dump_stack(void) __cold;
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int vprintk(const char *s, va_list args)
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -637,10 +637,11 @@ static ssize_t msg_print_ext_body(char *
+@@ -663,10 +663,11 @@ static ssize_t msg_print_ext_body(char *
/* /dev/kmsg - userspace message inject/listen interface */
struct devkmsg_user {
u64 seq;
@@ -59,7 +59,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
};
static __printf(3, 4) __cold
-@@ -723,9 +724,11 @@ static ssize_t devkmsg_read(struct file
+@@ -749,9 +750,11 @@ static ssize_t devkmsg_read(struct file
size_t count, loff_t *ppos)
{
struct devkmsg_user *user = file->private_data;
@@ -72,7 +72,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!user)
return -EBADF;
-@@ -734,52 +737,67 @@ static ssize_t devkmsg_read(struct file
+@@ -760,52 +763,67 @@ static ssize_t devkmsg_read(struct file
if (ret)
return ret;
@@ -166,7 +166,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
out:
mutex_unlock(&user->lock);
return ret;
-@@ -788,19 +806,21 @@ static ssize_t devkmsg_read(struct file
+@@ -814,19 +832,21 @@ static ssize_t devkmsg_read(struct file
static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
{
struct devkmsg_user *user = file->private_data;
@@ -192,7 +192,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
break;
case SEEK_DATA:
/*
-@@ -808,40 +828,83 @@ static loff_t devkmsg_llseek(struct file
+@@ -834,40 +854,83 @@ static loff_t devkmsg_llseek(struct file
* like issued by 'dmesg -c'. Reading /dev/kmsg itself
* changes no global state, and does not clear anything.
*/
@@ -291,7 +291,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ret;
}
-@@ -871,10 +934,7 @@ static int devkmsg_open(struct inode *in
+@@ -897,10 +960,7 @@ static int devkmsg_open(struct inode *in
mutex_init(&user->lock);
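A self-contained userspace companion: one read() on /dev/kmsg returns exactly one record in the header format shown earlier in this series:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        char buf[8192];
        ssize_t n;
        int fd = open("/dev/kmsg", O_RDONLY | O_NONBLOCK);

        if (fd < 0) {
            perror("open /dev/kmsg");
            return 1;
        }
        n = read(fd, buf, sizeof(buf) - 1);   /* one record per read() */
        if (n > 0) {
            buf[n] = '\0';
            fputs(buf, stdout);
        }
        close(fd);
        return 0;
    }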
diff --git a/debian/patches-rt/0022-x86-fpu-Merge-the-two-code-paths-in-__fpu__restore_s.patch b/debian/patches-rt/0022-x86-fpu-Merge-the-two-code-paths-in-__fpu__restore_s.patch
deleted file mode 100644
index 84889e202..000000000
--- a/debian/patches-rt/0022-x86-fpu-Merge-the-two-code-paths-in-__fpu__restore_s.patch
+++ /dev/null
@@ -1,195 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 27 Nov 2018 21:08:01 +0100
-Subject: [PATCH 22/27] x86/fpu: Merge the two code paths in
- __fpu__restore_sig()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-The ia32_fxstate case (32bit with fxsr) and the other (64bit, 32bit
-without fxsr) restore both from kernel memory and sanitize the content.
-The !ia32_fxstate version restores missing xstates from "init state"
-while the ia32_fxstate version skips it.
-
-Merge the two code paths and keep the !ia32_fxstate version. Copy only
-the user_i387_ia32_struct data structure in the ia32_fxstate.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/kernel/fpu/signal.c | 142 ++++++++++++++++---------------------------
- 1 file changed, 55 insertions(+), 87 deletions(-)
-
---- a/arch/x86/kernel/fpu/signal.c
-+++ b/arch/x86/kernel/fpu/signal.c
-@@ -263,12 +263,17 @@ static inline int copy_user_to_fpregs_ze
-
- static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
- {
-+ struct user_i387_ia32_struct *envp = NULL;
- int ia32_fxstate = (buf != buf_fx);
- struct task_struct *tsk = current;
- struct fpu *fpu = &tsk->thread.fpu;
- int state_size = fpu_kernel_xstate_size;
-+ struct user_i387_ia32_struct env;
-+ union fpregs_state *state;
- u64 xfeatures = 0;
- int fx_only = 0;
-+ int ret = 0;
-+ void *tmp;
-
- ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
- IS_ENABLED(CONFIG_IA32_EMULATION));
-@@ -303,105 +308,68 @@ static int __fpu__restore_sig(void __use
- }
- }
-
-+ tmp = kzalloc(sizeof(*state) + fpu_kernel_xstate_size + 64, GFP_KERNEL);
-+ if (!tmp)
-+ return -ENOMEM;
-+ state = PTR_ALIGN(tmp, 64);
-+
-+ if ((unsigned long)buf_fx % 64)
-+ fx_only = 1;
-+
-+ /*
-+ * For 32-bit frames with fxstate, copy the fxstate so it can be
-+ * reconstructed later.
-+ */
- if (ia32_fxstate) {
-- /*
-- * For 32-bit frames with fxstate, copy the user state to the
-- * thread's fpu state, reconstruct fxstate from the fsave
-- * header. Validate and sanitize the copied state.
-- */
-- struct user_i387_ia32_struct env;
-- union fpregs_state *state;
-- int err = 0;
-- void *tmp;
--
-- tmp = kzalloc(sizeof(*state) + fpu_kernel_xstate_size + 64, GFP_KERNEL);
-- if (!tmp)
-- return -ENOMEM;
-- state = PTR_ALIGN(tmp, 64);
-+ ret = __copy_from_user(&env, buf, sizeof(env));
-+ if (ret)
-+ goto err_out;
-+ envp = &env;
-+ }
-+ if (use_xsave() && !fx_only) {
-+ u64 init_bv = xfeatures_mask & ~xfeatures;
-
- if (using_compacted_format()) {
-- err = copy_user_to_xstate(&state->xsave, buf_fx);
-+ ret = copy_user_to_xstate(&state->xsave, buf_fx);
- } else {
-- err = __copy_from_user(&state->xsave, buf_fx, state_size);
--
-- if (!err && state_size > offsetof(struct xregs_state, header))
-- err = validate_xstate_header(&state->xsave.header);
-- }
-+ ret = __copy_from_user(&state->xsave, buf_fx, state_size);
-
-- if (err || __copy_from_user(&env, buf, sizeof(env))) {
-- err = -1;
-- } else {
-- sanitize_restored_xstate(state, &env, xfeatures, fx_only);
-- copy_kernel_to_fpregs(state);
-+ if (!ret && state_size > offsetof(struct xregs_state, header))
-+ ret = validate_xstate_header(&state->xsave.header);
- }
-+ if (ret)
-+ goto err_out;
-
-- kfree(tmp);
-- return err;
-- } else {
-- union fpregs_state *state;
-- void *tmp;
-- int ret;
--
-- tmp = kzalloc(sizeof(*state) + fpu_kernel_xstate_size + 64, GFP_KERNEL);
-- if (!tmp)
-- return -ENOMEM;
-- state = PTR_ALIGN(tmp, 64);
--
-- /*
-- * For 64-bit frames and 32-bit fsave frames, restore the user
-- * state to the registers directly (with exceptions handled).
-- */
-- if ((unsigned long)buf_fx % 64)
-- fx_only = 1;
--
-- if (use_xsave() && !fx_only) {
-- u64 init_bv = xfeatures_mask & ~xfeatures;
-+ sanitize_restored_xstate(state, envp, xfeatures, fx_only);
-
-- if (using_compacted_format()) {
-- ret = copy_user_to_xstate(&state->xsave, buf_fx);
-- } else {
-- ret = __copy_from_user(&state->xsave, buf_fx, state_size);
--
-- if (!ret && state_size > offsetof(struct xregs_state, header))
-- ret = validate_xstate_header(&state->xsave.header);
-- }
-- if (ret)
-- goto err_out;
-- sanitize_restored_xstate(state, NULL, xfeatures,
-- fx_only);
--
-- if (unlikely(init_bv))
-- copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
-- ret = copy_kernel_to_xregs_err(&state->xsave, xfeatures);
--
-- } else if (use_fxsr()) {
-- ret = __copy_from_user(&state->fxsave, buf_fx, state_size);
-- if (ret)
-- goto err_out;
--
-- if (use_xsave()) {
-- u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;
-- copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
-- }
-- state->fxsave.mxcsr &= mxcsr_feature_mask;
--
-- ret = copy_kernel_to_fxregs_err(&state->fxsave);
-- } else {
-- ret = __copy_from_user(&state->fsave, buf_fx, state_size);
-- if (ret)
-- goto err_out;
-- ret = copy_kernel_to_fregs_err(&state->fsave);
-+ if (unlikely(init_bv))
-+ copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
-+ ret = copy_kernel_to_xregs_err(&state->xsave, xfeatures);
-+
-+ } else if (use_fxsr()) {
-+ ret = __copy_from_user(&state->fxsave, buf_fx, state_size);
-+ if (ret)
-+ goto err_out;
-+
-+ sanitize_restored_xstate(state, envp, xfeatures, fx_only);
-+ if (use_xsave()) {
-+ u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;
-+ copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
- }
-
--err_out:
-- kfree(tmp);
-- if (ret) {
-- fpu__clear(fpu);
-- return -1;
-- }
-+ ret = copy_kernel_to_fxregs_err(&state->fxsave);
-+ } else {
-+ ret = __copy_from_user(&state->fsave, buf_fx, state_size);
-+ if (ret)
-+ goto err_out;
-+ ret = copy_kernel_to_fregs_err(&state->fsave);
- }
-
-- return 0;
-+err_out:
-+ kfree(tmp);
-+ if (ret)
-+ fpu__clear(fpu);
-+ return ret;
- }
-
- static inline int xstate_sigframe_size(void)
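Aside: the idiom removed above is worth spelling out. The xsave area handed to XRSTOR must be 64-byte aligned, so the deleted slowpath over-allocated and aligned by hand. A minimal sketch of that idiom, using the names from the deleted lines (illustrative only, not part of the diff):

        union fpregs_state *state;
        void *tmp;

        /* over-allocate by 64 so PTR_ALIGN() can find an aligned boundary */
        tmp = kzalloc(sizeof(*state) + fpu_kernel_xstate_size + 64, GFP_KERNEL);
        if (!tmp)
                return -ENOMEM;
        state = PTR_ALIGN(tmp, 64);
        /* ... restore through 'state' ..., then: */
        kfree(tmp);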
diff --git a/debian/patches-rt/0023-printk-implement-syslog.patch b/debian/patches-rt/0023-printk-implement-syslog.patch
index 95321e759..f639e063a 100644
--- a/debian/patches-rt/0023-printk-implement-syslog.patch
+++ b/debian/patches-rt/0023-printk-implement-syslog.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 12 Feb 2019 15:30:01 +0100
Subject: [PATCH 23/25] printk: implement syslog
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Since printk messages are now logged to a new ring buffer, update
the syslog functions to pull the messages from there.
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -387,10 +387,12 @@ DECLARE_STATIC_PRINTKRB_CPULOCK(printk_c
+@@ -397,10 +397,12 @@ DECLARE_STATIC_PRINTKRB_CPULOCK(printk_c
/* record buffer */
DECLARE_STATIC_PRINTKRB(printk_rb, CONFIG_LOG_BUF_SHIFT, &printk_cpulock);
@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static size_t syslog_partial;
static bool syslog_time;
-@@ -1249,30 +1251,42 @@ static size_t msg_print_text(const struc
+@@ -1293,30 +1295,42 @@ static size_t msg_print_text(const struc
return len;
}
@@ -85,7 +85,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -1282,131 +1296,212 @@ static int syslog_print(char __user *buf
+@@ -1326,131 +1340,212 @@ static int syslog_print(char __user *buf
if (!syslog_partial)
syslog_time = printk_time;
@@ -360,7 +360,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
error = check_syslog_permissions(type, source);
if (error)
-@@ -1424,11 +1519,49 @@ int do_syslog(int type, char __user *buf
+@@ -1468,11 +1563,49 @@ int do_syslog(int type, char __user *buf
return 0;
if (!access_ok(buf, len))
return -EFAULT;
@@ -414,7 +414,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
break;
/* Read/clear last kernel messages */
case SYSLOG_ACTION_READ_CLEAR:
-@@ -1473,47 +1606,45 @@ int do_syslog(int type, char __user *buf
+@@ -1517,47 +1650,45 @@ int do_syslog(int type, char __user *buf
break;
/* Number of chars in the log buffer */
case SYSLOG_ACTION_SIZE_UNREAD:
@@ -484,7 +484,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return error;
}
-@@ -1932,7 +2063,6 @@ EXPORT_SYMBOL(printk);
+@@ -1979,7 +2110,6 @@ EXPORT_SYMBOL(printk);
#define printk_time false
static u64 syslog_seq;
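Aside: the syslog(2) behaviour this patch reimplements can be exercised from userspace through klogctl(); a minimal sketch (3 is SYSLOG_ACTION_READ_ALL; error handling omitted):

        #include <stdio.h>
        #include <sys/klog.h>

        int main(void)
        {
                static char buf[1 << 17];
                int n = klogctl(3, buf, sizeof(buf)); /* read the whole log */

                if (n > 0)
                        fwrite(buf, 1, n, stdout);
                return 0;
        }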
diff --git a/debian/patches-rt/0023-x86-fpu-Defer-FPU-state-load-until-return-to-userspa.patch b/debian/patches-rt/0023-x86-fpu-Defer-FPU-state-load-until-return-to-userspa.patch
deleted file mode 100644
index a0b30e568..000000000
--- a/debian/patches-rt/0023-x86-fpu-Defer-FPU-state-load-until-return-to-userspa.patch
+++ /dev/null
@@ -1,591 +0,0 @@
-From: Rik van Riel <riel@surriel.com>
-Date: Sun, 9 Sep 2018 18:30:53 +0200
-Subject: [PATCH 23/27] x86/fpu: Defer FPU state load until return to userspace
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-Defer loading of FPU state until return to userspace. This gives
-the kernel the potential to skip loading FPU state for tasks that
-stay in kernel mode, or for tasks that end up with repeated
-invocations of kernel_fpu_begin() & kernel_fpu_end().
-
-The __fpregs_changes_{begin|end}() section ensures that the registers
-remain unchanged. Otherwise a context switch or a BH could save the
-registers to its FPU context and the processor's FPU registers would
-become random if modified at the same time.
-
-KVM swaps the host/guest registers on the entry/exit path. I kept the flow
-as is. First it ensures that the registers are loaded and then saves the
-current (host) state before it loads the guest's registers. The swap is
-done at the very end with interrupts disabled so it should not change
-anymore before the guest is entered. The read/save version seems to be
-cheaper compared to memcpy() in a micro benchmark.
-
-Each thread gets TIF_NEED_FPU_LOAD set as part of fork() / fpu__copy().
-For kernel threads, this flag never gets cleared, which avoids saving /
-restoring the FPU state for kernel threads and during in-kernel usage of
-the FPU registers.
-
-Signed-off-by: Rik van Riel <riel@surriel.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/entry/common.c | 8 ++
- arch/x86/include/asm/fpu/api.h | 22 +++++++
- arch/x86/include/asm/fpu/internal.h | 27 +++++----
- arch/x86/include/asm/trace/fpu.h | 5 +
- arch/x86/kernel/fpu/core.c | 105 +++++++++++++++++++++++++++---------
- arch/x86/kernel/fpu/signal.c | 48 +++++++++-------
- arch/x86/kernel/process.c | 2
- arch/x86/kernel/process_32.c | 5 +
- arch/x86/kernel/process_64.c | 5 +
- arch/x86/kvm/x86.c | 20 +++++-
- 10 files changed, 181 insertions(+), 66 deletions(-)
-
---- a/arch/x86/entry/common.c
-+++ b/arch/x86/entry/common.c
-@@ -31,6 +31,7 @@
- #include <asm/vdso.h>
- #include <linux/uaccess.h>
- #include <asm/cpufeature.h>
-+#include <asm/fpu/api.h>
-
- #define CREATE_TRACE_POINTS
- #include <trace/events/syscalls.h>
-@@ -196,6 +197,13 @@ static void exit_to_usermode_loop(struct
- if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
- exit_to_usermode_loop(regs, cached_flags);
-
-+ /* Reload ti->flags; we may have rescheduled above. */
-+ cached_flags = READ_ONCE(ti->flags);
-+
-+ fpregs_assert_state_consistent();
-+ if (unlikely(cached_flags & _TIF_NEED_FPU_LOAD))
-+ switch_fpu_return();
-+
- #ifdef CONFIG_COMPAT
- /*
- * Compat syscalls set TS_COMPAT. Make sure we clear it before
---- a/arch/x86/include/asm/fpu/api.h
-+++ b/arch/x86/include/asm/fpu/api.h
-@@ -10,7 +10,7 @@
-
- #ifndef _ASM_X86_FPU_API_H
- #define _ASM_X86_FPU_API_H
--#include <linux/preempt.h>
-+#include <linux/bottom_half.h>
-
- /*
- * Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It
-@@ -22,17 +22,37 @@
- extern void kernel_fpu_begin(void);
- extern void kernel_fpu_end(void);
- extern bool irq_fpu_usable(void);
-+extern void fpregs_mark_activate(void);
-
-+/*
-+ * Use fpregs_lock() while editing CPU's FPU registers or fpu->state.
-+ * A context switch will (and softirq might) save the CPU's FPU registers to
-+ * fpu->state and set TIF_NEED_FPU_LOAD leaving CPU's FPU registers in a random
-+ * state.
-+ */
- static inline void fpregs_lock(void)
- {
- preempt_disable();
-+ local_bh_disable();
- }
-
- static inline void fpregs_unlock(void)
- {
-+ local_bh_enable();
- preempt_enable();
- }
-
-+#ifdef CONFIG_X86_DEBUG_FPU
-+extern void fpregs_assert_state_consistent(void);
-+#else
-+static inline void fpregs_assert_state_consistent(void) { }
-+#endif
-+
-+/*
-+ * Load the task FPU state before returning to userspace.
-+ */
-+extern void switch_fpu_return(void);
-+
- /*
- * Query the presence of one or more xfeatures. Works on any legacy CPU as well.
- *
---- a/arch/x86/include/asm/fpu/internal.h
-+++ b/arch/x86/include/asm/fpu/internal.h
-@@ -30,7 +30,7 @@ extern void fpu__prepare_write(struct fp
- extern void fpu__save(struct fpu *fpu);
- extern int fpu__restore_sig(void __user *buf, int ia32_frame);
- extern void fpu__drop(struct fpu *fpu);
--extern int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu);
-+extern int fpu__copy(struct task_struct *dst, struct task_struct *src);
- extern void fpu__clear(struct fpu *fpu);
- extern int fpu__exception_code(struct fpu *fpu, int trap_nr);
- extern int dump_fpu(struct pt_regs *ptregs, struct user_i387_struct *fpstate);
-@@ -559,13 +559,20 @@ static inline void fpregs_activate(struc
- trace_x86_fpu_regs_activated(fpu);
- }
-
--static inline void __fpregs_load_activate(struct fpu *fpu, int cpu)
-+static inline void __fpregs_load_activate(void)
- {
-+ struct fpu *fpu = &current->thread.fpu;
-+ int cpu = smp_processor_id();
-+
-+ if (WARN_ON_ONCE(current->mm == NULL))
-+ return;
-+
- if (!fpregs_state_valid(fpu, cpu)) {
-- if (current->mm)
-- copy_kernel_to_fpregs(&fpu->state);
-+ copy_kernel_to_fpregs(&fpu->state);
- fpregs_activate(fpu);
-+ fpu->last_cpu = cpu;
- }
-+ clear_thread_flag(TIF_NEED_FPU_LOAD);
- }
-
- /*
-@@ -576,8 +583,8 @@ static inline void __fpregs_load_activat
- * - switch_fpu_prepare() saves the old state.
- * This is done within the context of the old process.
- *
-- * - switch_fpu_finish() restores the new state as
-- * necessary.
-+ * - switch_fpu_finish() sets TIF_NEED_FPU_LOAD; the floating point state
-+ * will get loaded on return to userspace, or when the kernel needs it.
- *
- * The FPU context is only stored/restore for user task and ->mm is used to
- * distinguish between kernel and user threads.
-@@ -607,10 +614,10 @@ switch_fpu_prepare(struct fpu *old_fpu,
- */
-
- /*
-- * Set up the userspace FPU context for the new task, if the task
-- * has used the FPU.
-+ * Load PKRU from the FPU context if available. Delay the loading of the
-+ * complete FPU state until the return to userland.
- */
--static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
-+static inline void switch_fpu_finish(struct fpu *new_fpu)
- {
- struct pkru_state *pk;
- u32 pkru_val = init_pkru_value;
-@@ -618,7 +625,7 @@ static inline void switch_fpu_finish(str
- if (!static_cpu_has(X86_FEATURE_FPU))
- return;
-
-- __fpregs_load_activate(new_fpu, cpu);
-+ set_thread_flag(TIF_NEED_FPU_LOAD);
-
- if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
- return;
---- a/arch/x86/include/asm/trace/fpu.h
-+++ b/arch/x86/include/asm/trace/fpu.h
-@@ -13,19 +13,22 @@ DECLARE_EVENT_CLASS(x86_fpu,
-
- TP_STRUCT__entry(
- __field(struct fpu *, fpu)
-+ __field(bool, load_fpu)
- __field(u64, xfeatures)
- __field(u64, xcomp_bv)
- ),
-
- TP_fast_assign(
- __entry->fpu = fpu;
-+ __entry->load_fpu = test_thread_flag(TIF_NEED_FPU_LOAD);
- if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
- __entry->xfeatures = fpu->state.xsave.header.xfeatures;
- __entry->xcomp_bv = fpu->state.xsave.header.xcomp_bv;
- }
- ),
-- TP_printk("x86/fpu: %p xfeatures: %llx xcomp_bv: %llx",
-+ TP_printk("x86/fpu: %p load: %d xfeatures: %llx xcomp_bv: %llx",
- __entry->fpu,
-+ __entry->load_fpu,
- __entry->xfeatures,
- __entry->xcomp_bv
- )
---- a/arch/x86/kernel/fpu/core.c
-+++ b/arch/x86/kernel/fpu/core.c
-@@ -102,23 +102,20 @@ static void __kernel_fpu_begin(void)
- kernel_fpu_disable();
-
- if (current->mm) {
-- /*
-- * Ignore return value -- we don't care if reg state
-- * is clobbered.
-- */
-- copy_fpregs_to_fpstate(fpu);
-- } else {
-- __cpu_invalidate_fpregs_state();
-+ if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
-+ set_thread_flag(TIF_NEED_FPU_LOAD);
-+ /*
-+ * Ignore return value -- we don't care if reg state
-+ * is clobbered.
-+ */
-+ copy_fpregs_to_fpstate(fpu);
-+ }
- }
-+ __cpu_invalidate_fpregs_state();
- }
-
- static void __kernel_fpu_end(void)
- {
-- struct fpu *fpu = &current->thread.fpu;
--
-- if (current->mm)
-- copy_kernel_to_fpregs(&fpu->state);
--
- kernel_fpu_enable();
- }
-
-@@ -145,14 +142,17 @@ void fpu__save(struct fpu *fpu)
- {
- WARN_ON_FPU(fpu != &current->thread.fpu);
-
-- preempt_disable();
-+ fpregs_lock();
- trace_x86_fpu_before_save(fpu);
-
-- if (!copy_fpregs_to_fpstate(fpu))
-- copy_kernel_to_fpregs(&fpu->state);
-+ if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
-+ if (!copy_fpregs_to_fpstate(fpu)) {
-+ copy_kernel_to_fpregs(&fpu->state);
-+ }
-+ }
-
- trace_x86_fpu_after_save(fpu);
-- preempt_enable();
-+ fpregs_unlock();
- }
- EXPORT_SYMBOL_GPL(fpu__save);
-
-@@ -185,8 +185,11 @@ void fpstate_init(union fpregs_state *st
- }
- EXPORT_SYMBOL_GPL(fpstate_init);
-
--int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
-+int fpu__copy(struct task_struct *dst, struct task_struct *src)
- {
-+ struct fpu *dst_fpu = &dst->thread.fpu;
-+ struct fpu *src_fpu = &src->thread.fpu;
-+
- dst_fpu->last_cpu = -1;
-
- if (!static_cpu_has(X86_FEATURE_FPU))
-@@ -201,16 +204,23 @@ int fpu__copy(struct fpu *dst_fpu, struc
- memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);
-
- /*
-- * Save current FPU registers directly into the child
-- * FPU context, without any memory-to-memory copying.
-+ * If the FPU registers are not current just memcpy() the state.
-+ * Otherwise save current FPU registers directly into the child's FPU
-+ * context, without any memory-to-memory copying.
- *
- * ( The function 'fails' in the FNSAVE case, which destroys
-- * register contents so we have to copy them back. )
-+ * register contents so we have to load them back. )
- */
-- if (!copy_fpregs_to_fpstate(dst_fpu)) {
-- memcpy(&src_fpu->state, &dst_fpu->state, fpu_kernel_xstate_size);
-- copy_kernel_to_fpregs(&src_fpu->state);
-- }
-+ fpregs_lock();
-+ if (test_thread_flag(TIF_NEED_FPU_LOAD))
-+ memcpy(&dst_fpu->state, &src_fpu->state, fpu_kernel_xstate_size);
-+
-+ else if (!copy_fpregs_to_fpstate(dst_fpu))
-+ copy_kernel_to_fpregs(&dst_fpu->state);
-+
-+ fpregs_unlock();
-+
-+ set_tsk_thread_flag(dst, TIF_NEED_FPU_LOAD);
-
- trace_x86_fpu_copy_src(src_fpu);
- trace_x86_fpu_copy_dst(dst_fpu);
-@@ -226,10 +236,9 @@ static void fpu__initialize(struct fpu *
- {
- WARN_ON_FPU(fpu != &current->thread.fpu);
-
-+ set_thread_flag(TIF_NEED_FPU_LOAD);
- fpstate_init(&fpu->state);
- trace_x86_fpu_init_state(fpu);
--
-- trace_x86_fpu_activate_state(fpu);
- }
-
- /*
-@@ -308,6 +317,8 @@ void fpu__drop(struct fpu *fpu)
- */
- static inline void copy_init_fpstate_to_fpregs(void)
- {
-+ fpregs_lock();
-+
- if (use_xsave())
- copy_kernel_to_xregs(&init_fpstate.xsave, -1);
- else if (static_cpu_has(X86_FEATURE_FXSR))
-@@ -317,6 +328,9 @@ static inline void copy_init_fpstate_to_
-
- if (boot_cpu_has(X86_FEATURE_OSPKE))
- copy_init_pkru_to_fpregs();
-+
-+ fpregs_mark_activate();
-+ fpregs_unlock();
- }
-
- /*
-@@ -340,6 +354,45 @@ void fpu__clear(struct fpu *fpu)
- }
-
- /*
-+ * Load FPU context before returning to userspace.
-+ */
-+void switch_fpu_return(void)
-+{
-+ if (!static_cpu_has(X86_FEATURE_FPU))
-+ return;
-+
-+ __fpregs_load_activate();
-+}
-+EXPORT_SYMBOL_GPL(switch_fpu_return);
-+
-+#ifdef CONFIG_X86_DEBUG_FPU
-+/*
-+ * If the current FPU state, according to its tracking (loaded FPU ctx on this
-+ * CPU), is not valid then we must have TIF_NEED_FPU_LOAD set so the context
-+ * is loaded on return to userland.
-+ */
-+void fpregs_assert_state_consistent(void)
-+{
-+ struct fpu *fpu = &current->thread.fpu;
-+
-+ if (test_thread_flag(TIF_NEED_FPU_LOAD))
-+ return;
-+ WARN_ON_FPU(!fpregs_state_valid(fpu, smp_processor_id()));
-+}
-+EXPORT_SYMBOL_GPL(fpregs_assert_state_consistent);
-+#endif
-+
-+void fpregs_mark_activate(void)
-+{
-+ struct fpu *fpu = &current->thread.fpu;
-+
-+ fpregs_activate(fpu);
-+ fpu->last_cpu = smp_processor_id();
-+ clear_thread_flag(TIF_NEED_FPU_LOAD);
-+}
-+EXPORT_SYMBOL_GPL(fpregs_mark_activate);
-+
-+/*
- * x87 math exception handling:
- */
-
---- a/arch/x86/kernel/fpu/signal.c
-+++ b/arch/x86/kernel/fpu/signal.c
-@@ -269,11 +269,9 @@ static int __fpu__restore_sig(void __use
- struct fpu *fpu = &tsk->thread.fpu;
- int state_size = fpu_kernel_xstate_size;
- struct user_i387_ia32_struct env;
-- union fpregs_state *state;
- u64 xfeatures = 0;
- int fx_only = 0;
- int ret = 0;
-- void *tmp;
-
- ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
- IS_ENABLED(CONFIG_IA32_EMULATION));
-@@ -308,14 +306,18 @@ static int __fpu__restore_sig(void __use
- }
- }
-
-- tmp = kzalloc(sizeof(*state) + fpu_kernel_xstate_size + 64, GFP_KERNEL);
-- if (!tmp)
-- return -ENOMEM;
-- state = PTR_ALIGN(tmp, 64);
-+ /*
-+ * The current state of the FPU registers does not matter. By setting
-+ * TIF_NEED_FPU_LOAD unconditionally it is ensured that our xstate is
-+ * not modified on context switch and that the xstate is considered to be
-+ * loaded again on return to userland (overriding last_cpu avoids the
-+ * optimisation).
-+ */
-+ set_thread_flag(TIF_NEED_FPU_LOAD);
-+ __fpu_invalidate_fpregs_state(fpu);
-
- if ((unsigned long)buf_fx % 64)
- fx_only = 1;
--
- /*
- * For 32-bit frames with fxstate, copy the fxstate so it can be
- * reconstructed later.
-@@ -330,43 +332,51 @@ static int __fpu__restore_sig(void __use
- u64 init_bv = xfeatures_mask & ~xfeatures;
-
- if (using_compacted_format()) {
-- ret = copy_user_to_xstate(&state->xsave, buf_fx);
-+ ret = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
- } else {
-- ret = __copy_from_user(&state->xsave, buf_fx, state_size);
-+ ret = __copy_from_user(&fpu->state.xsave, buf_fx, state_size);
-
- if (!ret && state_size > offsetof(struct xregs_state, header))
-- ret = validate_xstate_header(&state->xsave.header);
-+ ret = validate_xstate_header(&fpu->state.xsave.header);
- }
- if (ret)
- goto err_out;
-
-- sanitize_restored_xstate(state, envp, xfeatures, fx_only);
-+ sanitize_restored_xstate(&fpu->state, envp, xfeatures, fx_only);
-
-+ fpregs_lock();
- if (unlikely(init_bv))
- copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
-- ret = copy_kernel_to_xregs_err(&state->xsave, xfeatures);
-+ ret = copy_kernel_to_xregs_err(&fpu->state.xsave, xfeatures);
-
- } else if (use_fxsr()) {
-- ret = __copy_from_user(&state->fxsave, buf_fx, state_size);
-- if (ret)
-+ ret = __copy_from_user(&fpu->state.fxsave, buf_fx, state_size);
-+ if (ret) {
-+ ret = -EFAULT;
- goto err_out;
-+ }
-+
-+ sanitize_restored_xstate(&fpu->state, envp, xfeatures, fx_only);
-
-- sanitize_restored_xstate(state, envp, xfeatures, fx_only);
-+ fpregs_lock();
- if (use_xsave()) {
- u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;
- copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
- }
-
-- ret = copy_kernel_to_fxregs_err(&state->fxsave);
-+ ret = copy_kernel_to_fxregs_err(&fpu->state.fxsave);
- } else {
-- ret = __copy_from_user(&state->fsave, buf_fx, state_size);
-+ ret = __copy_from_user(&fpu->state.fsave, buf_fx, state_size);
- if (ret)
- goto err_out;
-- ret = copy_kernel_to_fregs_err(&state->fsave);
-+ fpregs_lock();
-+ ret = copy_kernel_to_fregs_err(&fpu->state.fsave);
- }
-+ if (!ret)
-+ fpregs_mark_activate();
-+ fpregs_unlock();
-
- err_out:
-- kfree(tmp);
- if (ret)
- fpu__clear(fpu);
- return ret;
---- a/arch/x86/kernel/process.c
-+++ b/arch/x86/kernel/process.c
-@@ -101,7 +101,7 @@ int arch_dup_task_struct(struct task_str
- dst->thread.vm86 = NULL;
- #endif
-
-- return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
-+ return fpu__copy(dst, src);
- }
-
- /*
---- a/arch/x86/kernel/process_32.c
-+++ b/arch/x86/kernel/process_32.c
-@@ -234,7 +234,8 @@ EXPORT_SYMBOL_GPL(start_thread);
-
- /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
-
-- switch_fpu_prepare(prev_fpu, cpu);
-+ if (!test_thread_flag(TIF_NEED_FPU_LOAD))
-+ switch_fpu_prepare(prev_fpu, cpu);
-
- /*
- * Save away %gs. No need to save %fs, as it was saved on the
-@@ -290,7 +291,7 @@ EXPORT_SYMBOL_GPL(start_thread);
-
- this_cpu_write(current_task, next_p);
-
-- switch_fpu_finish(next_fpu, cpu);
-+ switch_fpu_finish(next_fpu);
-
- /* Load the Intel cache allocation PQR MSR. */
- resctrl_sched_in();
---- a/arch/x86/kernel/process_64.c
-+++ b/arch/x86/kernel/process_64.c
-@@ -520,7 +520,8 @@ void compat_start_thread(struct pt_regs
- WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
- this_cpu_read(irq_count) != -1);
-
-- switch_fpu_prepare(prev_fpu, cpu);
-+ if (!test_thread_flag(TIF_NEED_FPU_LOAD))
-+ switch_fpu_prepare(prev_fpu, cpu);
-
- /* We must save %fs and %gs before load_TLS() because
- * %fs and %gs may be cleared by load_TLS().
-@@ -572,7 +573,7 @@ void compat_start_thread(struct pt_regs
- this_cpu_write(current_task, next_p);
- this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));
-
-- switch_fpu_finish(next_fpu, cpu);
-+ switch_fpu_finish(next_fpu);
-
- /* Reload sp0. */
- update_task_stack(next_p);
---- a/arch/x86/kvm/x86.c
-+++ b/arch/x86/kvm/x86.c
-@@ -7868,6 +7868,10 @@ static int vcpu_enter_guest(struct kvm_v
- wait_lapic_expire(vcpu);
- guest_enter_irqoff();
-
-+ fpregs_assert_state_consistent();
-+ if (test_thread_flag(TIF_NEED_FPU_LOAD))
-+ switch_fpu_return();
-+
- if (unlikely(vcpu->arch.switch_db_regs)) {
- set_debugreg(0, 7);
- set_debugreg(vcpu->arch.eff_db[0], 0);
-@@ -8126,22 +8130,30 @@ static int complete_emulated_mmio(struct
- /* Swap (qemu) user FPU context for the guest FPU context. */
- static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
- {
-- preempt_disable();
-+ fpregs_lock();
-+
- copy_fpregs_to_fpstate(&current->thread.fpu);
- /* PKRU is separately restored in kvm_x86_ops->run. */
- __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu->state,
- ~XFEATURE_MASK_PKRU);
-- preempt_enable();
-+
-+ fpregs_mark_activate();
-+ fpregs_unlock();
-+
- trace_kvm_fpu(1);
- }
-
- /* When vcpu_run ends, restore user space FPU context. */
- static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
- {
-- preempt_disable();
-+ fpregs_lock();
-+
- copy_fpregs_to_fpstate(vcpu->arch.guest_fpu);
- copy_kernel_to_fpregs(&current->thread.fpu.state);
-- preempt_enable();
-+
-+ fpregs_mark_activate();
-+ fpregs_unlock();
-+
- ++vcpu->stat.fpu_reload;
- trace_kvm_fpu(0);
- }
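Taken together, the hunks of this deleted patch establish one usage pattern that the follow-up patches rely on: take fpregs_lock(), make the registers valid if needed, mark them active, unlock. Condensed into a sketch (assembled from the hunks above, not a literal excerpt):

        fpregs_lock();
        if (test_thread_flag(TIF_NEED_FPU_LOAD))
                __fpregs_load_activate();       /* load fpu->state into the CPU */
        /* ... work on the live FPU registers ... */
        fpregs_mark_activate();                 /* registers hold the valid state */
        fpregs_unlock();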
diff --git a/debian/patches-rt/0024-printk-implement-kmsg_dump.patch b/debian/patches-rt/0024-printk-implement-kmsg_dump.patch
index 35bb5effb..4adf20447 100644
--- a/debian/patches-rt/0024-printk-implement-kmsg_dump.patch
+++ b/debian/patches-rt/0024-printk-implement-kmsg_dump.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 12 Feb 2019 15:30:02 +0100
Subject: [PATCH 24/25] printk: implement kmsg_dump
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Since printk messages are now logged to a new ring buffer, update
the kmsg_dump functions to pull the messages from there.
@@ -30,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_PRINTK
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -397,13 +397,13 @@ static size_t syslog_partial;
+@@ -407,13 +407,13 @@ static size_t syslog_partial;
static bool syslog_time;
/* index and sequence number of the first record stored in the buffer */
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* the next printk record to read after the last 'clear' command */
static u64 clear_seq;
static u32 clear_idx;
-@@ -446,38 +446,6 @@ static char *log_dict(const struct print
+@@ -460,38 +460,6 @@ static char *log_dict(const struct print
return (char *)msg + sizeof(struct printk_log) + msg->text_len;
}
@@ -85,7 +85,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void printk_emergency(char *buffer, int level, u64 ts_nsec, u16 cpu,
char *text, u16 text_len);
-@@ -2063,9 +2031,7 @@ EXPORT_SYMBOL(printk);
+@@ -2110,9 +2078,7 @@ EXPORT_SYMBOL(printk);
#define printk_time false
static u64 syslog_seq;
@@ -95,7 +95,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static char *log_text(const struct printk_log *msg) { return NULL; }
static char *log_dict(const struct printk_log *msg) { return NULL; }
static struct printk_log *log_from_idx(u32 idx) { return NULL; }
-@@ -2974,7 +2940,6 @@ module_param_named(always_kmsg_dump, alw
+@@ -3022,7 +2988,6 @@ module_param_named(always_kmsg_dump, alw
void kmsg_dump(enum kmsg_dump_reason reason)
{
struct kmsg_dumper *dumper;
@@ -103,7 +103,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if ((reason > KMSG_DUMP_OOPS) && !always_kmsg_dump)
return;
-@@ -2987,12 +2952,7 @@ void kmsg_dump(enum kmsg_dump_reason rea
+@@ -3035,12 +3000,7 @@ void kmsg_dump(enum kmsg_dump_reason rea
/* initialize iterator with data about the stored records */
dumper->active = true;
@@ -117,7 +117,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* invoke dumper which will iterate over records */
dumper->dump(dumper, reason);
-@@ -3025,33 +2985,67 @@ void kmsg_dump(enum kmsg_dump_reason rea
+@@ -3073,33 +3033,67 @@ void kmsg_dump(enum kmsg_dump_reason rea
bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
char *line, size_t size, size_t *len)
{
@@ -200,7 +200,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/**
-@@ -3074,12 +3068,11 @@ bool kmsg_dump_get_line_nolock(struct km
+@@ -3122,12 +3116,11 @@ bool kmsg_dump_get_line_nolock(struct km
bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
char *line, size_t size, size_t *len)
{
@@ -215,7 +215,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ret;
}
-@@ -3107,74 +3100,101 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
+@@ -3155,74 +3148,101 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
char *buf, size_t size, size_t *len)
{
@@ -369,7 +369,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
-@@ -3190,10 +3210,8 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
+@@ -3238,10 +3258,8 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
*/
void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper)
{
@@ -382,7 +382,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/**
-@@ -3206,11 +3224,9 @@ void kmsg_dump_rewind_nolock(struct kmsg
+@@ -3254,11 +3272,9 @@ void kmsg_dump_rewind_nolock(struct kmsg
*/
void kmsg_dump_rewind(struct kmsg_dumper *dumper)
{
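For context, the consumer side of kmsg_dump is untouched by this patch: a dumper still registers a callback and iterates records. A minimal sketch against the interfaces visible above (my_dump/my_dumper are hypothetical names):

        static void my_dump(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason)
        {
                static char line[1024];
                size_t len;

                kmsg_dump_rewind(dumper);
                while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len))
                        ; /* persist 'len' bytes of 'line' */
        }

        static struct kmsg_dumper my_dumper = {
                .dump = my_dump,
        };

        /* at init time: kmsg_dump_register(&my_dumper); */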
diff --git a/debian/patches-rt/0024-x86-fpu-Add-a-fastpath-to-__fpu__restore_sig.patch b/debian/patches-rt/0024-x86-fpu-Add-a-fastpath-to-__fpu__restore_sig.patch
deleted file mode 100644
index 11691160a..000000000
--- a/debian/patches-rt/0024-x86-fpu-Add-a-fastpath-to-__fpu__restore_sig.patch
+++ /dev/null
@@ -1,53 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 2 Apr 2019 13:02:25 +0200
-Subject: [PATCH 24/27] x86/fpu: Add a fastpath to __fpu__restore_sig()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-The previous commits refactor the restoration of the FPU registers so
-that they can be loaded from in-kernel memory. This overhead can be
-avoided if the load can be performed without a pagefault.
-
-Attempt to restore FPU registers by invoking
-copy_user_to_fpregs_zeroing(). If it fails try the slowpath which can handle
-pagefaults.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/kernel/fpu/signal.c | 16 ++++++++++++++--
- 1 file changed, 14 insertions(+), 2 deletions(-)
-
---- a/arch/x86/kernel/fpu/signal.c
-+++ b/arch/x86/kernel/fpu/signal.c
-@@ -242,10 +242,10 @@ sanitize_restored_xstate(union fpregs_st
- /*
- * Restore the extended state if present. Otherwise, restore the FP/SSE state.
- */
--static inline int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_only)
-+static int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_only)
- {
- if (use_xsave()) {
-- if ((unsigned long)buf % 64 || fx_only) {
-+ if (fx_only) {
- u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;
- copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
- return copy_user_to_fxregs(buf);
-@@ -327,7 +327,19 @@ static int __fpu__restore_sig(void __use
- if (ret)
- goto err_out;
- envp = &env;
-+ } else {
-+ fpregs_lock();
-+ pagefault_disable();
-+ ret = copy_user_to_fpregs_zeroing(buf_fx, xfeatures, fx_only);
-+ pagefault_enable();
-+ if (!ret) {
-+ fpregs_mark_activate();
-+ fpregs_unlock();
-+ return 0;
-+ }
-+ fpregs_unlock();
- }
-+
- if (use_xsave() && !fx_only) {
- u64 init_bv = xfeatures_mask & ~xfeatures;
-
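The control flow this deleted patch added is a classic optimistic fastpath: attempt the load from user memory with page faults disabled, and only fall back to the fault-capable slowpath on failure. Condensed from the hunk above (illustrative):

        fpregs_lock();
        pagefault_disable();
        ret = copy_user_to_fpregs_zeroing(buf_fx, xfeatures, fx_only);
        pagefault_enable();
        if (!ret) {
                fpregs_mark_activate();
                fpregs_unlock();
                return 0;               /* fastpath succeeded */
        }
        fpregs_unlock();
        /* fall through to the slowpath, which may fault */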
diff --git a/debian/patches-rt/0025-printk-remove-unused-code.patch b/debian/patches-rt/0025-printk-remove-unused-code.patch
index 721617d8f..ae60cb659 100644
--- a/debian/patches-rt/0025-printk-remove-unused-code.patch
+++ b/debian/patches-rt/0025-printk-remove-unused-code.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Tue, 12 Feb 2019 15:30:03 +0100
Subject: [PATCH 25/25] printk: remove unused code
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Code relating to the safe context and anything dealing with the
previous log buffer implementation is no longer in use. Remove it.
@@ -9,30 +9,18 @@ previous log buffer implementation is no longer in use. Remove it.
Signed-off-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- kernel/printk/internal.h | 53 ---------------
- kernel/printk/printk.c | 163 ++++-------------------------------------------
+ kernel/printk/internal.h | 41 -----------
+ kernel/printk/printk.c | 161 ++++-------------------------------------------
lib/bust_spinlocks.c | 3
- 3 files changed, 17 insertions(+), 202 deletions(-)
+ 3 files changed, 16 insertions(+), 189 deletions(-)
delete mode 100644 kernel/printk/internal.h
--- a/kernel/printk/internal.h
+++ /dev/null
-@@ -1,53 +0,0 @@
+@@ -1,41 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * internal.h - printk internal definitions
-- *
-- * This program is free software; you can redistribute it and/or
-- * modify it under the terms of the GNU General Public License
-- * as published by the Free Software Foundation; either version 2
-- * of the License, or (at your option) any later version.
-- *
-- * This program is distributed in the hope that it will be useful,
-- * but WITHOUT ANY WARRANTY; without even the implied warranty of
-- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- * GNU General Public License for more details.
-- *
-- * You should have received a copy of the GNU General Public License
-- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-#include <linux/percpu.h>
-
@@ -73,7 +61,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-#define printk_safe_exit_irq() local_irq_enable()
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -60,7 +60,6 @@
+@@ -61,7 +61,6 @@
#include "console_cmdline.h"
#include "braille.h"
@@ -81,7 +69,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int console_printk[5] = {
CONSOLE_LOGLEVEL_DEFAULT, /* console_loglevel */
-@@ -346,41 +345,6 @@ struct printk_log {
+@@ -356,41 +355,6 @@ struct printk_log {
#endif
;
@@ -123,13 +111,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
DECLARE_STATIC_PRINTKRB_CPULOCK(printk_cpulock);
#ifdef CONFIG_PRINTK
-@@ -390,23 +354,15 @@ DECLARE_STATIC_PRINTKRB(printk_rb, CONFI
+@@ -400,23 +364,15 @@ DECLARE_STATIC_PRINTKRB(printk_rb, CONFI
static DEFINE_MUTEX(syslog_lock);
DECLARE_STATIC_PRINTKRB_ITER(syslog_iter, &printk_rb);
-DECLARE_WAIT_QUEUE_HEAD(log_wait);
-/* the next printk record to read by syslog(READ) or /proc/kmsg */
-+/* the last printk record read by syslog(READ) or /proc/kmsg */
++/* the last printk record read by syslog(READ) or /proc/kmsg */
static u64 syslog_seq;
static size_t syslog_partial;
static bool syslog_time;
@@ -142,14 +130,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
static DEFINE_MUTEX(kmsg_dump_lock);
--/* the next printk record to read after the last 'clear' command */
-+/* the last printk record at the last 'clear' command */
+ /* the next printk record to read after the last 'clear' command */
static u64 clear_seq;
-static u32 clear_idx;
- #define PREFIX_MAX 32
- #define LOG_LINE_MAX (1024 - PREFIX_MAX)
-@@ -414,24 +370,16 @@ static u32 clear_idx;
+ #ifdef CONFIG_PRINTK_CALLER
+ #define PREFIX_MAX 48
+@@ -428,24 +384,16 @@ static u32 clear_idx;
#define LOG_LEVEL(v) ((v) & 0x07)
#define LOG_FACILITY(v) ((v) >> 3 & 0xff)
@@ -176,7 +163,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/* human readable text of the record */
-@@ -944,11 +892,6 @@ const struct file_operations kmsg_fops =
+@@ -970,11 +918,6 @@ const struct file_operations kmsg_fops =
*/
void log_buf_vmcoreinfo_setup(void)
{
@@ -188,7 +175,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Export struct printk_log size and field offsets. User space tools can
* parse it and detect any changes to structure down the line.
-@@ -961,6 +904,8 @@ void log_buf_vmcoreinfo_setup(void)
+@@ -990,6 +933,8 @@ void log_buf_vmcoreinfo_setup(void)
}
#endif
@@ -197,7 +184,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* requested log_buf_len from kernel cmdline */
static unsigned long __initdata new_log_buf_len;
-@@ -1026,9 +971,12 @@ static void __init log_buf_add_cpu(void)
+@@ -1055,9 +1000,12 @@ static void __init log_buf_add_cpu(void)
#else /* !CONFIG_SMP */
static inline void log_buf_add_cpu(void) {}
#endif /* CONFIG_SMP */
@@ -210,7 +197,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
unsigned long flags;
char *new_log_buf;
unsigned int free;
-@@ -1067,6 +1015,7 @@ void __init setup_log_buf(int early)
+@@ -1089,6 +1037,7 @@ void __init setup_log_buf(int early)
pr_info("log_buf_len: %u bytes\n", log_buf_len);
pr_info("early log buf free: %u(%u%%)\n",
free, (free * 100) / __LOG_BUF_LEN);
@@ -218,7 +205,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static bool __read_mostly ignore_loglevel;
-@@ -1962,7 +1911,7 @@ asmlinkage int vprintk_emit(int facility
+@@ -2009,7 +1958,7 @@ asmlinkage int vprintk_emit(int facility
}
EXPORT_SYMBOL(vprintk_emit);
@@ -227,7 +214,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
}
-@@ -2023,31 +1972,6 @@ asmlinkage __visible int printk(const ch
+@@ -2070,31 +2019,6 @@ asmlinkage __visible int printk(const ch
return r;
}
EXPORT_SYMBOL(printk);
@@ -259,8 +246,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* CONFIG_PRINTK */
#ifdef CONFIG_EARLY_PRINTK
-@@ -2343,15 +2267,10 @@ void console_unblank(void)
- void console_flush_on_panic(void)
+@@ -2391,15 +2315,10 @@ void console_unblank(void)
+ void console_flush_on_panic(enum con_flush_mode mode)
{
/*
- * If someone else is holding the console lock, trylock will fail
@@ -278,7 +265,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -2700,43 +2619,6 @@ static int __init printk_late_init(void)
+@@ -2748,43 +2667,6 @@ static int __init printk_late_init(void)
late_initcall(printk_late_init);
#if defined CONFIG_PRINTK
@@ -322,7 +309,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static int printk_kthread_func(void *data)
{
struct prb_iterator iter;
-@@ -2802,22 +2684,9 @@ static int __init init_printk_kthread(vo
+@@ -2850,22 +2732,9 @@ static int __init init_printk_kthread(vo
}
late_initcall(init_printk_kthread);
diff --git a/debian/patches-rt/0025-x86-fpu-Add-a-fastpath-to-copy_fpstate_to_sigframe.patch b/debian/patches-rt/0025-x86-fpu-Add-a-fastpath-to-copy_fpstate_to_sigframe.patch
deleted file mode 100644
index 3d4f1df09..000000000
--- a/debian/patches-rt/0025-x86-fpu-Add-a-fastpath-to-copy_fpstate_to_sigframe.patch
+++ /dev/null
@@ -1,79 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 3 Apr 2019 15:59:12 +0200
-Subject: [PATCH 25/27] x86/fpu: Add a fastpath to copy_fpstate_to_sigframe()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-If the CPU holds the FPU registers for the current task then we can try to
-save them directly to the userland stack frame. This has to be done with
-page faults disabled because we can't fault (while the FPU registers are
-locked) and therefore the operation might fail.
-If it fails, try the slowpath, which can handle faults.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/kernel/fpu/signal.c | 34 ++++++++++++++++++++++------------
- 1 file changed, 22 insertions(+), 12 deletions(-)
-
---- a/arch/x86/kernel/fpu/signal.c
-+++ b/arch/x86/kernel/fpu/signal.c
-@@ -144,8 +144,10 @@ static inline int copy_fpregs_to_sigfram
- * buf == buf_fx for 64-bit frames and 32-bit fsave frame.
- * buf != buf_fx for 32-bit frames with fxstate.
- *
-- * Save the state to task's fpu->state and then copy it to the user frame
-- * pointed by the aligned pointer 'buf_fx'.
-+ * Try to save it directly to the user frame with the page fault handler
-+ * disabled. If this fails then do the slow path where the FPU state is first
-+ * saved to the task's fpu->state and then copied to the user frame pointed to
-+ * by the aligned pointer 'buf_fx'.
- *
- * If this is a 32-bit frame with fxstate, put a fsave header before
- * the aligned state at 'buf_fx'.
-@@ -159,6 +161,7 @@ int copy_fpstate_to_sigframe(void __user
- struct xregs_state *xsave = &fpu->state.xsave;
- struct task_struct *tsk = current;
- int ia32_fxstate = (buf != buf_fx);
-+ int ret = -EFAULT;
-
- ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
- IS_ENABLED(CONFIG_IA32_EMULATION));
-@@ -174,22 +177,29 @@ int copy_fpstate_to_sigframe(void __user
- fpregs_lock();
- /*
- * If we do not need to load the FPU registers at return to userspace
-- * then the CPU has the current state and we need to save it. Otherwise
-- * it is already done and we can skip it.
-+ * then the CPU has the current state. Try to save it directly to
-+ * userland's stack frame if it does not cause a pagefault. If it does,
-+ * try the slowpath.
- */
- if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
-- copy_fpregs_to_fpstate(fpu);
-+ pagefault_disable();
-+ ret = copy_fpregs_to_sigframe(buf_fx);
-+ pagefault_enable();
-+ if (ret)
-+ copy_fpregs_to_fpstate(fpu);
- set_thread_flag(TIF_NEED_FPU_LOAD);
- }
- fpregs_unlock();
-
-- if (using_compacted_format()) {
-- if (copy_xstate_to_user(buf_fx, xsave, 0, size))
-- return -1;
-- } else {
-- fpstate_sanitize_xstate(fpu);
-- if (__copy_to_user(buf_fx, xsave, fpu_user_xstate_size))
-- return -1;
-+ if (ret) {
-+ if (using_compacted_format()) {
-+ if (copy_xstate_to_user(buf_fx, xsave, 0, size))
-+ return -1;
-+ } else {
-+ fpstate_sanitize_xstate(fpu);
-+ if (__copy_to_user(buf_fx, xsave, fpu_user_xstate_size))
-+ return -1;
-+ }
- }
-
- /* Save the fsave header for the 32-bit frames. */
diff --git a/debian/patches-rt/0026-x86-fpu-Restore-FPU-register-in-copy_fpstate_to_sigf.patch b/debian/patches-rt/0026-x86-fpu-Restore-FPU-register-in-copy_fpstate_to_sigf.patch
deleted file mode 100644
index b2e94d328..000000000
--- a/debian/patches-rt/0026-x86-fpu-Restore-FPU-register-in-copy_fpstate_to_sigf.patch
+++ /dev/null
@@ -1,68 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 3 Apr 2019 15:59:13 +0200
-Subject: [PATCH 26/27] x86/fpu: Restore FPU register in
- copy_fpstate_to_sigframe() in order to use the fastpath
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-If a task is scheduled out and receives a signal then it won't be able to take
-the fastpath because the registers aren't available. The slowpath is more
-expensive compared to xrstor + xsave, which usually succeeds.
-
-Some clock_gettime() numbers from a bigger box with AVX512 during bootup:
-- __fpregs_load_activate() takes 140ns - 350ns. If it was the most recent FPU
- context on the CPU then the optimisation in __fpregs_load_activate() will
- skip the load (which was disabled during the test).
-
-- copy_fpregs_to_sigframe() takes 200ns - 450ns if it succeeds. On a
- pagefault it is 1.8us - 3us, usually in the 2.6us area.
-
-- The slowpath takes 1.5 - 6us. Usually in the 2.6us area.
-
-My testcases (including lat_sig) take the fastpath without
-__fpregs_load_activate(). I expect this to be the majority.
-
-Since the slowpath is in the >1us area, it makes sense to load the
-registers and attempt to save them directly. The direct save may fail, but
-that should only happen on the first invocation or after fork() while the
-page is RO.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/kernel/fpu/signal.c | 25 +++++++++++++------------
- 1 file changed, 13 insertions(+), 12 deletions(-)
-
---- a/arch/x86/kernel/fpu/signal.c
-+++ b/arch/x86/kernel/fpu/signal.c
-@@ -176,19 +176,20 @@ int copy_fpstate_to_sigframe(void __user
-
- fpregs_lock();
- /*
-- * If we do not need to load the FPU registers at return to userspace
-- * then the CPU has the current state. Try to save it directly to
-- * userland's stack frame if it does not cause a pagefault. If it does,
-- * try the slowpath.
-+ * Load the FPU register if they are not valid for the current task.
-+ * With a valid FPU state we can attempt to save the state directly to
-+ * userland's stack frame which will likely succeed. If it does not, do
-+ * the slowpath.
- */
-- if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
-- pagefault_disable();
-- ret = copy_fpregs_to_sigframe(buf_fx);
-- pagefault_enable();
-- if (ret)
-- copy_fpregs_to_fpstate(fpu);
-- set_thread_flag(TIF_NEED_FPU_LOAD);
-- }
-+ if (test_thread_flag(TIF_NEED_FPU_LOAD))
-+ __fpregs_load_activate();
-+
-+ pagefault_disable();
-+ ret = copy_fpregs_to_sigframe(buf_fx);
-+ pagefault_enable();
-+ if (ret && !test_thread_flag(TIF_NEED_FPU_LOAD))
-+ copy_fpregs_to_fpstate(fpu);
-+ set_thread_flag(TIF_NEED_FPU_LOAD);
- fpregs_unlock();
-
- if (ret) {
diff --git a/debian/patches-rt/0027-x86-pkeys-add-PKRU-value-to-init_fpstate.patch b/debian/patches-rt/0027-x86-pkeys-add-PKRU-value-to-init_fpstate.patch
deleted file mode 100644
index 6995613e9..000000000
--- a/debian/patches-rt/0027-x86-pkeys-add-PKRU-value-to-init_fpstate.patch
+++ /dev/null
@@ -1,74 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Thu, 21 Mar 2019 17:24:27 +0100
-Subject: [PATCH 27/27] x86/pkeys: add PKRU value to init_fpstate
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-The task's initial PKRU value is set as part of fpu__clear()/
-copy_init_pkru_to_fpregs(). It is not part of init_fpstate.xsave;
-instead it is set explicitly.
-If the user removes the PKRU state from XSAVE in the signal handler then
-__fpu__restore_sig() will restore the missing bits from `init_fpstate'
-and initialize the PKRU value to 0.
-
-Add the `init_pkru_value' to `init_fpstate' so it is set to the init
-value in such a case.
-
-In theory we could drop copy_init_pkru_to_fpregs() because restoring the
-PKRU at return-to-userland should be enough.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/kernel/cpu/common.c | 5 +++++
- arch/x86/mm/pkeys.c | 6 ++++++
- 2 files changed, 11 insertions(+)
-
---- a/arch/x86/kernel/cpu/common.c
-+++ b/arch/x86/kernel/cpu/common.c
-@@ -372,6 +372,8 @@ static bool pku_disabled;
-
- static __always_inline void setup_pku(struct cpuinfo_x86 *c)
- {
-+ struct pkru_state *pk;
-+
- /* check the boot processor, plus compile options for PKU: */
- if (!cpu_feature_enabled(X86_FEATURE_PKU))
- return;
-@@ -382,6 +384,9 @@ static __always_inline void setup_pku(st
- return;
-
- cr4_set_bits(X86_CR4_PKE);
-+ pk = get_xsave_addr(&init_fpstate.xsave, XFEATURE_PKRU);
-+ if (pk)
-+ pk->pkru = init_pkru_value;
- /*
- * Seting X86_CR4_PKE will cause the X86_FEATURE_OSPKE
- * cpuid bit to be set. We need to ensure that we
---- a/arch/x86/mm/pkeys.c
-+++ b/arch/x86/mm/pkeys.c
-@@ -18,6 +18,7 @@
-
- #include <asm/cpufeature.h> /* boot_cpu_has, ... */
- #include <asm/mmu_context.h> /* vma_pkey() */
-+#include <asm/fpu/internal.h> /* init_fpstate */
-
- int __execute_only_pkey(struct mm_struct *mm)
- {
-@@ -161,6 +162,7 @@ static ssize_t init_pkru_read_file(struc
- static ssize_t init_pkru_write_file(struct file *file,
- const char __user *user_buf, size_t count, loff_t *ppos)
- {
-+ struct pkru_state *pk;
- char buf[32];
- ssize_t len;
- u32 new_init_pkru;
-@@ -183,6 +185,10 @@ static ssize_t init_pkru_write_file(stru
- return -EINVAL;
-
- WRITE_ONCE(init_pkru_value, new_init_pkru);
-+ pk = get_xsave_addr(&init_fpstate.xsave, XFEATURE_PKRU);
-+ if (!pk)
-+ return -EINVAL;
-+ pk->pkru = new_init_pkru;
- return count;
- }
-
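Both hunks of this deleted patch share one idiom: look up the PKRU slot inside init_fpstate via get_xsave_addr() and seed it. Extracted for clarity (illustrative):

        struct pkru_state *pk;

        pk = get_xsave_addr(&init_fpstate.xsave, XFEATURE_PKRU);
        if (pk)
                pk->pkru = init_pkru_value;     /* used when XRSTOR inits PKRU */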
diff --git a/debian/patches-rt/0028-x86-fpu-Fault-in-user-stack-if-copy_fpstate_to_sigfr.patch b/debian/patches-rt/0028-x86-fpu-Fault-in-user-stack-if-copy_fpstate_to_sigfr.patch
deleted file mode 100644
index 5e3e058c0..000000000
--- a/debian/patches-rt/0028-x86-fpu-Fault-in-user-stack-if-copy_fpstate_to_sigfr.patch
+++ /dev/null
@@ -1,105 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 29 Apr 2019 18:39:53 +0200
-Subject: [PATCH] x86/fpu: Fault-in user stack if copy_fpstate_to_sigframe()
- fails
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-In the compacted form, XSAVES may save only the XMM+SSE state but skip
-FP (x87 state).
-
-This is denoted by header->xfeatures = 6. The fastpath
-(copy_fpregs_to_sigframe()) does that but _also_ initialises the FP
-state (cwd to 0x37f, mxcsr as we do, remaining fields to 0).
-
-The slowpath (copy_xstate_to_user()) leaves most of the FP
-state untouched. Only mxcsr and mxcsr_flags are set due to
-xfeatures_mxcsr_quirk(). Now that XFEATURE_MASK_FP is set
-unconditionally, see
-
- 04944b793e18 ("x86: xsave: set FP, SSE bits in the xsave header in the user sigcontext"),
-
-on return from the signal, random garbage is loaded as the FP state.
-
-Instead of utilizing copy_xstate_to_user(), fault-in the user memory
-and retry the fast path. Ideally, the fast path succeeds on the second
-attempt but may be retried again if the memory is swapped out due
-to memory pressure. If the user memory can not be faulted-in then
-get_user_pages() returns an error so we don't loop forever.
-
-Fault in memory via get_user_pages() so copy_fpregs_to_sigframe()
-succeeds without a fault.
-
-Fixes: 69277c98f5eef ("x86/fpu: Always store the registers in copy_fpstate_to_sigframe()")
-Reported-by: Kurt Kanzenbach <kurt.kanzenbach@linutronix.de>
-Suggested-by: Dave Hansen <dave.hansen@intel.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Signed-off-by: Borislav Petkov <bp@suse.de>
-Acked-by: Thomas Gleixner <tglx@linutronix.de>
-Cc: Andy Lutomirski <luto@amacapital.net>
-Cc: Dave Hansen <dave.hansen@intel.com>
-Cc: "H. Peter Anvin" <hpa@zytor.com>
-Cc: Ingo Molnar <mingo@kernel.org>
-Cc: Jann Horn <jannh@google.com>
-Cc: Jason@zx2c4.com
-Cc: kvm ML <kvm@vger.kernel.org>
-Cc: Paolo Bonzini <pbonzini@redhat.com>
-Cc: Rik van Riel <riel@surriel.com>
-Cc: rkrcmar@redhat.com
-Cc: x86-ml <x86@kernel.org>
-Link: https://lkml.kernel.org/r/20190429163953.gqxgsc5okqxp4olv@linutronix.de
----
- arch/x86/kernel/fpu/signal.c | 25 ++++++++++++++-----------
- 1 file changed, 14 insertions(+), 11 deletions(-)
-
---- a/arch/x86/kernel/fpu/signal.c
-+++ b/arch/x86/kernel/fpu/signal.c
-@@ -158,7 +158,6 @@ static inline int copy_fpregs_to_sigfram
- int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
- {
- struct fpu *fpu = &current->thread.fpu;
-- struct xregs_state *xsave = &fpu->state.xsave;
- struct task_struct *tsk = current;
- int ia32_fxstate = (buf != buf_fx);
- int ret = -EFAULT;
-@@ -174,12 +173,13 @@ int copy_fpstate_to_sigframe(void __user
- sizeof(struct user_i387_ia32_struct), NULL,
- (struct _fpstate_32 __user *) buf) ? -1 : 1;
-
-+retry:
- fpregs_lock();
- /*
- * Load the FPU register if they are not valid for the current task.
- * With a valid FPU state we can attempt to save the state directly to
-- * userland's stack frame which will likely succeed. If it does not, do
-- * the slowpath.
-+ * userland's stack frame which will likely succeed. If it does not,
-+ * resolve the fault in the user memory and try again.
- */
- if (test_thread_flag(TIF_NEED_FPU_LOAD))
- __fpregs_load_activate();
-@@ -193,14 +193,17 @@ int copy_fpstate_to_sigframe(void __user
- fpregs_unlock();
-
- if (ret) {
-- if (using_compacted_format()) {
-- if (copy_xstate_to_user(buf_fx, xsave, 0, size))
-- return -1;
-- } else {
-- fpstate_sanitize_xstate(fpu);
-- if (__copy_to_user(buf_fx, xsave, fpu_user_xstate_size))
-- return -1;
-- }
-+ int aligned_size;
-+ int nr_pages;
-+
-+ aligned_size = offset_in_page(buf_fx) + fpu_user_xstate_size;
-+ nr_pages = DIV_ROUND_UP(aligned_size, PAGE_SIZE);
-+
-+ ret = get_user_pages((unsigned long)buf_fx, nr_pages,
-+ FOLL_WRITE, NULL, NULL);
-+ if (ret == nr_pages)
-+ goto retry;
-+ return -EFAULT;
- }
-
- /* Save the fsave header for the 32-bit frames. */
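The retry scheme of this deleted patch is worth restating: save with faults disabled, and on failure fault the destination pages in with get_user_pages() before retrying, so the loop only repeats while progress is possible. Condensed from the hunk above (fpregs locking elided):

        retry:
                pagefault_disable();
                ret = copy_fpregs_to_sigframe(buf_fx);
                pagefault_enable();

                if (ret) {
                        int aligned_size = offset_in_page(buf_fx) + fpu_user_xstate_size;
                        int nr_pages = DIV_ROUND_UP(aligned_size, PAGE_SIZE);

                        if (get_user_pages((unsigned long)buf_fx, nr_pages,
                                           FOLL_WRITE, NULL, NULL) == nr_pages)
                                goto retry;     /* destination is resident now */
                        return -EFAULT;         /* fault cannot be resolved */
                }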
diff --git a/debian/patches-rt/0029-x86-fpu-Remove-unnecessary-saving-of-FPU-registers-i.patch b/debian/patches-rt/0029-x86-fpu-Remove-unnecessary-saving-of-FPU-registers-i.patch
deleted file mode 100644
index 9749a3a67..000000000
--- a/debian/patches-rt/0029-x86-fpu-Remove-unnecessary-saving-of-FPU-registers-i.patch
+++ /dev/null
@@ -1,67 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 30 Apr 2019 10:31:26 +0200
-Subject: [PATCH] x86/fpu: Remove unnecessary saving of FPU registers in
- copy_fpstate_to_sigframe()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-Since commit:
-
- eeec00d73be2e ("x86/fpu: Fault-in user stack if copy_fpstate_to_sigframe() fails")
-
-there is no need to have FPU registers saved if
-copy_fpregs_to_sigframe() fails, because we retry it after we resolved
-the fault condition.
-
-Saving the registers is not wrong but it is not necessary and it forces us
-to load the FPU registers on the retry attempt.
-
-Don't save the FPU registers if copy_fpstate_to_sigframe() fails.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Cc: Andy Lutomirski <luto@kernel.org>
-Cc: Borislav Petkov <bp@alien8.de>
-Cc: Dave Hansen <dave.hansen@intel.com>
-Cc: Fenghua Yu <fenghua.yu@intel.com>
-Cc: H. Peter Anvin <hpa@zytor.com>
-Cc: Jason@zx2c4.com
-Cc: Linus Torvalds <torvalds@linux-foundation.org>
-Cc: Oleg Nesterov <oleg@redhat.com>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Thomas Gleixner <tglx@linutronix.de>
-Cc: bp@suse.de
-Cc: jannh@google.com
-Cc: kurt.kanzenbach@linutronix.de
-Cc: kvm@vger.kernel.org
-Cc: pbonzini@redhat.com
-Cc: riel@surriel.com
-Cc: rkrcmar@redhat.com
-Link: http://lkml.kernel.org/r/20190430083126.rilbb76yc27vrem5@linutronix.de
-Signed-off-by: Ingo Molnar <mingo@kernel.org>
----
- arch/x86/kernel/fpu/signal.c | 6 +-----
- 1 file changed, 1 insertion(+), 5 deletions(-)
-
---- a/arch/x86/kernel/fpu/signal.c
-+++ b/arch/x86/kernel/fpu/signal.c
-@@ -157,10 +157,9 @@ static inline int copy_fpregs_to_sigfram
- */
- int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
- {
-- struct fpu *fpu = &current->thread.fpu;
- struct task_struct *tsk = current;
- int ia32_fxstate = (buf != buf_fx);
-- int ret = -EFAULT;
-+ int ret;
-
- ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
- IS_ENABLED(CONFIG_IA32_EMULATION));
-@@ -187,9 +186,6 @@ int copy_fpstate_to_sigframe(void __user
- pagefault_disable();
- ret = copy_fpregs_to_sigframe(buf_fx);
- pagefault_enable();
-- if (ret && !test_thread_flag(TIF_NEED_FPU_LOAD))
-- copy_fpregs_to_fpstate(fpu);
-- set_thread_flag(TIF_NEED_FPU_LOAD);
- fpregs_unlock();
-
- if (ret) {
diff --git a/debian/patches-rt/ARM-enable-irq-in-translation-section-permission-fau.patch b/debian/patches-rt/ARM-enable-irq-in-translation-section-permission-fau.patch
index 4cffc970f..21e83543c 100644
--- a/debian/patches-rt/ARM-enable-irq-in-translation-section-permission-fau.patch
+++ b/debian/patches-rt/ARM-enable-irq-in-translation-section-permission-fau.patch
@@ -1,7 +1,7 @@
From: "Yadi.hu" <yadi.hu@windriver.com>
Date: Wed, 10 Dec 2014 10:32:09 +0800
Subject: ARM: enable irq in translation/section permission fault handlers
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Probably happens on all ARM, with
CONFIG_PREEMPT_RT_FULL
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
-@@ -437,6 +437,9 @@ do_translation_fault(unsigned long addr,
+@@ -434,6 +434,9 @@ do_translation_fault(unsigned long addr,
if (addr < TASK_SIZE)
return do_page_fault(addr, fsr, regs);
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (user_mode(regs))
goto bad_area;
-@@ -504,6 +507,9 @@ do_translation_fault(unsigned long addr,
+@@ -501,6 +504,9 @@ do_translation_fault(unsigned long addr,
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
diff --git a/debian/patches-rt/Drivers-hv-vmbus-include-header-for-get_irq_regs.patch b/debian/patches-rt/Drivers-hv-vmbus-include-header-for-get_irq_regs.patch
index 560d10b7c..d0dddfd42 100644
--- a/debian/patches-rt/Drivers-hv-vmbus-include-header-for-get_irq_regs.patch
+++ b/debian/patches-rt/Drivers-hv-vmbus-include-header-for-get_irq_regs.patch
@@ -4,7 +4,7 @@ Subject: [PATCH] Drivers: hv: vmbus: include header for get_irq_regs()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
On !RT the header file get_irq_regs() gets pulled in via other header files. On
RT it does not and the build fails:
@@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
-@@ -31,6 +31,7 @@
+@@ -18,6 +18,7 @@
#include <linux/atomic.h>
#include <linux/hyperv.h>
#include <linux/interrupt.h>
diff --git a/debian/patches-rt/EXP-rcu-skip_workqueue.patch b/debian/patches-rt/EXP-rcu-skip_workqueue.patch
deleted file mode 100644
index 04b56aaab..000000000
--- a/debian/patches-rt/EXP-rcu-skip_workqueue.patch
+++ /dev/null
@@ -1,28 +0,0 @@
-From: Paul E. McKenney <paulmck@linux.ibm.com>
-Date: Mon, 29 Oct 2018 11:53:01 +0100
-Subject: [PATCH] EXP rcu: skip the workqueue path on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-The function sync_rcu_exp_select_cpus() is only invoked once at boot because
-"force expedite" is not enabled on RT. In that case, RT behaves as if no
-workqueues were available, so that we don't attempt to schedule them with
-preemption disabled.
-
-Suggested-by: Paul E. McKenney <paulmck@linux.ibm.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/rcu/tree_exp.h | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
---- a/kernel/rcu/tree_exp.h
-+++ b/kernel/rcu/tree_exp.h
-@@ -441,7 +441,8 @@ static void sync_rcu_exp_select_cpus(smp
- if (!READ_ONCE(rnp->expmask))
- continue; /* Avoid early boot non-existent wq. */
- rnp->rew.rew_func = func;
-- if (!READ_ONCE(rcu_par_gp_wq) ||
-+ if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) ||
-+ !READ_ONCE(rcu_par_gp_wq) ||
- rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
- rcu_is_last_leaf_node(rnp)) {
- /* No workqueues yet or last leaf, do direct call. */
diff --git a/debian/patches-rt/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch b/debian/patches-rt/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
index 8216eb5ec..3ab78b247 100644
--- a/debian/patches-rt/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
+++ b/debian/patches-rt/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
@@ -1,7 +1,7 @@
From: Josh Cartwright <joshc@ni.com>
Date: Thu, 11 Feb 2016 11:54:01 -0600
Subject: KVM: arm/arm64: downgrade preempt_disable()d region to migrate_disable()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
kvm_arch_vcpu_ioctl_run() disables the use of preemption when updating
the vgic and timer states to prevent the calling task from migrating to
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
-@@ -709,7 +709,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -691,7 +691,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
* involves poking the GIC, which must be done in a
* non-preemptible context.
*/
@@ -32,7 +32,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
kvm_pmu_flush_hwstate(vcpu);
-@@ -758,7 +758,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -740,7 +740,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
kvm_timer_sync_hwstate(vcpu);
kvm_vgic_sync_hwstate(vcpu);
local_irq_enable();
@@ -41,7 +41,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
}
-@@ -836,7 +836,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -818,7 +818,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
/* Exit types that need handling before we can be preempted */
handle_exit_early(vcpu, run, ret);
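In isolation, the downgrade this patch applies looks like the sketch below; the helper name is hypothetical and the pattern assumes the RT patchset's migrate_disable()/migrate_enable() API:

```c
/* Sketch of the preempt_disable() -> migrate_disable() downgrade. */
static void demo_update_vcpu_hw_state(void)
{
	migrate_disable();            /* RT patchset API: pin task to this CPU */
	demo_poke_per_cpu_gic();      /* hypothetical: must stay on this CPU,
	                               * but may still be preempted here */
	migrate_enable();
}
```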
diff --git a/debian/patches-rt/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch b/debian/patches-rt/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
index 7b7049d7a..1e9de1c1d 100644
--- a/debian/patches-rt/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
+++ b/debian/patches-rt/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
@@ -5,7 +5,7 @@ Cc: Anna Schumaker <anna.schumaker@netapp.com>,
linux-nfs@vger.kernel.org, linux-kernel@vger.kernel.org,
tglx@linutronix.de
Subject: NFSv4: replace seqcount_t with a seqlock_t
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
The raw_write_seqcount_begin() in nfs4_reclaim_open_state() bugs me
because it maps to preempt_disable() in -RT which I can't have at this
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
-@@ -151,11 +151,11 @@ static int nfs_delegation_claim_opens(st
+@@ -152,11 +152,11 @@ static int nfs_delegation_claim_opens(st
sp = state->owner;
/* Block nfs4_proc_unlck */
mutex_lock(&sp->so_delegreturn_mutex);
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
-@@ -2892,7 +2892,7 @@ static int _nfs4_open_and_get_state(stru
+@@ -2911,7 +2911,7 @@ static int _nfs4_open_and_get_state(stru
unsigned int seq;
int ret;
@@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
ret = _nfs4_proc_open(opendata, ctx);
if (ret != 0)
-@@ -2933,7 +2933,7 @@ static int _nfs4_open_and_get_state(stru
+@@ -2952,7 +2952,7 @@ static int _nfs4_open_and_get_state(stru
if (d_inode(dentry) == state->inode) {
nfs_inode_attach_open_context(ctx);
@@ -78,7 +78,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
-@@ -506,7 +506,7 @@ nfs4_alloc_state_owner(struct nfs_server
+@@ -510,7 +510,7 @@ nfs4_alloc_state_owner(struct nfs_server
nfs4_init_seqid_counter(&sp->so_seqid);
atomic_set(&sp->so_count, 1);
INIT_LIST_HEAD(&sp->so_lru);
@@ -87,7 +87,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
mutex_init(&sp->so_delegreturn_mutex);
return sp;
}
-@@ -1611,8 +1611,12 @@ static int nfs4_reclaim_open_state(struc
+@@ -1616,8 +1616,12 @@ static int nfs4_reclaim_open_state(struc
* recovering after a network partition or a reboot from a
* server that doesn't support a grace period.
*/
@@ -101,7 +101,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
restart:
list_for_each_entry(state, &sp->so_states, open_states) {
if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
-@@ -1665,14 +1669,20 @@ static int nfs4_reclaim_open_state(struc
+@@ -1670,14 +1674,20 @@ static int nfs4_reclaim_open_state(struc
spin_lock(&sp->so_lock);
goto restart;
}
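For reference, the API difference the conversion above relies on, as a self-contained sketch (the demo_* names are hypothetical, not from the patch): a seqlock_t embeds a spinlock that writers take, so on RT the write side becomes a sleeping lock instead of mapping to preempt_disable() the way raw_write_seqcount_begin() does.

```c
#include <linux/seqlock.h>

static DEFINE_SEQLOCK(demo_lock);
static unsigned long demo_value;

/* Writer: write_seqlock() takes the embedded spinlock, which is a
 * sleeping lock on RT, so no preempt_disable() is implied. */
static void demo_update(unsigned long v)
{
	write_seqlock(&demo_lock);
	demo_value = v;
	write_sequnlock(&demo_lock);
}

/* Reader: the usual lockless retry loop, same as with a bare seqcount. */
static unsigned long demo_read(void)
{
	unsigned long v;
	unsigned int seq;

	do {
		seq = read_seqbegin(&demo_lock);
		v = demo_value;
	} while (read_seqretry(&demo_lock, seq));
	return v;
}
```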
diff --git a/debian/patches-rt/add_migrate_disable.patch b/debian/patches-rt/add_migrate_disable.patch
index 474f574ef..95f94a163 100644
--- a/debian/patches-rt/add_migrate_disable.patch
+++ b/debian/patches-rt/add_migrate_disable.patch
@@ -1,19 +1,19 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Sat, 27 May 2017 19:02:06 +0200
Subject: kernel/sched/core: add migrate_disable()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
---
include/linux/preempt.h | 23 ++++++++
include/linux/sched.h | 7 ++
include/linux/smp.h | 3 +
- kernel/sched/core.c | 130 +++++++++++++++++++++++++++++++++++++++++++++++-
+ kernel/sched/core.c | 129 +++++++++++++++++++++++++++++++++++++++++++++++-
kernel/sched/debug.c | 4 +
- 5 files changed, 165 insertions(+), 2 deletions(-)
+ 5 files changed, 164 insertions(+), 2 deletions(-)
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
-@@ -182,6 +182,22 @@ do { \
+@@ -191,6 +191,22 @@ do { \
#define preemptible() (preempt_count() == 0 && !irqs_disabled())
@@ -36,7 +36,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.
#ifdef CONFIG_PREEMPT
#define preempt_enable() \
do { \
-@@ -250,6 +266,13 @@ do { \
+@@ -259,6 +275,13 @@ do { \
#define preempt_enable_notrace() barrier()
#define preemptible() 0
@@ -52,7 +52,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.
#ifdef MODULE
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -661,6 +661,13 @@ struct task_struct {
+@@ -653,6 +653,13 @@ struct task_struct {
int nr_cpus_allowed;
const cpumask_t *cpus_ptr;
cpumask_t cpus_mask;
@@ -80,7 +80,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.
* boot command line:
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1026,7 +1026,15 @@ void set_cpus_allowed_common(struct task
+@@ -1060,7 +1060,15 @@ void set_cpus_allowed_common(struct task
p->nr_cpus_allowed = cpumask_weight(new_mask);
}
@@ -97,7 +97,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.
{
struct rq *rq = task_rq(p);
bool queued, running;
-@@ -1055,6 +1063,20 @@ void do_set_cpus_allowed(struct task_str
+@@ -1089,6 +1097,20 @@ void do_set_cpus_allowed(struct task_str
set_curr_task(rq, p);
}
@@ -118,7 +118,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.
/*
* Change a given task's CPU affinity. Migrate the thread to a
* proper CPU and schedule it away if the CPU it's executing on
-@@ -1113,9 +1135,16 @@ static int __set_cpus_allowed_ptr(struct
+@@ -1147,9 +1169,16 @@ static int __set_cpus_allowed_ptr(struct
}
/* Can the task run on the task's current CPU? If so, we're done */
@@ -136,7 +136,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.
dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
if (task_running(rq, p) || p->state == TASK_WAKING) {
struct migration_arg arg = { p, dest_cpu };
-@@ -7072,3 +7101,100 @@ const u32 sched_prio_to_wmult[40] = {
+@@ -7086,3 +7115,99 @@ const u32 sched_prio_to_wmult[40] = {
};
#undef CREATE_TRACE_POINTS
@@ -229,7 +229,6 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.
+
+ preempt_enable();
+ stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
-+ tlb_migrate_finish(p->mm);
+ return;
+ }
+ }
@@ -239,7 +238,7 @@ Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.
+#endif
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
-@@ -982,6 +982,10 @@ void proc_sched_show_task(struct task_st
+@@ -979,6 +979,10 @@ void proc_sched_show_task(struct task_st
P(dl.runtime);
P(dl.deadline);
}
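A minimal usage sketch of the interface this patch introduces (demo_counter is hypothetical): unlike preempt_disable(), the section stays preemptible, but the scheduler will not migrate the task, so per-CPU state remains stable across it.

```c
static DEFINE_PER_CPU(unsigned long, demo_counter);  /* hypothetical */

static void demo_touch_per_cpu_state(void)
{
	migrate_disable();
	/*
	 * Still preemptible here, but smp_processor_id() is stable and
	 * sleeping locks may be taken -- neither holds under
	 * preempt_disable().
	 */
	this_cpu_inc(demo_counter);
	migrate_enable();
}
```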
diff --git a/debian/patches-rt/apparmor-use-a-locallock-instead-preempt_disable.patch b/debian/patches-rt/apparmor-use-a-locallock-instead-preempt_disable.patch
index 7a4f992eb..ab68ef862 100644
--- a/debian/patches-rt/apparmor-use-a-locallock-instead-preempt_disable.patch
+++ b/debian/patches-rt/apparmor-use-a-locallock-instead-preempt_disable.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 11 Oct 2017 17:43:49 +0200
Subject: apparmor: use a locallock instead preempt_disable()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
get_buffers() disables preemption which acts as a lock for the per-CPU
variable. Since we can't disable preemption here on RT, a local_lock is
@@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/security/apparmor/include/path.h
+++ b/security/apparmor/include/path.h
-@@ -40,8 +40,10 @@ struct aa_buffers {
+@@ -36,8 +36,10 @@ struct aa_buffers {
#include <linux/percpu.h>
#include <linux/preempt.h>
@@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define ASSIGN(FN, A, X, N) ((X) = FN(A, N))
#define EVAL1(FN, A, X) ASSIGN(FN, A, X, 0) /*X = FN(0)*/
-@@ -51,7 +53,17 @@ DECLARE_PER_CPU(struct aa_buffers, aa_bu
+@@ -47,7 +49,17 @@ DECLARE_PER_CPU(struct aa_buffers, aa_bu
#define for_each_cpu_buffer(I) for ((I) = 0; (I) < MAX_PATH_BUFFERS; (I)++)
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define AA_BUG_PREEMPT_ENABLED(X) AA_BUG(preempt_count() <= 0, X)
#else
#define AA_BUG_PREEMPT_ENABLED(X) /* nop */
-@@ -67,14 +79,15 @@ DECLARE_PER_CPU(struct aa_buffers, aa_bu
+@@ -63,14 +75,15 @@ DECLARE_PER_CPU(struct aa_buffers, aa_bu
#define get_buffers(X...) \
do { \
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* __AA_PATH_H */
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
-@@ -48,7 +48,7 @@
+@@ -44,7 +44,7 @@
int apparmor_initialized;
DEFINE_PER_CPU(struct aa_buffers, aa_buffers);
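The shape of the conversion, as a sketch against the RT patchset's locallock API (demo_* names are hypothetical): on !RT, get_locked_var() degrades to preempt_disable() plus a per-CPU dereference, while on RT it takes a per-CPU sleeping lock, which is the point of the patch.

```c
#include <linux/locallock.h>   /* RT patchset header, not in mainline */

static DEFINE_PER_CPU(int, demo_buf);     /* hypothetical per-CPU data */
static DEFINE_LOCAL_IRQ_LOCK(demo_lock);

static void demo_use_buffer(void)
{
	int *buf = &get_locked_var(demo_lock, demo_buf);

	*buf = 1;   /* exclusive access without disabling preemption on RT */
	put_locked_var(demo_lock, demo_buf);
}
```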
diff --git a/debian/patches-rt/arch-arm64-Add-lazy-preempt-support.patch b/debian/patches-rt/arch-arm64-Add-lazy-preempt-support.patch
index f44679a13..a18843a7a 100644
--- a/debian/patches-rt/arch-arm64-Add-lazy-preempt-support.patch
+++ b/debian/patches-rt/arch-arm64-Add-lazy-preempt-support.patch
@@ -1,7 +1,7 @@
From: Anders Roxell <anders.roxell@linaro.org>
Date: Thu, 14 May 2015 17:52:17 +0200
Subject: arch/arm64: Add lazy preempt support
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
arm64 is missing support for PREEMPT_RT. The main feature which is
lacking is support for lazy preemption. The arch-specific entry code,
@@ -22,14 +22,14 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
-@@ -146,6 +146,7 @@ config ARM64
+@@ -152,6 +152,7 @@ config ARM64
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_PREEMPT_LAZY
select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_FUNCTION_ARG_ACCESS_API
select HAVE_RCU_TABLE_FREE
- select HAVE_RCU_TABLE_INVALIDATE
--- a/arch/arm64/include/asm/preempt.h
+++ b/arch/arm64/include/asm/preempt.h
@@ -70,13 +70,34 @@ static inline bool __preempt_count_dec_a
@@ -70,7 +70,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
#ifdef CONFIG_PREEMPT
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
-@@ -42,6 +42,7 @@ struct thread_info {
+@@ -31,6 +31,7 @@ struct thread_info {
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
u64 ttbr0; /* saved TTBR0_EL1 */
#endif
@@ -78,7 +78,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
union {
u64 preempt_count; /* 0 => preemptible, <0 => bug */
struct {
-@@ -87,6 +88,7 @@ void arch_release_task_struct(struct tas
+@@ -75,6 +76,7 @@ void arch_release_task_struct(struct tas
#define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */
#define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */
#define TIF_FSCHECK 5 /* Check FS is USER_DS on return */
@@ -86,7 +86,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
#define TIF_NOHZ 7
#define TIF_SYSCALL_TRACE 8
#define TIF_SYSCALL_AUDIT 9
-@@ -105,6 +107,7 @@ void arch_release_task_struct(struct tas
+@@ -93,6 +95,7 @@ void arch_release_task_struct(struct tas
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_FOREIGN_FPSTATE (1 << TIF_FOREIGN_FPSTATE)
@@ -94,7 +94,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
#define _TIF_NOHZ (1 << TIF_NOHZ)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
-@@ -117,8 +120,9 @@ void arch_release_task_struct(struct tas
+@@ -105,8 +108,9 @@ void arch_release_task_struct(struct tas
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
_TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
@@ -107,7 +107,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
_TIF_NOHZ)
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
-@@ -41,6 +41,7 @@ int main(void)
+@@ -30,6 +30,7 @@ int main(void)
BLANK();
DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags));
DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count));
@@ -117,36 +117,29 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0));
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
-@@ -610,9 +610,16 @@ ENDPROC(el1_sync)
-
- #ifdef CONFIG_PREEMPT
- ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count
-- cbnz x24, 1f // preempt count != 0
-- bl el1_preempt
-+ cbnz x24, 2f // preempt count != 0
+@@ -647,9 +647,17 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKIN
+ mrs x0, daif
+ orr x24, x24, x0
+ alternative_else_nop_endif
+- cbnz x24, 1f // preempt count != 0 || NMI return path
+- bl preempt_schedule_irq // irq en/disable is done inside
++
++ cbnz x24, 2f // preempt count != 0
+
-+ ldr w24, [tsk, #TSK_TI_PREEMPT_LAZY] // get preempt lazy count
-+ cbnz w24, 2f // preempt lazy count != 0
++ ldr w24, [tsk, #TSK_TI_PREEMPT_LAZY] // get preempt lazy count
++ cbnz w24, 2f // preempt lazy count != 0
+
-+ ldr x0, [tsk, #TSK_TI_FLAGS] // get flags
-+ tbz x0, #TIF_NEED_RESCHED_LAZY, 2f // needs rescheduling?
++ ldr x0, [tsk, #TSK_TI_FLAGS] // get flags
++ tbz x0, #TIF_NEED_RESCHED_LAZY, 2f // needs rescheduling?
1:
-+ bl el1_preempt
++ bl preempt_schedule_irq // irq en/disable is done inside
+2:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_on
-@@ -626,6 +633,7 @@ ENDPROC(el1_irq)
- 1: bl preempt_schedule_irq // irq en/disable is done inside
- ldr x0, [tsk, #TSK_TI_FLAGS] // get new tasks TI_FLAGS
- tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling?
-+ tbnz x0, #TIF_NEED_RESCHED_LAZY, 1b // needs rescheduling?
- ret x24
- #endif
-
+ #ifdef CONFIG_ARM64_PSEUDO_NMI
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
-@@ -926,7 +926,7 @@ asmlinkage void do_notify_resume(struct
+@@ -910,7 +910,7 @@ asmlinkage void do_notify_resume(struct
/* Check valid user FS if needed */
addr_limit_user_check();
diff --git a/debian/patches-rt/arm-disable-NEON-in-kernel-mode.patch b/debian/patches-rt/arm-disable-NEON-in-kernel-mode.patch
index b4becf160..d3d61c4a4 100644
--- a/debian/patches-rt/arm-disable-NEON-in-kernel-mode.patch
+++ b/debian/patches-rt/arm-disable-NEON-in-kernel-mode.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 1 Dec 2017 10:42:03 +0100
Subject: [PATCH] arm*: disable NEON in kernel mode
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
NEON in kernel mode is used by the crypto algorithms and raid6 code.
While the raid6 code looks okay, the crypto algorithms do not: NEON
@@ -20,7 +20,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -2131,7 +2131,7 @@ config NEON
+@@ -2135,7 +2135,7 @@ config NEON
config KERNEL_MODE_NEON
bool "Support for NEON in kernel mode"
diff --git a/debian/patches-rt/arm-enable-highmem-for-rt.patch b/debian/patches-rt/arm-enable-highmem-for-rt.patch
index 8ce184c9d..6d550033e 100644
--- a/debian/patches-rt/arm-enable-highmem-for-rt.patch
+++ b/debian/patches-rt/arm-enable-highmem-for-rt.patch
@@ -1,7 +1,7 @@
Subject: arm: Enable highmem for rt
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 13 Feb 2013 11:03:11 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Fix up highmem for ARM.
@@ -38,7 +38,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
-@@ -34,6 +34,11 @@ static inline pte_t get_fixmap_pte(unsig
+@@ -31,6 +31,11 @@ static inline pte_t get_fixmap_pte(unsig
return *ptep;
}
@@ -50,7 +50,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void *kmap(struct page *page)
{
might_sleep();
-@@ -54,12 +59,13 @@ EXPORT_SYMBOL(kunmap);
+@@ -51,12 +56,13 @@ EXPORT_SYMBOL(kunmap);
void *kmap_atomic(struct page *page)
{
@@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
-@@ -79,7 +85,7 @@ void *kmap_atomic(struct page *page)
+@@ -76,7 +82,7 @@ void *kmap_atomic(struct page *page)
type = kmap_atomic_idx_push();
@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
vaddr = __fix_to_virt(idx);
#ifdef CONFIG_DEBUG_HIGHMEM
/*
-@@ -93,7 +99,10 @@ void *kmap_atomic(struct page *page)
+@@ -90,7 +96,10 @@ void *kmap_atomic(struct page *page)
* in place, so the contained TLB flush ensures the TLB is updated
* with the new mapping.
*/
@@ -86,7 +86,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return (void *)vaddr;
}
-@@ -106,10 +115,13 @@ void __kunmap_atomic(void *kvaddr)
+@@ -103,10 +112,13 @@ void __kunmap_atomic(void *kvaddr)
if (kvaddr >= (void *)FIXADDR_START) {
type = kmap_atomic_idx();
@@ -101,7 +101,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(vaddr != __fix_to_virt(idx));
#else
-@@ -122,28 +134,56 @@ void __kunmap_atomic(void *kvaddr)
+@@ -119,28 +131,56 @@ void __kunmap_atomic(void *kvaddr)
kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
}
pagefault_enable();
diff --git a/debian/patches-rt/arm-highmem-flush-tlb-on-unmap.patch b/debian/patches-rt/arm-highmem-flush-tlb-on-unmap.patch
index e5d57856b..da856345b 100644
--- a/debian/patches-rt/arm-highmem-flush-tlb-on-unmap.patch
+++ b/debian/patches-rt/arm-highmem-flush-tlb-on-unmap.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 11 Mar 2013 21:37:27 +0100
Subject: arm/highmem: Flush tlb on unmap
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
The tlb should be flushed on unmap and thus make the mapping entry
invalid. This is only done in the non-debug case which does not look
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
-@@ -112,10 +112,10 @@ void __kunmap_atomic(void *kvaddr)
+@@ -109,10 +109,10 @@ void __kunmap_atomic(void *kvaddr)
__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(vaddr != __fix_to_virt(idx));
diff --git a/debian/patches-rt/arm-imx6-cpuidle-Use-raw_spinlock_t.patch b/debian/patches-rt/arm-imx6-cpuidle-Use-raw_spinlock_t.patch
new file mode 100644
index 000000000..9c6a75209
--- /dev/null
+++ b/debian/patches-rt/arm-imx6-cpuidle-Use-raw_spinlock_t.patch
@@ -0,0 +1,43 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 14 May 2019 17:07:44 +0200
+Subject: [PATCH] arm: imx6: cpuidle: Use raw_spinlock_t
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
+
+The idle callback is invoked with interrupts disabled and requires
+raw_spinlock_t locks to work.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/arm/mach-imx/cpuidle-imx6q.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/arch/arm/mach-imx/cpuidle-imx6q.c
++++ b/arch/arm/mach-imx/cpuidle-imx6q.c
+@@ -14,22 +14,22 @@
+ #include "hardware.h"
+
+ static int num_idle_cpus = 0;
+-static DEFINE_SPINLOCK(cpuidle_lock);
++static DEFINE_RAW_SPINLOCK(cpuidle_lock);
+
+ static int imx6q_enter_wait(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+ {
+- spin_lock(&cpuidle_lock);
++ raw_spin_lock(&cpuidle_lock);
+ if (++num_idle_cpus == num_online_cpus())
+ imx6_set_lpm(WAIT_UNCLOCKED);
+- spin_unlock(&cpuidle_lock);
++ raw_spin_unlock(&cpuidle_lock);
+
+ cpu_do_idle();
+
+- spin_lock(&cpuidle_lock);
++ raw_spin_lock(&cpuidle_lock);
+ if (num_idle_cpus-- == num_online_cpus())
+ imx6_set_lpm(WAIT_CLOCKED);
+- spin_unlock(&cpuidle_lock);
++ raw_spin_unlock(&cpuidle_lock);
+
+ return index;
+ }
diff --git a/debian/patches-rt/arm-include-definition-for-cpumask_t.patch b/debian/patches-rt/arm-include-definition-for-cpumask_t.patch
index 2c46f6820..aff936550 100644
--- a/debian/patches-rt/arm-include-definition-for-cpumask_t.patch
+++ b/debian/patches-rt/arm-include-definition-for-cpumask_t.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 22 Dec 2016 17:28:33 +0100
Subject: [PATCH] arm: include definition for cpumask_t
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
This definition gets pulled in by other files. With the (later) split of
RCU and spinlock.h it won't compile anymore.
diff --git a/debian/patches-rt/arm-preempt-lazy-support.patch b/debian/patches-rt/arm-preempt-lazy-support.patch
index 6cf830683..3ecbcf0f3 100644
--- a/debian/patches-rt/arm-preempt-lazy-support.patch
+++ b/debian/patches-rt/arm-preempt-lazy-support.patch
@@ -1,7 +1,7 @@
Subject: arm: Add support for lazy preemption
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 31 Oct 2012 12:04:11 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Implement the arm pieces for lazy preempt.
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -90,6 +90,7 @@ config ARM
+@@ -94,6 +94,7 @@ config ARM
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
@@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
select HAVE_RSEQ
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
-@@ -49,6 +49,7 @@ struct cpu_context_save {
+@@ -46,6 +46,7 @@ struct cpu_context_save {
struct thread_info {
unsigned long flags; /* low level flags */
int preempt_count; /* 0 => preemptable, <0 => bug */
@@ -35,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
mm_segment_t addr_limit; /* address limit */
struct task_struct *task; /* main task structure */
__u32 cpu; /* cpu */
-@@ -142,7 +143,8 @@ extern int vfp_restore_user_hwstate(stru
+@@ -139,7 +140,8 @@ extern int vfp_restore_user_hwstate(stru
#define TIF_SYSCALL_TRACE 4 /* syscall trace active */
#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define TIF_NOHZ 12 /* in adaptive nohz mode */
#define TIF_USING_IWMMXT 17
-@@ -152,6 +154,7 @@ extern int vfp_restore_user_hwstate(stru
+@@ -149,6 +151,7 @@ extern int vfp_restore_user_hwstate(stru
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
@@ -53,7 +53,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
-@@ -167,7 +170,8 @@ extern int vfp_restore_user_hwstate(stru
+@@ -164,7 +167,8 @@ extern int vfp_restore_user_hwstate(stru
* Change these and you break ASM code in entry-common.S
*/
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
@@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif /* __ASM_ARM_THREAD_INFO_H */
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
-@@ -56,6 +56,7 @@ int main(void)
+@@ -53,6 +53,7 @@ int main(void)
BLANK();
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
@@ -75,7 +75,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
-@@ -216,11 +216,18 @@ ENDPROC(__dabt_svc)
+@@ -213,11 +213,18 @@ ENDPROC(__dabt_svc)
#ifdef CONFIG_PREEMPT
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
@@ -96,7 +96,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
svc_exit r5, irq = 1 @ return from exception
-@@ -235,8 +242,14 @@ ENDPROC(__irq_svc)
+@@ -232,8 +239,14 @@ ENDPROC(__irq_svc)
1: bl preempt_schedule_irq @ irq en/disable is done inside
ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
tst r0, #_TIF_NEED_RESCHED
@@ -114,7 +114,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
__und_fault:
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
-@@ -56,7 +56,9 @@ saved_pc .req lr
+@@ -53,7 +53,9 @@ saved_pc .req lr
cmp r2, #TASK_SIZE
blne addr_limit_check_failed
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
@@ -125,7 +125,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
bne fast_work_pending
-@@ -93,8 +95,11 @@ ENDPROC(ret_fast_syscall)
+@@ -90,8 +92,11 @@ ENDPROC(ret_fast_syscall)
cmp r2, #TASK_SIZE
blne addr_limit_check_failed
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
@@ -140,7 +140,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
-@@ -652,7 +652,8 @@ do_work_pending(struct pt_regs *regs, un
+@@ -648,7 +648,8 @@ do_work_pending(struct pt_regs *regs, un
*/
trace_hardirqs_off();
do {
diff --git a/debian/patches-rt/arm-remove-printk_nmi_.patch b/debian/patches-rt/arm-remove-printk_nmi_.patch
index 3fd7e09a0..70e32c8c1 100644
--- a/debian/patches-rt/arm-remove-printk_nmi_.patch
+++ b/debian/patches-rt/arm-remove-printk_nmi_.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 15 Feb 2019 14:34:20 +0100
Subject: [PATCH] arm: remove printk_nmi_.*()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
It is no longer provided by the printk core code.
@@ -12,7 +12,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
-@@ -684,11 +684,9 @@ void handle_IPI(int ipinr, struct pt_reg
+@@ -679,11 +679,9 @@ void handle_IPI(int ipinr, struct pt_reg
break;
case IPI_CPU_BACKTRACE:
diff --git a/debian/patches-rt/arm64-KVM-compute_layout-before-altenates-are-applie.patch b/debian/patches-rt/arm64-KVM-compute_layout-before-altenates-are-applie.patch
index bcbb85923..7ccd91091 100644
--- a/debian/patches-rt/arm64-KVM-compute_layout-before-altenates-are-applie.patch
+++ b/debian/patches-rt/arm64-KVM-compute_layout-before-altenates-are-applie.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 26 Jul 2018 09:13:42 +0200
Subject: [PATCH] arm64: KVM: compute_layout before altenates are applied
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
compute_layout() is invoked as part of an alternative fixup under
stop_machine() and needs a sleeping lock as part of get_random_long().
@@ -32,7 +32,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
" .if " __stringify(cb) " == 0\n" \
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
-@@ -224,6 +224,7 @@ static int __apply_alternatives_multi_st
+@@ -238,6 +238,7 @@ static int __apply_alternatives_multi_st
void __init apply_alternatives_all(void)
{
/* better not try code patching on a live SMP system */
@@ -42,7 +42,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
-@@ -33,7 +33,7 @@ static u8 tag_lsb;
+@@ -22,7 +22,7 @@ static u8 tag_lsb;
static u64 tag_val;
static u64 va_mask;
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
u64 hyp_va_msb;
-@@ -121,8 +121,6 @@ void __init kvm_update_va_mask(struct al
+@@ -110,8 +110,6 @@ void __init kvm_update_va_mask(struct al
BUG_ON(nr_inst != 5);
@@ -60,7 +60,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for (i = 0; i < nr_inst; i++) {
u32 rd, rn, insn, oinsn;
-@@ -167,9 +165,6 @@ void kvm_patch_vector_branch(struct alt_
+@@ -156,9 +154,6 @@ void kvm_patch_vector_branch(struct alt_
return;
}
diff --git a/debian/patches-rt/arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch b/debian/patches-rt/arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch
index e37cd5f6a..b4574fcb8 100644
--- a/debian/patches-rt/arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch
+++ b/debian/patches-rt/arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 25 Jul 2018 14:02:38 +0200
Subject: [PATCH] arm64: fpsimd: use preemp_disable in addition to
local_bh_disable()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
In v4.16-RT I noticed a number of warnings from task_fpsimd_load(). The
code disables BH and expects that it is not preemptible. On -RT the
@@ -19,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
-@@ -159,6 +159,16 @@ static void sve_free(struct task_struct
+@@ -162,6 +162,16 @@ static void sve_free(struct task_struct
__sve_free(task);
}
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* TIF_SVE controls whether a task can use SVE without trapping while
* in userspace, and also the way a task's FPSIMD/SVE state is stored
-@@ -547,6 +557,7 @@ int sve_set_vector_length(struct task_st
+@@ -557,6 +567,7 @@ int sve_set_vector_length(struct task_st
* non-SVE thread.
*/
if (task == current) {
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
local_bh_disable();
fpsimd_save();
-@@ -557,8 +568,10 @@ int sve_set_vector_length(struct task_st
+@@ -566,8 +577,10 @@ int sve_set_vector_length(struct task_st
if (test_and_clear_tsk_thread_flag(task, TIF_SVE))
sve_to_fpsimd(task);
@@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Force reallocation of task SVE state to the correct size
-@@ -813,6 +826,7 @@ asmlinkage void do_sve_acc(unsigned int
+@@ -880,6 +893,7 @@ asmlinkage void do_sve_acc(unsigned int
sve_alloc(current);
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
local_bh_disable();
fpsimd_save();
-@@ -826,6 +840,7 @@ asmlinkage void do_sve_acc(unsigned int
+@@ -892,6 +906,7 @@ asmlinkage void do_sve_acc(unsigned int
WARN_ON(1); /* SVE access shouldn't have trapped */
local_bh_enable();
@@ -72,7 +72,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -888,10 +903,12 @@ void fpsimd_thread_switch(struct task_st
+@@ -954,10 +969,12 @@ void fpsimd_thread_switch(struct task_st
void fpsimd_flush_thread(void)
{
int vl, supported_vl;
@@ -84,8 +84,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ preempt_disable();
local_bh_disable();
- memset(&current->thread.uw.fpsimd_state, 0,
-@@ -900,7 +917,7 @@ void fpsimd_flush_thread(void)
+ fpsimd_flush_task_state(current);
+@@ -966,7 +983,7 @@ void fpsimd_flush_thread(void)
if (system_supports_sve()) {
clear_thread_flag(TIF_SVE);
@@ -94,8 +94,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Reset the task vector length as required.
-@@ -936,6 +953,8 @@ void fpsimd_flush_thread(void)
- set_thread_flag(TIF_FOREIGN_FPSTATE);
+@@ -1000,6 +1017,8 @@ void fpsimd_flush_thread(void)
+ }
local_bh_enable();
+ preempt_enable();
@@ -103,7 +103,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -947,9 +966,11 @@ void fpsimd_preserve_current_state(void)
+@@ -1011,9 +1030,11 @@ void fpsimd_preserve_current_state(void)
if (!system_supports_fpsimd())
return;
@@ -115,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -1007,6 +1028,7 @@ void fpsimd_restore_current_state(void)
+@@ -1076,6 +1097,7 @@ void fpsimd_restore_current_state(void)
if (!system_supports_fpsimd())
return;
@@ -123,7 +123,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
local_bh_disable();
if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
-@@ -1015,6 +1037,7 @@ void fpsimd_restore_current_state(void)
+@@ -1084,6 +1106,7 @@ void fpsimd_restore_current_state(void)
}
local_bh_enable();
@@ -131,7 +131,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -1027,6 +1050,7 @@ void fpsimd_update_current_state(struct
+@@ -1096,6 +1119,7 @@ void fpsimd_update_current_state(struct
if (!system_supports_fpsimd())
return;
@@ -139,7 +139,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
local_bh_disable();
current->thread.uw.fpsimd_state = *state;
-@@ -1039,6 +1063,7 @@ void fpsimd_update_current_state(struct
+@@ -1108,6 +1132,7 @@ void fpsimd_update_current_state(struct
clear_thread_flag(TIF_FOREIGN_FPSTATE);
local_bh_enable();
@@ -147,7 +147,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -1084,6 +1109,7 @@ void kernel_neon_begin(void)
+@@ -1170,6 +1195,7 @@ void kernel_neon_begin(void)
BUG_ON(!may_use_simd());
@@ -155,7 +155,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
local_bh_disable();
__this_cpu_write(kernel_neon_busy, true);
-@@ -1097,6 +1123,7 @@ void kernel_neon_begin(void)
+@@ -1183,6 +1209,7 @@ void kernel_neon_begin(void)
preempt_disable();
local_bh_enable();
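Condensed, the pattern the patch applies at each site: on RT, softirq processing is preemptible, so local_bh_disable() no longer implies preempt_disable(), and code that manipulates per-CPU FPSIMD state adds the explicit call.

```c
/* Sketch of the pairing added throughout the patch above. */
static void demo_update_fpsimd_state(void)
{
	preempt_disable();      /* what local_bh_disable() implied on !RT */
	local_bh_disable();
	fpsimd_save();          /* per-CPU FP/SIMD state update, as in the patch */
	local_bh_enable();
	preempt_enable();
}
```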
diff --git a/debian/patches-rt/at91_dont_enable_disable_clock.patch b/debian/patches-rt/at91_dont_enable_disable_clock.patch
index c5e20c755..ffc78bd48 100644
--- a/debian/patches-rt/at91_dont_enable_disable_clock.patch
+++ b/debian/patches-rt/at91_dont_enable_disable_clock.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 09 Mar 2016 10:51:06 +0100
Subject: arm: at91: do not disable/enable clocks in a row
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Currently the driver will disable the clock and enable it one line later
if it is switching from periodic mode into one-shot mode.
diff --git a/debian/patches-rt/block-blk-mq-move-blk_queue_usage_counter_release-in.patch b/debian/patches-rt/block-blk-mq-move-blk_queue_usage_counter_release-in.patch
index 4b81dfe8a..ca85ddeb2 100644
--- a/debian/patches-rt/block-blk-mq-move-blk_queue_usage_counter_release-in.patch
+++ b/debian/patches-rt/block-blk-mq-move-blk_queue_usage_counter_release-in.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 13 Mar 2018 13:49:16 +0100
Subject: [PATCH] block: blk-mq: move blk_queue_usage_counter_release()
into process context
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
| BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:914
| in_atomic(): 1, irqs_disabled(): 0, pid: 255, name: kworker/u257:6
@@ -52,11 +52,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/block/blk-core.c
+++ b/block/blk-core.c
-@@ -447,12 +447,21 @@ void blk_queue_exit(struct request_queue
+@@ -403,12 +403,21 @@ void blk_queue_exit(struct request_queue
percpu_ref_put(&q->q_usage_counter);
}
-+static void blk_queue_usage_counter_release_wrk(struct kthread_work *work)
++static void blk_queue_usage_counter_release_wrk(struct work_struct *work)
+{
+ struct request_queue *q =
+ container_of(work, struct request_queue, mq_pcpu_wake);
@@ -71,18 +71,18 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- wake_up_all(&q->mq_freeze_wq);
+ if (wq_has_sleeper(&q->mq_freeze_wq))
-+ kthread_schedule_work(&q->mq_pcpu_wake);
++ schedule_work(&q->mq_pcpu_wake);
}
static void blk_rq_timed_out_timer(struct timer_list *t)
-@@ -524,6 +533,7 @@ struct request_queue *blk_alloc_queue_no
+@@ -479,6 +488,7 @@ struct request_queue *blk_alloc_queue_no
spin_lock_init(&q->queue_lock);
init_waitqueue_head(&q->mq_freeze_wq);
-+ kthread_init_work(&q->mq_pcpu_wake, blk_queue_usage_counter_release_wrk);
++ INIT_WORK(&q->mq_pcpu_wake, blk_queue_usage_counter_release_wrk);
+ mutex_init(&q->mq_freeze_lock);
/*
- * Init percpu_ref in atomic mode so that it's faster to shutdown.
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -13,6 +13,7 @@
@@ -93,11 +93,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
-@@ -549,6 +550,7 @@ struct request_queue {
+@@ -554,6 +555,7 @@ struct request_queue {
#endif
struct rcu_head rcu_head;
wait_queue_head_t mq_freeze_wq;
-+ struct kthread_work mq_pcpu_wake;
- struct percpu_ref q_usage_counter;
- struct list_head all_q_node;
-
++ struct work_struct mq_pcpu_wake;
+ /*
+ * Protect concurrent access to q_usage_counter by
+ * percpu_ref_kill() and percpu_ref_reinit().
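The refreshed patch now defers the wakeup through a regular workqueue item instead of a kthread_work. The generic shape, as a sketch with hypothetical demo_* names; INIT_WORK() at queue setup completes the picture, matching the blk_alloc_queue_node() hunk above:

```c
#include <linux/wait.h>
#include <linux/workqueue.h>

struct demo_queue {
	wait_queue_head_t  freeze_wq;
	struct work_struct pcpu_wake;   /* mirrors q->mq_pcpu_wake */
};

/* Runs in process context, where wake_up_all() may take the
 * waitqueue lock (a sleeping lock on RT) without trouble. */
static void demo_release_wrk(struct work_struct *work)
{
	struct demo_queue *q = container_of(work, struct demo_queue, pcpu_wake);

	wake_up_all(&q->freeze_wq);
}

/* Called from the percpu_ref release path, i.e. atomic context:
 * only punt to the workqueue, take no sleeping locks here. */
static void demo_release(struct demo_queue *q)
{
	if (wq_has_sleeper(&q->freeze_wq))
		schedule_work(&q->pcpu_wake);
}
```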
diff --git a/debian/patches-rt/block-mq-don-t-complete-requests-via-IPI.patch b/debian/patches-rt/block-mq-don-t-complete-requests-via-IPI.patch
index aaa0ad678..d4bde258b 100644
--- a/debian/patches-rt/block-mq-don-t-complete-requests-via-IPI.patch
+++ b/debian/patches-rt/block-mq-don-t-complete-requests-via-IPI.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 29 Jan 2015 15:10:08 +0100
Subject: block/mq: don't complete requests via IPI
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
The IPI runs in hardirq context, where sleeping locks cannot be taken. Assume
caches are shared and complete requests on the local CPU.
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
-@@ -606,8 +606,16 @@ static void __blk_mq_complete_request(st
+@@ -605,8 +605,16 @@ static void __blk_mq_complete_request(st
}
cpu = get_cpu_light();
diff --git a/debian/patches-rt/block-mq-drop-preempt-disable.patch b/debian/patches-rt/block-mq-drop-preempt-disable.patch
index ed61c2bf2..95eba526e 100644
--- a/debian/patches-rt/block-mq-drop-preempt-disable.patch
+++ b/debian/patches-rt/block-mq-drop-preempt-disable.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 14 Jul 2015 14:26:34 +0200
Subject: block/mq: do not invoke preempt_disable()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
preempt_disable() and get_cpu() don't play well with the sleeping locks that
are taken later.
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
-@@ -605,7 +605,7 @@ static void __blk_mq_complete_request(st
+@@ -604,7 +604,7 @@ static void __blk_mq_complete_request(st
return;
}
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
shared = cpus_share_cache(cpu, ctx->cpu);
-@@ -617,7 +617,7 @@ static void __blk_mq_complete_request(st
+@@ -616,7 +616,7 @@ static void __blk_mq_complete_request(st
} else {
q->mq_ops->complete(rq);
}
@@ -32,7 +32,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
-@@ -1452,14 +1452,14 @@ static void __blk_mq_delay_run_hw_queue(
+@@ -1456,14 +1456,14 @@ static void __blk_mq_delay_run_hw_queue(
return;
if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
diff --git a/debian/patches-rt/block-mq-use-cpu_light.patch b/debian/patches-rt/block-mq-use-cpu_light.patch
index a757ad3b2..e1dc211e9 100644
--- a/debian/patches-rt/block-mq-use-cpu_light.patch
+++ b/debian/patches-rt/block-mq-use-cpu_light.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 9 Apr 2014 10:37:23 +0200
Subject: block: mq: use cpu_light()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
There is a might-sleep splat because get_cpu() disables preemption and a lock
is grabbed later. As a workaround for this we use get_cpu_light().
diff --git a/debian/patches-rt/block-use-cpu-chill.patch b/debian/patches-rt/block-use-cpu-chill.patch
index ed17fc9a9..5d7ec38a0 100644
--- a/debian/patches-rt/block-use-cpu-chill.patch
+++ b/debian/patches-rt/block-use-cpu-chill.patch
@@ -1,7 +1,7 @@
Subject: block: Use cpu_chill() for retry loops
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 20 Dec 2012 18:28:26 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Retry loops on RT might loop forever when the modifying side was
preempted. Steven also observed a live lock when there was a
diff --git a/debian/patches-rt/cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch b/debian/patches-rt/cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch
index 2534034e3..98e6e4404 100644
--- a/debian/patches-rt/cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch
+++ b/debian/patches-rt/cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 3 Jul 2018 18:19:48 +0200
Subject: [PATCH] cgroup: use irqsave in cgroup_rstat_flush_locked()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
All callers of cgroup_rstat_flush_locked() acquire cgroup_rstat_lock
either with spin_lock_irq() or spin_lock_irqsave().
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
-@@ -159,8 +159,9 @@ static void cgroup_rstat_flush_locked(st
+@@ -160,8 +160,9 @@ static void cgroup_rstat_flush_locked(st
raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock,
cpu);
struct cgroup *pos = NULL;
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu))) {
struct cgroup_subsys_state *css;
-@@ -172,7 +173,7 @@ static void cgroup_rstat_flush_locked(st
+@@ -173,7 +174,7 @@ static void cgroup_rstat_flush_locked(st
css->ss->css_rstat_flush(css, cpu);
rcu_read_unlock();
}
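The mechanics in isolation, as a sketch (demo_* is hypothetical): presumably, on RT the outer cgroup_rstat_lock is a sleeping lock that does not actually disable interrupts, so the inner raw spinlock must save and restore the interrupt state itself instead of assuming interrupts are already off.

```c
static DEFINE_RAW_SPINLOCK(demo_cpu_lock);

static void demo_flush_cpu(void)
{
	unsigned long flags;

	/* _irqsave instead of _irq: do not assume the caller has
	 * already disabled interrupts (it has not, on RT). */
	raw_spin_lock_irqsave(&demo_cpu_lock, flags);
	/* ... flush per-CPU statistics ... */
	raw_spin_unlock_irqrestore(&demo_cpu_lock, flags);
}
```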
diff --git a/debian/patches-rt/cgroups-use-simple-wait-in-css_release.patch b/debian/patches-rt/cgroups-use-simple-wait-in-css_release.patch
deleted file mode 100644
index 8d66b4122..000000000
--- a/debian/patches-rt/cgroups-use-simple-wait-in-css_release.patch
+++ /dev/null
@@ -1,71 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 13 Feb 2015 15:52:24 +0100
-Subject: cgroups: use simple wait in css_release()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-To avoid:
-|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:914
-|in_atomic(): 1, irqs_disabled(): 0, pid: 92, name: rcuc/11
-|2 locks held by rcuc/11/92:
-| #0: (rcu_callback){......}, at: [<ffffffff810e037e>] rcu_cpu_kthread+0x3de/0x940
-| #1: (rcu_read_lock_sched){......}, at: [<ffffffff81328390>] percpu_ref_call_confirm_rcu+0x0/0xd0
-|Preemption disabled at:[<ffffffff813284e2>] percpu_ref_switch_to_atomic_rcu+0x82/0xc0
-|CPU: 11 PID: 92 Comm: rcuc/11 Not tainted 3.18.7-rt0+ #1
-| ffff8802398cdf80 ffff880235f0bc28 ffffffff815b3a12 0000000000000000
-| 0000000000000000 ffff880235f0bc48 ffffffff8109aa16 0000000000000000
-| ffff8802398cdf80 ffff880235f0bc78 ffffffff815b8dd4 000000000000df80
-|Call Trace:
-| [<ffffffff815b3a12>] dump_stack+0x4f/0x7c
-| [<ffffffff8109aa16>] __might_sleep+0x116/0x190
-| [<ffffffff815b8dd4>] rt_spin_lock+0x24/0x60
-| [<ffffffff8108d2cd>] queue_work_on+0x6d/0x1d0
-| [<ffffffff8110c881>] css_release+0x81/0x90
-| [<ffffffff8132844e>] percpu_ref_call_confirm_rcu+0xbe/0xd0
-| [<ffffffff813284e2>] percpu_ref_switch_to_atomic_rcu+0x82/0xc0
-| [<ffffffff810e03e5>] rcu_cpu_kthread+0x445/0x940
-| [<ffffffff81098a2d>] smpboot_thread_fn+0x18d/0x2d0
-| [<ffffffff810948d8>] kthread+0xe8/0x100
-| [<ffffffff815b9c3c>] ret_from_fork+0x7c/0xb0
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/cgroup-defs.h | 1 +
- kernel/cgroup/cgroup.c | 8 ++++----
- 2 files changed, 5 insertions(+), 4 deletions(-)
-
---- a/include/linux/cgroup-defs.h
-+++ b/include/linux/cgroup-defs.h
-@@ -159,6 +159,7 @@ struct cgroup_subsys_state {
-
- /* percpu_ref killing and RCU release */
- struct work_struct destroy_work;
-+ struct kthread_work destroy_kwork;
- struct rcu_work destroy_rwork;
-
- /*
---- a/kernel/cgroup/cgroup.c
-+++ b/kernel/cgroup/cgroup.c
-@@ -4697,10 +4697,10 @@ static void css_free_rwork_fn(struct wor
- }
- }
-
--static void css_release_work_fn(struct work_struct *work)
-+static void css_release_work_fn(struct kthread_work *work)
- {
- struct cgroup_subsys_state *css =
-- container_of(work, struct cgroup_subsys_state, destroy_work);
-+ container_of(work, struct cgroup_subsys_state, destroy_kwork);
- struct cgroup_subsys *ss = css->ss;
- struct cgroup *cgrp = css->cgroup;
-
-@@ -4760,8 +4760,8 @@ static void css_release(struct percpu_re
- struct cgroup_subsys_state *css =
- container_of(ref, struct cgroup_subsys_state, refcnt);
-
-- INIT_WORK(&css->destroy_work, css_release_work_fn);
-- queue_work(cgroup_destroy_wq, &css->destroy_work);
-+ kthread_init_work(&css->destroy_kwork, css_release_work_fn);
-+ kthread_schedule_work(&css->destroy_kwork);
- }
-
- static void init_and_link_css(struct cgroup_subsys_state *css,
diff --git a/debian/patches-rt/clocksource-tclib-add-proper-depend.patch b/debian/patches-rt/clocksource-tclib-add-proper-depend.patch
new file mode 100644
index 000000000..d3b3fe786
--- /dev/null
+++ b/debian/patches-rt/clocksource-tclib-add-proper-depend.patch
@@ -0,0 +1,25 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 29 May 2019 15:50:36 +0200
+Subject: clocksource: TCLIB: Add proper depend
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
+
+Add a depends statement on ATMEL_TCLIB to ensure that the driver is not built
+on !ATMEL if COMPILE_TEST is enabled. The build would otherwise fail because
+the driver depends on `atmel_tc_divisors'.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/clocksource/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/clocksource/Kconfig
++++ b/drivers/clocksource/Kconfig
+@@ -424,7 +424,7 @@ config ATMEL_ST
+
+ config ATMEL_TCB_CLKSRC
+ bool "Atmel TC Block timer driver" if COMPILE_TEST
+- depends on HAS_IOMEM
++ depends on HAS_IOMEM && ATMEL_TCLIB
+ select TIMER_OF if OF
+ help
+ Support for Timer Counter Blocks on Atmel SoCs.
diff --git a/debian/patches-rt/clocksource-tclib-allow-higher-clockrates.patch b/debian/patches-rt/clocksource-tclib-allow-higher-clockrates.patch
index 3cab457a4..2fc247601 100644
--- a/debian/patches-rt/clocksource-tclib-allow-higher-clockrates.patch
+++ b/debian/patches-rt/clocksource-tclib-allow-higher-clockrates.patch
@@ -1,7 +1,7 @@
From: Benedikt Spranger <b.spranger@linutronix.de>
Date: Mon, 8 Mar 2010 18:57:04 +0100
Subject: clocksource: TCLIB: Allow higher clock rates for clock events
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
By default the TCLIB uses the 32KiHz base clock rate for clock events.
Add a compile-time selection to allow a higher clock resolution.
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
-@@ -419,6 +419,13 @@ config ATMEL_TCB_CLKSRC
+@@ -429,6 +429,13 @@ config ATMEL_TCB_CLKSRC
help
Support for Timer Counter Blocks on Atmel SoCs.
diff --git a/debian/patches-rt/completion-use-simple-wait-queues.patch b/debian/patches-rt/completion-use-simple-wait-queues.patch
index ae7307097..0078d128b 100644
--- a/debian/patches-rt/completion-use-simple-wait-queues.patch
+++ b/debian/patches-rt/completion-use-simple-wait-queues.patch
@@ -1,7 +1,7 @@
Subject: completion: Use simple wait queues
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 11 Jan 2013 11:23:51 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Completions have no long-lasting callbacks and therefore do not need
the complex waitqueue variant. Use simple waitqueues, which reduces the
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/powerpc/platforms/ps3/device-init.c
+++ b/arch/powerpc/platforms/ps3/device-init.c
-@@ -752,8 +752,8 @@ static int ps3_notification_read_write(s
+@@ -738,8 +738,8 @@ static int ps3_notification_read_write(s
}
pr_debug("%s:%u: notification %s issued\n", __func__, __LINE__, op);
@@ -51,7 +51,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
default:
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
-@@ -1704,7 +1704,7 @@ static void ffs_data_put(struct ffs_data
+@@ -1705,7 +1705,7 @@ static void ffs_data_put(struct ffs_data
pr_info("%s(): freeing\n", __func__);
ffs_data_clear(ffs);
BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
@@ -147,7 +147,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
-@@ -681,6 +681,10 @@ static int load_image_and_restore(void)
+@@ -688,6 +688,10 @@ static int load_image_and_restore(void)
return error;
}
@@ -158,7 +158,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* hibernate - Carry out system hibernation, including saving the image.
*/
-@@ -694,6 +698,8 @@ int hibernate(void)
+@@ -701,6 +705,8 @@ int hibernate(void)
return -EPERM;
}
@@ -167,7 +167,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
lock_system_sleep();
/* The snapshot device should not be opened while we're running */
if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
-@@ -772,6 +778,7 @@ int hibernate(void)
+@@ -777,6 +783,7 @@ int hibernate(void)
atomic_inc(&snapshot_device_available);
Unlock:
unlock_system_sleep();
@@ -177,7 +177,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return error;
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
-@@ -600,6 +600,8 @@ static int enter_state(suspend_state_t s
+@@ -605,6 +605,8 @@ static int enter_state(suspend_state_t s
return error;
}
@@ -186,7 +186,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* pm_suspend - Externally visible function for suspending the system.
* @state: System sleep state to enter.
-@@ -614,6 +616,7 @@ int pm_suspend(suspend_state_t state)
+@@ -619,6 +621,7 @@ int pm_suspend(suspend_state_t state)
if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
return -EINVAL;
@@ -194,7 +194,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pr_info("suspend entry (%s)\n", mem_sleep_labels[state]);
error = enter_state(state);
if (error) {
-@@ -623,6 +626,7 @@ int pm_suspend(suspend_state_t state)
+@@ -628,6 +631,7 @@ int pm_suspend(suspend_state_t state)
suspend_stats.success++;
}
pr_info("suspend exit\n");
@@ -300,7 +300,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
EXPORT_SYMBOL(completion_done);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7121,7 +7121,10 @@ void migrate_disable(void)
+@@ -7130,7 +7130,10 @@ void migrate_disable(void)
return;
}
#ifdef CONFIG_SCHED_DEBUG
@@ -312,7 +312,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
if (p->migrate_disable) {
-@@ -7151,7 +7154,10 @@ void migrate_enable(void)
+@@ -7160,7 +7163,10 @@ void migrate_enable(void)
}
#ifdef CONFIG_SCHED_DEBUG
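For reference, the mainline swait API the conversion targets, in a self-contained sketch (demo_* names are hypothetical; a real completion keeps its done count under the queue's raw lock rather than using READ_ONCE/WRITE_ONCE):

```c
#include <linux/swait.h>

static DECLARE_SWAIT_QUEUE_HEAD(demo_wq);
static bool demo_done;

static void demo_wait(void)
{
	/* Simple waitqueues support only exclusive wakeups with no
	 * custom wake callbacks -- all a completion needs. */
	swait_event_exclusive(demo_wq, READ_ONCE(demo_done));
}

static void demo_complete(void)
{
	WRITE_ONCE(demo_done, true);
	/* Usable from hard IRQ context: only a raw spinlock is held,
	 * for a bounded number of wakeups. */
	swake_up_one(&demo_wq);
}
```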
diff --git a/debian/patches-rt/cond-resched-lock-rt-tweak.patch b/debian/patches-rt/cond-resched-lock-rt-tweak.patch
index 3a75143ee..c3000e546 100644
--- a/debian/patches-rt/cond-resched-lock-rt-tweak.patch
+++ b/debian/patches-rt/cond-resched-lock-rt-tweak.patch
@@ -1,7 +1,7 @@
Subject: sched: Use the proper LOCK_OFFSET for cond_resched()
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 17 Jul 2011 22:51:33 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
RT does not increment the preempt count when a 'sleeping' spinlock is
locked. Update PREEMPT_LOCK_OFFSET for that case.
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
-@@ -115,7 +115,11 @@
+@@ -124,7 +124,11 @@
/*
* The preempt_count offset after spin_lock()
*/
diff --git a/debian/patches-rt/connector-cn_proc-Protect-send_msg-with-a-local-lock.patch b/debian/patches-rt/connector-cn_proc-Protect-send_msg-with-a-local-lock.patch
index 4d43347db..0d58debbb 100644
--- a/debian/patches-rt/connector-cn_proc-Protect-send_msg-with-a-local-lock.patch
+++ b/debian/patches-rt/connector-cn_proc-Protect-send_msg-with-a-local-lock.patch
@@ -2,7 +2,7 @@ From: Mike Galbraith <umgwanakikbuti@gmail.com>
Date: Sun, 16 Oct 2016 05:11:54 +0200
Subject: [PATCH] connector/cn_proc: Protect send_msg() with a local lock
on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:931
|in_atomic(): 1, irqs_disabled(): 0, pid: 31807, name: sleep
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
-@@ -32,6 +32,7 @@
+@@ -18,6 +18,7 @@
#include <linux/pid_namespace.h>
#include <linux/cn_proc.h>
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Size of a cn_msg followed by a proc_event structure. Since the
-@@ -54,10 +55,11 @@ static struct cb_id cn_proc_event_id = {
+@@ -40,10 +41,11 @@ static struct cb_id cn_proc_event_id = {
/* proc_event_counts is used as the sequence number of the netlink message */
static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 };
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
msg->seq = __this_cpu_inc_return(proc_event_counts) - 1;
((struct proc_event *)msg->data)->cpu = smp_processor_id();
-@@ -70,7 +72,7 @@ static inline void send_msg(struct cn_ms
+@@ -56,7 +58,7 @@ static inline void send_msg(struct cn_ms
*/
cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_NOWAIT);
diff --git a/debian/patches-rt/cpu-hotplug--Implement-CPU-pinning.patch b/debian/patches-rt/cpu-hotplug--Implement-CPU-pinning.patch
index 79f379d45..c24e8c904 100644
--- a/debian/patches-rt/cpu-hotplug--Implement-CPU-pinning.patch
+++ b/debian/patches-rt/cpu-hotplug--Implement-CPU-pinning.patch
@@ -1,7 +1,7 @@
Subject: cpu/hotplug: Implement CPU pinning
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 19 Jul 2017 17:31:20 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
@@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -670,6 +670,7 @@ struct task_struct {
+@@ -662,6 +662,7 @@ struct task_struct {
#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
int migrate_disable;
int migrate_disable_update;
@@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
# endif
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -75,6 +75,11 @@ static DEFINE_PER_CPU(struct cpuhp_cpu_s
+@@ -76,6 +76,11 @@ static DEFINE_PER_CPU(struct cpuhp_cpu_s
.fail = CPUHP_INVALID,
};
@@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lockdep_map cpuhp_state_up_map =
STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
-@@ -286,7 +291,28 @@ static int cpu_hotplug_disabled;
+@@ -287,7 +292,28 @@ static int cpu_hotplug_disabled;
*/
void pin_current_cpu(void)
{
@@ -62,7 +62,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -294,6 +320,13 @@ void pin_current_cpu(void)
+@@ -295,6 +321,13 @@ void pin_current_cpu(void)
*/
void unpin_current_cpu(void)
{
@@ -76,7 +76,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
-@@ -882,6 +915,7 @@ static int take_cpu_down(void *_param)
+@@ -885,6 +918,7 @@ static int take_cpu_down(void *_param)
static int takedown_cpu(unsigned int cpu)
{
@@ -84,7 +84,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int err;
-@@ -894,11 +928,14 @@ static int takedown_cpu(unsigned int cpu
+@@ -897,11 +931,14 @@ static int takedown_cpu(unsigned int cpu
*/
irq_lock_sparse();
@@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* CPU refused to die */
irq_unlock_sparse();
/* Unpark the hotplug thread so we can rollback there */
-@@ -917,6 +954,7 @@ static int takedown_cpu(unsigned int cpu
+@@ -920,6 +957,7 @@ static int takedown_cpu(unsigned int cpu
wait_for_ap_thread(st, false);
BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
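The body of the pinning helpers is largely elided above; its shape, as a heavily hedged sketch (the per-CPU cpuhp_pin_lock and the rwsem calls stand in for whatever reader/writer primitive the patch really uses, and the caller is assumed to keep migration disabled between pin and unpin):

    static DEFINE_PER_CPU(struct rw_semaphore, cpuhp_pin_lock);

    void pin_current_cpu(void)
    {
        struct rw_semaphore *pin;
        unsigned int cpu;

    again:
        pin = this_cpu_ptr(&cpuhp_pin_lock);
        if (down_read_trylock(pin))     /* fast path: no hotplug writer */
            return;

        cpu = smp_processor_id();
        down_read(pin);                 /* may sleep on RT */
        if (cpu != smp_processor_id()) {
            /* migrated while blocked: pin the CPU we actually run on */
            up_read(pin);
            goto again;
        }
    }

    void unpin_current_cpu(void)
    {
        up_read(this_cpu_ptr(&cpuhp_pin_lock));
    }

takedown_cpu() takes the same per-CPU lock as a writer before stopping the CPU, so a pinned task is never stranded on a dying CPU.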
diff --git a/debian/patches-rt/cpufreq-drop-K8-s-driver-from-beeing-selected.patch b/debian/patches-rt/cpufreq-drop-K8-s-driver-from-beeing-selected.patch
index 68fcf2158..4716458ed 100644
--- a/debian/patches-rt/cpufreq-drop-K8-s-driver-from-beeing-selected.patch
+++ b/debian/patches-rt/cpufreq-drop-K8-s-driver-from-beeing-selected.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 9 Apr 2015 15:23:01 +0200
Subject: cpufreq: drop K8's driver from being selected
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Ralf posted a picture of a backtrace from
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
-@@ -125,7 +125,7 @@ config X86_POWERNOW_K7_ACPI
+@@ -126,7 +126,7 @@ config X86_POWERNOW_K7_ACPI
config X86_POWERNOW_K8
tristate "AMD Opteron/Athlon64 PowerNow!"
diff --git a/debian/patches-rt/cpumask-disable-offstack-on-rt.patch b/debian/patches-rt/cpumask-disable-offstack-on-rt.patch
index 89663a612..15093882a 100644
--- a/debian/patches-rt/cpumask-disable-offstack-on-rt.patch
+++ b/debian/patches-rt/cpumask-disable-offstack-on-rt.patch
@@ -1,7 +1,7 @@
Subject: cpumask: Disable CONFIG_CPUMASK_OFFSTACK for RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 14 Dec 2011 01:03:49 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
There are "valid" GFP_ATOMIC allocations such as
@@ -47,7 +47,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
-@@ -949,7 +949,7 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT
+@@ -938,7 +938,7 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT
config MAXSMP
bool "Enable Maximum number of SMP Processors and NUMA Nodes"
depends on X86_64 && SMP && DEBUG_KERNEL
@@ -58,7 +58,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
If unsure, say N.
--- a/lib/Kconfig
+++ b/lib/Kconfig
-@@ -452,6 +452,7 @@ config CHECK_SIGNATURE
+@@ -469,6 +469,7 @@ config CHECK_SIGNATURE
config CPUMASK_OFFSTACK
bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
diff --git a/debian/patches-rt/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch b/debian/patches-rt/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch
index 4f408d677..d0802ea6c 100644
--- a/debian/patches-rt/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch
+++ b/debian/patches-rt/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch
@@ -1,7 +1,7 @@
From: Mike Galbraith <efault@gmx.de>
Date: Sun, 8 Jan 2017 09:32:25 +0100
Subject: [PATCH] cpuset: Convert callback_lock to raw_spinlock_t
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
The two commits below add up to a cpuset might_sleep() splat for RT:
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
-@@ -345,7 +345,7 @@ static struct cpuset top_cpuset = {
+@@ -333,7 +333,7 @@ static struct cpuset top_cpuset = {
*/
static DEFINE_MUTEX(cpuset_mutex);
@@ -60,7 +60,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static struct workqueue_struct *cpuset_migrate_mm_wq;
-@@ -1220,7 +1220,7 @@ static int update_parent_subparts_cpumas
+@@ -1234,7 +1234,7 @@ static int update_parent_subparts_cpumas
* Newly added CPUs will be removed from effective_cpus and
* newly deleted ones will be added back to effective_cpus.
*/
@@ -69,7 +69,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (adding) {
cpumask_or(parent->subparts_cpus,
parent->subparts_cpus, tmp->addmask);
-@@ -1239,7 +1239,7 @@ static int update_parent_subparts_cpumas
+@@ -1253,7 +1253,7 @@ static int update_parent_subparts_cpumas
}
parent->nr_subparts_cpus = cpumask_weight(parent->subparts_cpus);
@@ -78,7 +78,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return cmd == partcmd_update;
}
-@@ -1344,7 +1344,7 @@ static void update_cpumasks_hier(struct
+@@ -1358,7 +1358,7 @@ static void update_cpumasks_hier(struct
continue;
rcu_read_unlock();
@@ -87,7 +87,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
cpumask_copy(cp->effective_cpus, tmp->new_cpus);
if (cp->nr_subparts_cpus &&
-@@ -1375,7 +1375,7 @@ static void update_cpumasks_hier(struct
+@@ -1389,7 +1389,7 @@ static void update_cpumasks_hier(struct
= cpumask_weight(cp->subparts_cpus);
}
}
@@ -96,7 +96,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
WARN_ON(!is_in_v2_mode() &&
!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
-@@ -1493,7 +1493,7 @@ static int update_cpumask(struct cpuset
+@@ -1507,7 +1507,7 @@ static int update_cpumask(struct cpuset
return -EINVAL;
}
@@ -105,7 +105,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
/*
-@@ -1504,7 +1504,7 @@ static int update_cpumask(struct cpuset
+@@ -1518,7 +1518,7 @@ static int update_cpumask(struct cpuset
cs->cpus_allowed);
cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus);
}
@@ -114,7 +114,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
update_cpumasks_hier(cs, &tmp);
-@@ -1698,9 +1698,9 @@ static void update_nodemasks_hier(struct
+@@ -1712,9 +1712,9 @@ static void update_nodemasks_hier(struct
continue;
rcu_read_unlock();
@@ -126,7 +126,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
WARN_ON(!is_in_v2_mode() &&
!nodes_equal(cp->mems_allowed, cp->effective_mems));
-@@ -1768,9 +1768,9 @@ static int update_nodemask(struct cpuset
+@@ -1782,9 +1782,9 @@ static int update_nodemask(struct cpuset
if (retval < 0)
goto done;
@@ -138,7 +138,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* use trialcs->mems_allowed as a temp variable */
update_nodemasks_hier(cs, &trialcs->mems_allowed);
-@@ -1861,9 +1861,9 @@ static int update_flag(cpuset_flagbits_t
+@@ -1875,9 +1875,9 @@ static int update_flag(cpuset_flagbits_t
spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
|| (is_spread_page(cs) != is_spread_page(trialcs)));
@@ -150,7 +150,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
rebuild_sched_domains_locked();
-@@ -2366,7 +2366,7 @@ static int cpuset_common_seq_show(struct
+@@ -2380,7 +2380,7 @@ static int cpuset_common_seq_show(struct
cpuset_filetype_t type = seq_cft(sf)->private;
int ret = 0;
@@ -159,7 +159,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
switch (type) {
case FILE_CPULIST:
-@@ -2388,7 +2388,7 @@ static int cpuset_common_seq_show(struct
+@@ -2402,7 +2402,7 @@ static int cpuset_common_seq_show(struct
ret = -EINVAL;
}
@@ -168,7 +168,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ret;
}
-@@ -2698,14 +2698,14 @@ static int cpuset_css_online(struct cgro
+@@ -2712,14 +2712,14 @@ static int cpuset_css_online(struct cgro
cpuset_inc();
@@ -185,7 +185,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
goto out_unlock;
-@@ -2732,12 +2732,12 @@ static int cpuset_css_online(struct cgro
+@@ -2746,12 +2746,12 @@ static int cpuset_css_online(struct cgro
}
rcu_read_unlock();
@@ -200,7 +200,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
out_unlock:
mutex_unlock(&cpuset_mutex);
return 0;
-@@ -2790,7 +2790,7 @@ static void cpuset_css_free(struct cgrou
+@@ -2804,7 +2804,7 @@ static void cpuset_css_free(struct cgrou
static void cpuset_bind(struct cgroup_subsys_state *root_css)
{
mutex_lock(&cpuset_mutex);
@@ -209,7 +209,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (is_in_v2_mode()) {
cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
-@@ -2801,7 +2801,7 @@ static void cpuset_bind(struct cgroup_su
+@@ -2815,7 +2815,7 @@ static void cpuset_bind(struct cgroup_su
top_cpuset.mems_allowed = top_cpuset.effective_mems;
}
@@ -218,7 +218,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
mutex_unlock(&cpuset_mutex);
}
-@@ -2902,12 +2902,12 @@ hotplug_update_tasks_legacy(struct cpuse
+@@ -2916,12 +2916,12 @@ hotplug_update_tasks_legacy(struct cpuse
{
bool is_empty;
@@ -233,7 +233,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Don't call update_tasks_cpumask() if the cpuset becomes empty,
-@@ -2944,10 +2944,10 @@ hotplug_update_tasks(struct cpuset *cs,
+@@ -2958,10 +2958,10 @@ hotplug_update_tasks(struct cpuset *cs,
if (nodes_empty(*new_mems))
*new_mems = parent_cs(cs)->effective_mems;
@@ -246,7 +246,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (cpus_updated)
update_tasks_cpumask(cs);
-@@ -3102,7 +3102,7 @@ static void cpuset_hotplug_workfn(struct
+@@ -3116,7 +3116,7 @@ static void cpuset_hotplug_workfn(struct
/* synchronize cpus_allowed to cpu_active_mask */
if (cpus_updated) {
@@ -255,7 +255,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!on_dfl)
cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
/*
-@@ -3122,17 +3122,17 @@ static void cpuset_hotplug_workfn(struct
+@@ -3136,17 +3136,17 @@ static void cpuset_hotplug_workfn(struct
}
}
cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
@@ -276,7 +276,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
update_tasks_nodemask(&top_cpuset);
}
-@@ -3233,11 +3233,11 @@ void cpuset_cpus_allowed(struct task_str
+@@ -3247,11 +3247,11 @@ void cpuset_cpus_allowed(struct task_str
{
unsigned long flags;
@@ -289,8 +289,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ raw_spin_unlock_irqrestore(&callback_lock, flags);
}
- void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
-@@ -3285,11 +3285,11 @@ nodemask_t cpuset_mems_allowed(struct ta
+ /**
+@@ -3312,11 +3312,11 @@ nodemask_t cpuset_mems_allowed(struct ta
nodemask_t mask;
unsigned long flags;
@@ -304,7 +304,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return mask;
}
-@@ -3381,14 +3381,14 @@ bool __cpuset_node_allowed(int node, gfp
+@@ -3408,14 +3408,14 @@ bool __cpuset_node_allowed(int node, gfp
return true;
/* Not hardwall and node outside mems_allowed: scan up cpusets */
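The conversion itself is mechanical, as the hunks show: the definition and every lock site switch to the raw_ variants so the short critical sections stay truly atomic on RT:

    static DEFINE_RAW_SPINLOCK(callback_lock);

    /* and at each of the call sites above: */
        raw_spin_lock_irqsave(&callback_lock, flags);
        /* short, non-sleeping reads and updates of the cpuset masks */
        raw_spin_unlock_irqrestore(&callback_lock, flags);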
diff --git a/debian/patches-rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch b/debian/patches-rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch
index bfe7c7f1f..79d371f66 100644
--- a/debian/patches-rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch
+++ b/debian/patches-rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 21 Feb 2014 17:24:04 +0100
Subject: crypto: Reduce preempt disabled regions, more algos
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Don Estabrook reported
| kernel: WARNING: CPU: 2 PID: 858 at kernel/sched/core.c:2428 migrate_disable+0xed/0x100()
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/x86/crypto/cast5_avx_glue.c
+++ b/arch/x86/crypto/cast5_avx_glue.c
-@@ -61,7 +61,7 @@ static inline void cast5_fpu_end(bool fp
+@@ -46,7 +46,7 @@ static inline void cast5_fpu_end(bool fp
static int ecb_crypt(struct skcipher_request *req, bool enc)
{
@@ -53,7 +53,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
-@@ -76,7 +76,7 @@ static int ecb_crypt(struct skcipher_req
+@@ -61,7 +61,7 @@ static int ecb_crypt(struct skcipher_req
u8 *wsrc = walk.src.virt.addr;
u8 *wdst = walk.dst.virt.addr;
@@ -62,7 +62,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Process multi-block batch */
if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
-@@ -105,10 +105,9 @@ static int ecb_crypt(struct skcipher_req
+@@ -90,10 +90,9 @@ static int ecb_crypt(struct skcipher_req
} while (nbytes >= bsize);
done:
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return err;
}
-@@ -212,7 +211,7 @@ static int cbc_decrypt(struct skcipher_r
+@@ -197,7 +196,7 @@ static int cbc_decrypt(struct skcipher_r
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm);
@@ -83,7 +83,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct skcipher_walk walk;
unsigned int nbytes;
int err;
-@@ -220,12 +219,11 @@ static int cbc_decrypt(struct skcipher_r
+@@ -205,12 +204,11 @@ static int cbc_decrypt(struct skcipher_r
err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes)) {
@@ -98,7 +98,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return err;
}
-@@ -292,7 +290,7 @@ static int ctr_crypt(struct skcipher_req
+@@ -277,7 +275,7 @@ static int ctr_crypt(struct skcipher_req
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm);
@@ -107,7 +107,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct skcipher_walk walk;
unsigned int nbytes;
int err;
-@@ -300,13 +298,12 @@ static int ctr_crypt(struct skcipher_req
+@@ -285,13 +283,12 @@ static int ctr_crypt(struct skcipher_req
err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) {
@@ -125,7 +125,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
err = skcipher_walk_done(&walk, 0);
--- a/arch/x86/crypto/glue_helper.c
+++ b/arch/x86/crypto/glue_helper.c
-@@ -38,7 +38,7 @@ int glue_ecb_req_128bit(const struct com
+@@ -23,7 +23,7 @@ int glue_ecb_req_128bit(const struct com
void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
const unsigned int bsize = 128 / 8;
struct skcipher_walk walk;
@@ -134,7 +134,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
unsigned int nbytes;
int err;
-@@ -51,7 +51,7 @@ int glue_ecb_req_128bit(const struct com
+@@ -36,7 +36,7 @@ int glue_ecb_req_128bit(const struct com
unsigned int i;
fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
@@ -143,7 +143,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for (i = 0; i < gctx->num_funcs; i++) {
func_bytes = bsize * gctx->funcs[i].num_blocks;
-@@ -69,10 +69,9 @@ int glue_ecb_req_128bit(const struct com
+@@ -54,10 +54,9 @@ int glue_ecb_req_128bit(const struct com
if (nbytes < bsize)
break;
}
@@ -155,7 +155,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return err;
}
EXPORT_SYMBOL_GPL(glue_ecb_req_128bit);
-@@ -115,7 +114,7 @@ int glue_cbc_decrypt_req_128bit(const st
+@@ -100,7 +99,7 @@ int glue_cbc_decrypt_req_128bit(const st
void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
const unsigned int bsize = 128 / 8;
struct skcipher_walk walk;
@@ -164,7 +164,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
unsigned int nbytes;
int err;
-@@ -129,7 +128,7 @@ int glue_cbc_decrypt_req_128bit(const st
+@@ -114,7 +113,7 @@ int glue_cbc_decrypt_req_128bit(const st
u128 last_iv;
fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
@@ -173,7 +173,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Start of the last block. */
src += nbytes / bsize - 1;
dst += nbytes / bsize - 1;
-@@ -161,10 +160,10 @@ int glue_cbc_decrypt_req_128bit(const st
+@@ -146,10 +145,10 @@ int glue_cbc_decrypt_req_128bit(const st
done:
u128_xor(dst, dst, (u128 *)walk.iv);
*(u128 *)walk.iv = last_iv;
@@ -185,7 +185,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_decrypt_req_128bit);
-@@ -175,7 +174,7 @@ int glue_ctr_req_128bit(const struct com
+@@ -160,7 +159,7 @@ int glue_ctr_req_128bit(const struct com
void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
const unsigned int bsize = 128 / 8;
struct skcipher_walk walk;
@@ -194,7 +194,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
unsigned int nbytes;
int err;
-@@ -189,7 +188,7 @@ int glue_ctr_req_128bit(const struct com
+@@ -174,7 +173,7 @@ int glue_ctr_req_128bit(const struct com
le128 ctrblk;
fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
@@ -203,7 +203,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
be128_to_le128(&ctrblk, (be128 *)walk.iv);
-@@ -213,11 +212,10 @@ int glue_ctr_req_128bit(const struct com
+@@ -198,11 +197,10 @@ int glue_ctr_req_128bit(const struct com
}
le128_to_be128((be128 *)walk.iv, &ctrblk);
@@ -216,7 +216,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (nbytes) {
le128 ctrblk;
u128 tmp;
-@@ -278,7 +276,7 @@ int glue_xts_req_128bit(const struct com
+@@ -263,7 +261,7 @@ int glue_xts_req_128bit(const struct com
{
const unsigned int bsize = 128 / 8;
struct skcipher_walk walk;
@@ -225,7 +225,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
unsigned int nbytes;
int err;
-@@ -289,21 +287,24 @@ int glue_xts_req_128bit(const struct com
+@@ -274,21 +272,24 @@ int glue_xts_req_128bit(const struct com
/* set minimum length to bsize, for tweak_fn */
fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
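All of the hunks in this file apply one transformation; a sketch of the resulting loop shape, with the per-chunk cipher work elided:

    while ((nbytes = walk.nbytes)) {
        /* open the FPU section per chunk, not per request ... */
        bool fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
                                          &walk, false, nbytes);

        /* ... run the cipher on this chunk ... */

        /* ... and close it before the walk may sleep or reschedule */
        glue_fpu_end(fpu_enabled);
        err = skcipher_walk_done(&walk, nbytes);
    }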
diff --git a/debian/patches-rt/crypto-chtls-remove-cdev_list_lock.patch b/debian/patches-rt/crypto-chtls-remove-cdev_list_lock.patch
deleted file mode 100644
index 84eceffb4..000000000
--- a/debian/patches-rt/crypto-chtls-remove-cdev_list_lock.patch
+++ /dev/null
@@ -1,26 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 11 Feb 2019 11:06:11 +0100
-Subject: [PATCH] crypto: chtls: remove cdev_list_lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-Last user of cdev_list_lock was removed in commit
-
- 6422ccc5fbefb ("crypto/chelsio/chtls: listen fails with multiadapt")
-
-Cc: Atul Gupta <atul.gupta@chelsio.com>
-Cc: Harsh Jain <harsh@chelsio.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/crypto/chelsio/chtls/chtls_main.c | 1 -
- 1 file changed, 1 deletion(-)
-
---- a/drivers/crypto/chelsio/chtls/chtls_main.c
-+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
-@@ -30,7 +30,6 @@
- */
- static LIST_HEAD(cdev_list);
- static DEFINE_MUTEX(cdev_mutex);
--static DEFINE_MUTEX(cdev_list_lock);
-
- static DEFINE_MUTEX(notify_mutex);
- static RAW_NOTIFIER_HEAD(listen_notify_list);
diff --git a/debian/patches-rt/crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch b/debian/patches-rt/crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch
index 05610184e..ce929ef6f 100644
--- a/debian/patches-rt/crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch
+++ b/debian/patches-rt/crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 26 Jul 2018 18:52:00 +0200
Subject: [PATCH] crypto: cryptd - add a lock instead
preempt_disable/local_bh_disable
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
cryptd has a per-CPU lock which is protected with local_bh_disable() and
preempt_disable().
@@ -20,7 +20,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
-@@ -39,6 +39,7 @@ MODULE_PARM_DESC(cryptd_max_cpu_qlen, "S
+@@ -34,6 +34,7 @@ MODULE_PARM_DESC(cryptd_max_cpu_qlen, "S
struct cryptd_cpu_queue {
struct crypto_queue queue;
struct work_struct work;
@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
};
struct cryptd_queue {
-@@ -117,6 +118,7 @@ static int cryptd_init_queue(struct cryp
+@@ -103,6 +104,7 @@ static int cryptd_init_queue(struct cryp
cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
return 0;
-@@ -141,8 +143,10 @@ static int cryptd_enqueue_request(struct
+@@ -127,8 +129,10 @@ static int cryptd_enqueue_request(struct
struct cryptd_cpu_queue *cpu_queue;
atomic_t *refcnt;
@@ -49,7 +49,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
err = crypto_enqueue_request(&cpu_queue->queue, request);
refcnt = crypto_tfm_ctx(request->tfm);
-@@ -158,7 +162,7 @@ static int cryptd_enqueue_request(struct
+@@ -144,7 +148,7 @@ static int cryptd_enqueue_request(struct
atomic_inc(refcnt);
out_put_cpu:
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return err;
}
-@@ -174,16 +178,11 @@ static void cryptd_queue_worker(struct w
+@@ -160,16 +164,11 @@ static void cryptd_queue_worker(struct w
cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
/*
* Only handle one request at a time to avoid hogging crypto workqueue.
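Put together, the enqueue path ends up roughly as below (the qlock member name and the elided refcount handling are assumptions where the hunks do not show them):

    static int cryptd_enqueue_request(struct cryptd_queue *queue,
                                      struct crypto_async_request *request)
    {
        struct cryptd_cpu_queue *cpu_queue;
        int err;

        cpu_queue = raw_cpu_ptr(queue->cpu_queue);
        spin_lock_bh(&cpu_queue->qlock);

        err = crypto_enqueue_request(&cpu_queue->queue, request);
        /* ... refcnt handling and queue_work_on(smp_processor_id(), ...) ... */

        spin_unlock_bh(&cpu_queue->qlock);
        return err;
    }

raw_cpu_ptr() is fine here because the lock, not CPU pinning, keeps the queue stable.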
diff --git a/debian/patches-rt/crypto-limit-more-FPU-enabled-sections.patch b/debian/patches-rt/crypto-limit-more-FPU-enabled-sections.patch
index f7433ecf9..85c675bf5 100644
--- a/debian/patches-rt/crypto-limit-more-FPU-enabled-sections.patch
+++ b/debian/patches-rt/crypto-limit-more-FPU-enabled-sections.patch
@@ -4,7 +4,7 @@ Subject: [PATCH] crypto: limit more FPU-enabled sections
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Those crypto drivers use SSE/AVX/… for their crypto work and in order to
do so in kernel they need to enable the "FPU" in kernel mode which
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/x86/crypto/chacha_glue.c
+++ b/arch/x86/crypto/chacha_glue.c
-@@ -131,7 +131,6 @@ static int chacha_simd_stream_xor(struct
+@@ -127,7 +127,6 @@ static int chacha_simd_stream_xor(struct
struct chacha_ctx *ctx, u8 *iv)
{
u32 *state, state_buf[16 + 2] __aligned(8);
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int err = 0;
BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
-@@ -144,20 +143,14 @@ static int chacha_simd_stream_xor(struct
+@@ -140,20 +139,14 @@ static int chacha_simd_stream_xor(struct
if (nbytes < walk->total) {
nbytes = round_down(nbytes, walk->stride);
@@ -79,7 +79,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* Use fpregs_lock() while editing CPU's FPU registers or fpu->state.
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
-@@ -133,6 +133,18 @@ void kernel_fpu_end(void)
+@@ -134,6 +134,18 @@ void kernel_fpu_end(void)
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);
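The dozen lines added after kernel_fpu_end() are plausibly the kernel_fpu_resched() helper the description alludes to; a sketch under that assumption:

    void kernel_fpu_resched(void)
    {
        WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));

        if (should_resched(PREEMPT_OFFSET)) {
            kernel_fpu_end();
            cond_resched();
            kernel_fpu_begin();
        }
    }
    EXPORT_SYMBOL(kernel_fpu_resched);

Long SIMD loops call this between blocks, so the FPU-enabled section stays bounded without surrendering the registers on every iteration.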
diff --git a/debian/patches-rt/crypto-scompress-serialize-RT-percpu-scratch-buffer-.patch b/debian/patches-rt/crypto-scompress-serialize-RT-percpu-scratch-buffer-.patch
deleted file mode 100644
index 279579609..000000000
--- a/debian/patches-rt/crypto-scompress-serialize-RT-percpu-scratch-buffer-.patch
+++ /dev/null
@@ -1,77 +0,0 @@
-From: Mike Galbraith <efault@gmx.de>
-Date: Wed, 11 Jul 2018 17:14:47 +0200
-Subject: [PATCH] crypto: scompress - serialize RT percpu scratch buffer
- access with a local lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-| BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:974
-| in_atomic(): 1, irqs_disabled(): 0, pid: 1401, name: cryptomgr_test
-| Preemption disabled at:
-| [<ffff00000849941c>] scomp_acomp_comp_decomp+0x34/0x1a0
-| CPU: 21 PID: 1401 Comm: cryptomgr_test Tainted: G W 4.16.18-rt9-rt #1
-| Hardware name: www.cavium.com crb-1s/crb-1s, BIOS 0.3 Apr 25 2017
-| Call trace:
-| dump_backtrace+0x0/0x1c8
-| show_stack+0x24/0x30
-| dump_stack+0xac/0xe8
-| ___might_sleep+0x124/0x188
-| rt_spin_lock+0x40/0x88
-| zip_load_instr+0x44/0x170 [thunderx_zip]
-| zip_deflate+0x184/0x378 [thunderx_zip]
-| zip_compress+0xb0/0x130 [thunderx_zip]
-| zip_scomp_compress+0x48/0x60 [thunderx_zip]
-| scomp_acomp_comp_decomp+0xd8/0x1a0
-| scomp_acomp_compress+0x24/0x30
-| test_acomp+0x15c/0x558
-| alg_test_comp+0xc0/0x128
-| alg_test.part.6+0x120/0x2c0
-| alg_test+0x6c/0xa0
-| cryptomgr_test+0x50/0x58
-| kthread+0x134/0x138
-| ret_from_fork+0x10/0x18
-
-Mainline disables preemption to serialize percpu scratch buffer access,
-causing the splat above. Serialize with a local lock for RT instead.
-
-Signed-off-by: Mike Galbraith <efault@gmx.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- crypto/scompress.c | 6 ++++--
- 1 file changed, 4 insertions(+), 2 deletions(-)
-
---- a/crypto/scompress.c
-+++ b/crypto/scompress.c
-@@ -24,6 +24,7 @@
- #include <linux/cryptouser.h>
- #include <net/netlink.h>
- #include <linux/scatterlist.h>
-+#include <linux/locallock.h>
- #include <crypto/scatterwalk.h>
- #include <crypto/internal/acompress.h>
- #include <crypto/internal/scompress.h>
-@@ -34,6 +35,7 @@ static void * __percpu *scomp_src_scratc
- static void * __percpu *scomp_dst_scratches;
- static int scomp_scratch_users;
- static DEFINE_MUTEX(scomp_lock);
-+static DEFINE_LOCAL_IRQ_LOCK(scomp_scratches_lock);
-
- #ifdef CONFIG_NET
- static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
-@@ -143,7 +145,7 @@ static int scomp_acomp_comp_decomp(struc
- void **tfm_ctx = acomp_tfm_ctx(tfm);
- struct crypto_scomp *scomp = *tfm_ctx;
- void **ctx = acomp_request_ctx(req);
-- const int cpu = get_cpu();
-+ const int cpu = local_lock_cpu(scomp_scratches_lock);
- u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
- u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
- int ret;
-@@ -178,7 +180,7 @@ static int scomp_acomp_comp_decomp(struc
- 1);
- }
- out:
-- put_cpu();
-+ local_unlock_cpu(scomp_scratches_lock);
- return ret;
- }
-
diff --git a/debian/patches-rt/crypto-user-remove-crypto_cfg_mutex.patch b/debian/patches-rt/crypto-user-remove-crypto_cfg_mutex.patch
deleted file mode 100644
index 5d1a065ac..000000000
--- a/debian/patches-rt/crypto-user-remove-crypto_cfg_mutex.patch
+++ /dev/null
@@ -1,26 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 11 Feb 2019 11:52:38 +0100
-Subject: [PATCH] crypto: user: remove crypto_cfg_mutex
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-crypto_cfg_mutex was never used since it got introduced in commit
-
- cac5818c25d04 ("crypto: user - Implement a generic crypto statistics")
-
-Cc: Corentin Labbe <clabbe@baylibre.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- crypto/crypto_user_stat.c | 2 --
- 1 file changed, 2 deletions(-)
-
---- a/crypto/crypto_user_stat.c
-+++ b/crypto/crypto_user_stat.c
-@@ -20,8 +20,6 @@
-
- #define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))
-
--static DEFINE_MUTEX(crypto_cfg_mutex);
--
- extern struct sock *crypto_nlsk;
-
- struct crypto_dump_info {
diff --git a/debian/patches-rt/debugobjects-rt.patch b/debian/patches-rt/debugobjects-rt.patch
index 9fe1b7363..a2f74c00e 100644
--- a/debian/patches-rt/debugobjects-rt.patch
+++ b/debian/patches-rt/debugobjects-rt.patch
@@ -1,7 +1,7 @@
Subject: debugobjects: Make RT aware
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 17 Jul 2011 21:41:35 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Avoid filling the pool / allocating memory with irqs off().
diff --git a/debian/patches-rt/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch b/debian/patches-rt/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
index 168ddb736..95dfd0c1c 100644
--- a/debian/patches-rt/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
+++ b/debian/patches-rt/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
@@ -2,7 +2,7 @@ From: Mike Galbraith <umgwanakikbuti@gmail.com>
Date: Thu, 31 Mar 2016 04:08:28 +0200
Subject: [PATCH] drivers/block/zram: Replace bit spinlocks with rtmutex
for -rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
They're nondeterministic, and lead to ___might_sleep() splats in -rt.
OTOH, they're a lot less wasteful than an rtmutex per page.
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline bool init_done(struct zram *zram)
{
-@@ -1153,6 +1188,7 @@ static bool zram_meta_alloc(struct zram
+@@ -1154,6 +1189,7 @@ static bool zram_meta_alloc(struct zram
if (!huge_class_size)
huge_class_size = zs_huge_class_size(zram->mem_pool);
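A hedged sketch of what the bit-spinlock replacement amounts to (the per-entry lock member is an assumption; mainline keeps the bit lock in the flags word):

    static void zram_slot_lock(struct zram *zram, u32 index)
    {
    #ifdef CONFIG_PREEMPT_RT_BASE
        spin_lock(&zram->table[index].lock);    /* sleeping lock on RT */
    #else
        bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags);
    #endif
    }

    static void zram_slot_unlock(struct zram *zram, u32 index)
    {
    #ifdef CONFIG_PREEMPT_RT_BASE
        spin_unlock(&zram->table[index].lock);
    #else
        bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
    #endif
    }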
diff --git a/debian/patches-rt/drivers-tty-fix-omap-lock-crap.patch b/debian/patches-rt/drivers-tty-fix-omap-lock-crap.patch
index 077846afb..f32227b9a 100644
--- a/debian/patches-rt/drivers-tty-fix-omap-lock-crap.patch
+++ b/debian/patches-rt/drivers-tty-fix-omap-lock-crap.patch
@@ -1,7 +1,7 @@
Subject: tty/serial/omap: Make the locking RT aware
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 28 Jul 2011 13:32:57 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
The lock is a sleeping lock and local_irq_save() is not the
optimisation we are looking for. Redo it to make it work on -RT and
diff --git a/debian/patches-rt/drivers-tty-pl011-irq-disable-madness.patch b/debian/patches-rt/drivers-tty-pl011-irq-disable-madness.patch
index 2157b3ac7..cf8f7a7e7 100644
--- a/debian/patches-rt/drivers-tty-pl011-irq-disable-madness.patch
+++ b/debian/patches-rt/drivers-tty-pl011-irq-disable-madness.patch
@@ -1,7 +1,7 @@
Subject: tty/serial/pl011: Make the locking work on RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 08 Jan 2013 21:36:51 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
The lock is a sleeping lock and local_irq_save() is not the optimisation
we are looking for. Redo it to make it work on -RT and non-RT.
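Both tty patches follow the same recipe; a hedged sketch of the console write path afterwards (not the verbatim hunk, field names per struct uart_port):

    unsigned long flags;
    int locked = 1;

    if (port->sysrq)
        locked = 0;                     /* lock already held by the caller */
    else if (oops_in_progress)
        locked = spin_trylock_irqsave(&port->lock, flags);
    else
        spin_lock_irqsave(&port->lock, flags);  /* was local_irq_save() + spin_lock() */

    /* ... write out the characters ... */

    if (locked)
        spin_unlock_irqrestore(&port->lock, flags);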
diff --git a/debian/patches-rt/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch b/debian/patches-rt/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch
index 729cd8cfd..687e74fe9 100644
--- a/debian/patches-rt/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch
+++ b/debian/patches-rt/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch
@@ -2,7 +2,7 @@ From: Mike Galbraith <umgwanakikbuti@gmail.com>
Date: Thu, 20 Oct 2016 11:15:22 +0200
Subject: [PATCH] drivers/zram: Don't disable preemption in
zcomp_stream_get/put()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
In v4.7, the driver switched to percpu compression streams, disabling
preemption via get/put_cpu_ptr(). Use a per-zcomp_strm lock here. We
@@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
-@@ -116,12 +116,20 @@ ssize_t zcomp_available_show(const char
+@@ -113,12 +113,20 @@ ssize_t zcomp_available_show(const char
struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
{
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
int zcomp_compress(struct zcomp_strm *zstrm,
-@@ -171,6 +179,7 @@ int zcomp_cpu_up_prepare(unsigned int cp
+@@ -168,6 +176,7 @@ int zcomp_cpu_up_prepare(unsigned int cp
pr_err("Can't allocate a compression stream\n");
return -ENOMEM;
}
@@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
--- a/drivers/block/zram/zcomp.h
+++ b/drivers/block/zram/zcomp.h
-@@ -14,6 +14,7 @@ struct zcomp_strm {
+@@ -10,6 +10,7 @@ struct zcomp_strm {
/* compression/decompression buffer */
void *buffer;
struct crypto_comp *tfm;
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* dynamic per-device compression frontend */
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
-@@ -1251,6 +1251,7 @@ static int __zram_bvec_read(struct zram
+@@ -1252,6 +1252,7 @@ static int __zram_bvec_read(struct zram
unsigned long handle;
unsigned int size;
void *src, *dst;
@@ -72,7 +72,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
zram_slot_lock(zram, index);
if (zram_test_flag(zram, index, ZRAM_WB)) {
-@@ -1281,6 +1282,7 @@ static int __zram_bvec_read(struct zram
+@@ -1282,6 +1283,7 @@ static int __zram_bvec_read(struct zram
size = zram_get_obj_size(zram, index);
@@ -80,7 +80,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
if (size == PAGE_SIZE) {
dst = kmap_atomic(page);
-@@ -1288,14 +1290,13 @@ static int __zram_bvec_read(struct zram
+@@ -1289,14 +1291,13 @@ static int __zram_bvec_read(struct zram
kunmap_atomic(dst);
ret = 0;
} else {
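Assembled from the hunks, the get/put pair becomes roughly the following (zcomp_lock is the member added to struct zcomp_strm above; get_local_ptr()/put_local_ptr() are assumed to be the RT counterparts of get/put_cpu_ptr() that leave preemption enabled):

    struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
    {
        struct zcomp_strm *zstrm;

        zstrm = *get_local_ptr(comp->stream);
        spin_lock(&zstrm->zcomp_lock);
        return zstrm;
    }

    void zcomp_stream_put(struct zcomp *comp)
    {
        struct zcomp_strm *zstrm;

        zstrm = *this_cpu_ptr(comp->stream);
        spin_unlock(&zstrm->zcomp_lock);
        put_local_ptr(zstrm);
    }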
diff --git a/debian/patches-rt/drm-i915-Don-t-disable-interrupts-independently-of-t.patch b/debian/patches-rt/drm-i915-Don-t-disable-interrupts-independently-of-t.patch
index 1c8bd8771..eabf6e19d 100644
--- a/debian/patches-rt/drm-i915-Don-t-disable-interrupts-independently-of-t.patch
+++ b/debian/patches-rt/drm-i915-Don-t-disable-interrupts-independently-of-t.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 10 Apr 2019 11:01:37 +0200
Subject: [PATCH] drm/i915: Don't disable interrupts independently of the
lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
The locks (timeline->lock and rq->lock) need to be taken with disabled
interrupts. This is done in __retire_engine_request() by disabling the
@@ -20,7 +20,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
-@@ -278,9 +278,7 @@ static void __retire_engine_request(stru
+@@ -202,9 +202,7 @@ static void __retire_engine_request(stru
GEM_BUG_ON(!i915_request_completed(rq));
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
GEM_BUG_ON(!list_is_first(&rq->link, &engine->timeline.requests));
list_del_init(&rq->link);
spin_unlock(&engine->timeline.lock);
-@@ -294,9 +292,7 @@ static void __retire_engine_request(stru
+@@ -219,9 +217,7 @@ static void __retire_engine_request(stru
GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
}
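The change is mechanical; roughly:

    spin_lock_irq(&engine->timeline.lock);  /* was local_irq_disable(); spin_lock(); */
    GEM_BUG_ON(!list_is_first(&rq->link, &engine->timeline.requests));
    list_del_init(&rq->link);
    spin_unlock(&engine->timeline.lock);    /* irqs stay off across the rq->lock block */

    spin_lock(&rq->lock);
    /* ... retire bookkeeping ... */
    spin_unlock_irq(&rq->lock);             /* was spin_unlock(); local_irq_enable(); */

Tying the irq state to a lock operation is what lets RT substitute its sleeping-lock semantics cleanly.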
diff --git a/debian/patches-rt/drm-i915-disable-tracing-on-RT.patch b/debian/patches-rt/drm-i915-disable-tracing-on-RT.patch
index d53862a57..25859565d 100644
--- a/debian/patches-rt/drm-i915-disable-tracing-on-RT.patch
+++ b/debian/patches-rt/drm-i915-disable-tracing-on-RT.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 6 Dec 2018 09:52:20 +0100
Subject: [PATCH] drm/i915: disable tracing on -RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Luca Abeni reported this:
| BUG: scheduling while atomic: kworker/u8:2/15203/0x00000003
diff --git a/debian/patches-rt/drm-i915-fence-Do-not-use-TIMER_IRQSAFE.patch b/debian/patches-rt/drm-i915-fence-Do-not-use-TIMER_IRQSAFE.patch
deleted file mode 100644
index 6997f680f..000000000
--- a/debian/patches-rt/drm-i915-fence-Do-not-use-TIMER_IRQSAFE.patch
+++ /dev/null
@@ -1,35 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 12 Feb 2019 12:45:19 +0100
-Subject: [PATCH] drm/i915/fence: Do not use TIMER_IRQSAFE
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-The timer is initialized with the TIMER_IRQSAFE flag. It does not look like
-the timer callback requires this flag at all. Its sole purpose is to ensure
-synchronisation in the workqueue code.
-
-Remove TIMER_IRQSAFE flag because it is not required.
-
-Cc: Jani Nikula <jani.nikula@linux.intel.com>
-Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
-Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
-Cc: David Airlie <airlied@linux.ie>
-Cc: Daniel Vetter <daniel@ffwll.ch>
-Cc: intel-gfx@lists.freedesktop.org
-Cc: dri-devel@lists.freedesktop.org
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/gpu/drm/i915/i915_sw_fence.c | 3 +--
- 1 file changed, 1 insertion(+), 2 deletions(-)
-
---- a/drivers/gpu/drm/i915/i915_sw_fence.c
-+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
-@@ -461,8 +461,7 @@ int i915_sw_fence_await_dma_fence(struct
- timer->dma = dma_fence_get(dma);
- init_irq_work(&timer->work, irq_i915_sw_fence_work);
-
-- timer_setup(&timer->timer,
-- timer_i915_sw_fence_wake, TIMER_IRQSAFE);
-+ timer_setup(&timer->timer, timer_i915_sw_fence_wake, 0);
- mod_timer(&timer->timer, round_jiffies_up(jiffies + timeout));
-
- func = dma_i915_sw_fence_wake_timer;
diff --git a/debian/patches-rt/drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch b/debian/patches-rt/drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch
index 3ea18ebb6..4440eae99 100644
--- a/debian/patches-rt/drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch
+++ b/debian/patches-rt/drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 19 Dec 2018 10:47:02 +0100
Subject: [PATCH] drm/i915: skip DRM_I915_LOW_LEVEL_TRACEPOINTS with NOTRACE
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
The order of the header files is important. If this header file is
included after tracepoint.h was included then the NOTRACE here becomes a
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
-@@ -683,7 +683,7 @@ DEFINE_EVENT(i915_request, i915_request_
+@@ -733,7 +733,7 @@ DEFINE_EVENT(i915_request, i915_request_
TP_ARGS(rq)
);
diff --git a/debian/patches-rt/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch b/debian/patches-rt/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch
index 860a5a52a..22f55a09b 100644
--- a/debian/patches-rt/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch
+++ b/debian/patches-rt/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch
@@ -1,7 +1,7 @@
Subject: drm,i915: Use local_lock/unlock_irq() in intel_pipe_update_start/end()
From: Mike Galbraith <umgwanakikbuti@gmail.com>
Date: Sat, 27 Feb 2016 09:01:42 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
[ 8.014039] BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:918
@@ -62,15 +62,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
-@@ -36,6 +36,7 @@
- #include <drm/drm_rect.h>
- #include <drm/drm_atomic.h>
+@@ -38,6 +38,7 @@
#include <drm/drm_plane_helper.h>
-+#include <linux/locallock.h>
- #include "intel_drv.h"
- #include "intel_frontbuffer.h"
+ #include <drm/drm_rect.h>
#include <drm/i915_drm.h>
-@@ -61,6 +62,8 @@ int intel_usecs_to_scanlines(const struc
++#include <linux/locallock.h>
+
+ #include "i915_drv.h"
+ #include "intel_atomic_plane.h"
+@@ -79,6 +80,8 @@ int intel_usecs_to_scanlines(const struc
#define VBLANK_EVASION_TIME_US 100
#endif
@@ -79,7 +79,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* intel_pipe_update_start() - start update of a set of display registers
* @new_crtc_state: the new crtc state
-@@ -110,7 +113,7 @@ void intel_pipe_update_start(const struc
+@@ -128,7 +131,7 @@ void intel_pipe_update_start(const struc
DRM_ERROR("PSR idle timed out 0x%x, atomic update may fail\n",
psr_status);
@@ -88,7 +88,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
crtc->debug.min_vbl = min;
crtc->debug.max_vbl = max;
-@@ -134,11 +137,11 @@ void intel_pipe_update_start(const struc
+@@ -152,11 +155,11 @@ void intel_pipe_update_start(const struc
break;
}
@@ -102,7 +102,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
finish_wait(wq, &wait);
-@@ -171,7 +174,7 @@ void intel_pipe_update_start(const struc
+@@ -189,7 +192,7 @@ void intel_pipe_update_start(const struc
return;
irq_disable:
@@ -111,7 +111,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -207,7 +210,7 @@ void intel_pipe_update_end(struct intel_
+@@ -225,7 +228,7 @@ void intel_pipe_update_end(struct intel_
new_crtc_state->base.event = NULL;
}
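The moving parts, assembled (pipe_update_lock is the local lock the two-line hunk above defines):

    static DEFINE_LOCAL_IRQ_LOCK(pipe_update_lock);

    /* in intel_pipe_update_start(): */
        local_lock_irq(pipe_update_lock);   /* was local_irq_disable() */

    /* ... vblank-evasion critical section; briefly unlocked around
     * the timeout wait, as the hunks above show ... */

    /* in intel_pipe_update_end(): */
        local_unlock_irq(pipe_update_lock); /* was local_irq_enable() */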
diff --git a/debian/patches-rt/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch b/debian/patches-rt/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
index c921f819a..0a7a0c02f 100644
--- a/debian/patches-rt/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
+++ b/debian/patches-rt/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
@@ -1,7 +1,7 @@
Subject: drm,radeon,i915: Use preempt_disable/enable_rt() where recommended
From: Mike Galbraith <umgwanakikbuti@gmail.com>
Date: Sat, 27 Feb 2016 08:09:11 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
DRM folks identified the spots, so use them.
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
-@@ -1025,6 +1025,7 @@ static bool i915_get_crtc_scanoutpos(str
+@@ -1100,6 +1100,7 @@ static bool i915_get_crtc_scanoutpos(str
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Get optional system timestamp before query. */
if (stime)
-@@ -1076,6 +1077,7 @@ static bool i915_get_crtc_scanoutpos(str
+@@ -1151,6 +1152,7 @@ static bool i915_get_crtc_scanoutpos(str
*etime = ktime_get();
/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
@@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
-@@ -1813,6 +1813,7 @@ int radeon_get_crtc_scanoutpos(struct dr
+@@ -1814,6 +1814,7 @@ int radeon_get_crtc_scanoutpos(struct dr
struct radeon_device *rdev = dev->dev_private;
/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
@@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Get optional system timestamp before query. */
if (stime)
-@@ -1905,6 +1906,7 @@ int radeon_get_crtc_scanoutpos(struct dr
+@@ -1906,6 +1907,7 @@ int radeon_get_crtc_scanoutpos(struct dr
*etime = ktime_get();
/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
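Both drivers get the identical two lines; in place they read:

    spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
    preempt_disable_rt();   /* no-op unless PREEMPT_RT is enabled */

    /* ... sample stime, the scanout position and etime back to back ... */

    preempt_enable_rt();
    spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

On RT the spinlock no longer disables preemption, so the explicit pair keeps the timestamp/scanout sampling atomic, as the in-tree comments already suggest.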
diff --git a/debian/patches-rt/efi-Allow-efi-runtime.patch b/debian/patches-rt/efi-Allow-efi-runtime.patch
index 0a03f3f11..a066773ba 100644
--- a/debian/patches-rt/efi-Allow-efi-runtime.patch
+++ b/debian/patches-rt/efi-Allow-efi-runtime.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 26 Jul 2018 15:06:10 +0200
Subject: [PATCH] efi: Allow efi=runtime
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
In case the option "efi=noruntime" is the default at build time, the user
can override that state with `efi=runtime' and allow it again.
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
-@@ -114,6 +114,9 @@ static int __init parse_efi_cmdline(char
+@@ -113,6 +113,9 @@ static int __init parse_efi_cmdline(char
if (parse_option_str(str, "noruntime"))
disable_runtime = true;
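Per the description, the branch being added next to this context is simply:

    if (parse_option_str(str, "runtime"))
        disable_runtime = false;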
diff --git a/debian/patches-rt/efi-Disable-runtime-services-on-RT.patch b/debian/patches-rt/efi-Disable-runtime-services-on-RT.patch
index 5010060c2..6f0abe735 100644
--- a/debian/patches-rt/efi-Disable-runtime-services-on-RT.patch
+++ b/debian/patches-rt/efi-Disable-runtime-services-on-RT.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 26 Jul 2018 15:03:16 +0200
Subject: [PATCH] efi: Disable runtime services on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Based on measurements, the EFI functions get_variable /
get_next_variable take up to 2us, which looks okay.
@@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
-@@ -88,7 +88,7 @@ struct mm_struct efi_mm = {
+@@ -87,7 +87,7 @@ struct mm_struct efi_mm = {
struct workqueue_struct *efi_rts_wq;
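A sketch of the one-line change to the flag's definition (the config symbol is an assumption):

    static bool disable_runtime = IS_ENABLED(CONFIG_PREEMPT_RT_BASE);

That is, runtime services default to off on RT kernels and can still be re-enabled with efi=runtime via the previous patch.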
diff --git a/debian/patches-rt/epoll-use-get-cpu-light.patch b/debian/patches-rt/epoll-use-get-cpu-light.patch
index 4c0dc5c56..9ae66f90e 100644
--- a/debian/patches-rt/epoll-use-get-cpu-light.patch
+++ b/debian/patches-rt/epoll-use-get-cpu-light.patch
@@ -1,7 +1,7 @@
Subject: fs/epoll: Do not disable preemption on RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 08 Jul 2011 16:35:35 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
ep_call_nested() takes a sleeping lock so we can't disable preemption.
The light version is enough since ep_call_nested() doesn't mind being
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
-@@ -571,12 +571,12 @@ static int ep_poll_wakeup_proc(void *pri
+@@ -567,12 +567,12 @@ static int ep_poll_wakeup_proc(void *pri
static void ep_poll_safewake(wait_queue_head_t *wq)
{
diff --git a/debian/patches-rt/fs-aio-simple-simple-work.patch b/debian/patches-rt/fs-aio-simple-simple-work.patch
index bc9cf33e9..a5ca639b3 100644
--- a/debian/patches-rt/fs-aio-simple-simple-work.patch
+++ b/debian/patches-rt/fs-aio-simple-simple-work.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 16 Feb 2015 18:49:10 +0100
Subject: fs/aio: simple simple work
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:768
|in_atomic(): 1, irqs_disabled(): 0, pid: 26, name: rcuos/2
@@ -34,23 +34,23 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
long nr_pages;
struct rcu_work free_rwork; /* see free_ioctx() */
-+ struct kthread_work free_kwork; /* see free_ioctx() */
++ struct work_struct free_work; /* see free_ioctx() */
/*
* signals when all in-flight requests are done
-@@ -613,9 +614,9 @@ static void free_ioctx_reqs(struct percp
+@@ -612,9 +613,9 @@ static void free_ioctx_reqs(struct percp
* and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
* now it's safe to cancel any that need to be.
*/
-static void free_ioctx_users(struct percpu_ref *ref)
-+static void free_ioctx_users_work(struct kthread_work *work)
++static void free_ioctx_users_work(struct work_struct *work)
{
- struct kioctx *ctx = container_of(ref, struct kioctx, users);
-+ struct kioctx *ctx = container_of(work, struct kioctx, free_kwork);
++ struct kioctx *ctx = container_of(work, struct kioctx, free_work);
struct aio_kiocb *req;
spin_lock_irq(&ctx->ctx_lock);
-@@ -633,6 +634,14 @@ static void free_ioctx_users(struct perc
+@@ -632,6 +633,14 @@ static void free_ioctx_users(struct perc
percpu_ref_put(&ctx->reqs);
}
@@ -58,8 +58,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+{
+ struct kioctx *ctx = container_of(ref, struct kioctx, users);
+
-+ kthread_init_work(&ctx->free_kwork, free_ioctx_users_work);
-+ kthread_schedule_work(&ctx->free_kwork);
++ INIT_WORK(&ctx->free_work, free_ioctx_users_work);
++ schedule_work(&ctx->free_work);
+}
+
static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
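Put together, this revision of the patch defers the sleeping part to a regular workqueue item instead of the earlier kthread_work:

    static void free_ioctx_users_work(struct work_struct *work)
    {
        struct kioctx *ctx = container_of(work, struct kioctx, free_work);

        /* ... cancel in-flight kiocbs under ctx_lock, drop refs ... */
    }

    static void free_ioctx_users(struct percpu_ref *ref)
    {
        struct kioctx *ctx = container_of(ref, struct kioctx, users);

        INIT_WORK(&ctx->free_work, free_ioctx_users_work);
        schedule_work(&ctx->free_work);
    }

The percpu_ref release callback therefore never sleeps itself; everything that might is pushed out to the workqueue.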
diff --git a/debian/patches-rt/fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-in.patch b/debian/patches-rt/fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-in.patch
index 28f9676ab..aef4fc929 100644
--- a/debian/patches-rt/fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-in.patch
+++ b/debian/patches-rt/fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-in.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 13 Sep 2017 12:32:34 +0200
Subject: [PATCH] fs/dcache: bring back explicit INIT_HLIST_BL_HEAD init
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Commit 3d375d78593c ("mm: update callers to use HASH_ZERO flag") removed
INIT_HLIST_BL_HEAD and uses the ZERO flag instead for the init. However
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/fs/dcache.c
+++ b/fs/dcache.c
-@@ -3070,6 +3070,8 @@ static int __init set_dhash_entries(char
+@@ -3075,6 +3075,8 @@ static int __init set_dhash_entries(char
static void __init dcache_init_early(void)
{
@@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* If hashes are distributed across NUMA nodes, defer
* hash allocation until vmalloc space is available.
*/
-@@ -3086,11 +3088,16 @@ static void __init dcache_init_early(voi
+@@ -3091,11 +3093,16 @@ static void __init dcache_init_early(voi
NULL,
0,
0);
@@ -41,7 +41,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* A constructor could be added for stable state like the lists,
* but it is probably not worth it because of the cache nature
-@@ -3114,6 +3121,10 @@ static void __init dcache_init(void)
+@@ -3119,6 +3126,10 @@ static void __init dcache_init(void)
NULL,
0,
0);
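The explicit init brought back here is a plain loop over the hash buckets; a sketch (the loop bound follows the usual d_hash_shift sizing, an assumption at this point of the function):

    unsigned int loop;

    for (loop = 0; loop < (1U << d_hash_shift); loop++)
        INIT_HLIST_BL_HEAD(dentry_hashtable + loop);

On RT the hlist_bl head carries a real spinlock, so zeroed memory is not a valid initial state and HASH_ZERO alone does not suffice.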
diff --git a/debian/patches-rt/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch b/debian/patches-rt/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch
index 998249e73..e3cf0e934 100644
--- a/debian/patches-rt/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch
+++ b/debian/patches-rt/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 20 Oct 2017 11:29:53 +0200
Subject: [PATCH] fs/dcache: disable preemption on i_dir_seq's write side
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
i_dir_seq is an open-coded seqcounter. Based on the code it looks like we
could have two writers in parallel despite the fact that the d_lock is
@@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/fs/dcache.c
+++ b/fs/dcache.c
-@@ -2412,9 +2412,10 @@ EXPORT_SYMBOL(d_rehash);
+@@ -2418,9 +2418,10 @@ EXPORT_SYMBOL(d_rehash);
static inline unsigned start_dir_add(struct inode *dir)
{
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return n;
cpu_relax();
}
-@@ -2422,7 +2423,8 @@ static inline unsigned start_dir_add(str
+@@ -2428,7 +2429,8 @@ static inline unsigned start_dir_add(str
static inline void end_dir_add(struct inode *dir, unsigned n)
{
@@ -47,7 +47,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void d_wait_lookup(struct dentry *dentry)
-@@ -2455,7 +2457,7 @@ struct dentry *d_alloc_parallel(struct d
+@@ -2461,7 +2463,7 @@ struct dentry *d_alloc_parallel(struct d
retry:
rcu_read_lock();
@@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
r_seq = read_seqbegin(&rename_lock);
dentry = __d_lookup_rcu(parent, name, &d_seq);
if (unlikely(dentry)) {
-@@ -2483,7 +2485,7 @@ struct dentry *d_alloc_parallel(struct d
+@@ -2489,7 +2491,7 @@ struct dentry *d_alloc_parallel(struct d
}
hlist_bl_lock(b);
@@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto retry;
--- a/fs/inode.c
+++ b/fs/inode.c
-@@ -155,7 +155,7 @@ int inode_init_always(struct super_block
+@@ -156,7 +156,7 @@ int inode_init_always(struct super_block
inode->i_bdev = NULL;
inode->i_cdev = NULL;
inode->i_link = NULL;
@@ -78,7 +78,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/fs/libfs.c
+++ b/fs/libfs.c
-@@ -90,7 +90,7 @@ static struct dentry *next_positive(stru
+@@ -91,7 +91,7 @@ static struct dentry *next_positive(stru
struct list_head *from,
int count)
{
@@ -87,7 +87,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct dentry *res;
struct list_head *p;
bool skipped;
-@@ -123,8 +123,9 @@ static struct dentry *next_positive(stru
+@@ -124,8 +124,9 @@ static struct dentry *next_positive(stru
static void move_cursor(struct dentry *cursor, struct list_head *after)
{
struct dentry *parent = cursor->d_parent;
@@ -98,7 +98,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for (;;) {
n = *seq;
if (!(n & 1) && cmpxchg(seq, n, n + 1) == n)
-@@ -137,6 +138,7 @@ static void move_cursor(struct dentry *c
+@@ -138,6 +139,7 @@ static void move_cursor(struct dentry *c
else
list_add_tail(&cursor->d_child, &parent->d_subdirs);
smp_store_release(seq, n + 2);
@@ -108,7 +108,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
-@@ -694,7 +694,7 @@ struct inode {
+@@ -709,7 +709,7 @@ struct inode {
struct block_device *i_bdev;
struct cdev *i_cdev;
char *i_link;
diff --git a/debian/patches-rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch b/debian/patches-rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch
index 47b197bf5..03a0c73c3 100644
--- a/debian/patches-rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch
+++ b/debian/patches-rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch
@@ -1,7 +1,7 @@
Subject: fs: dcache: Use cpu_chill() in trylock loops
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 07 Mar 2012 21:00:34 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Retry loops on RT might loop forever when the modifying side was
preempted. Use cpu_chill() instead of cpu_relax() to let the system
@@ -16,15 +16,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/fs/autofs/expire.c
+++ b/fs/autofs/expire.c
-@@ -8,6 +8,7 @@
- * option, any later version, incorporated herein by reference.
+@@ -5,6 +5,7 @@
+ * Copyright 2001-2006 Ian Kent <raven@themaw.net>
*/
+#include <linux/delay.h>
#include "autofs_i.h"
/* Check if a dentry can be expired */
-@@ -153,7 +154,7 @@ static struct dentry *get_next_positive_
+@@ -150,7 +151,7 @@ static struct dentry *get_next_positive_
parent = p->d_parent;
if (!spin_trylock(&parent->d_lock)) {
spin_unlock(&p->d_lock);
@@ -43,7 +43,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/idr.h>
-@@ -324,8 +325,11 @@ int __mnt_want_write(struct vfsmount *m)
+@@ -326,8 +327,11 @@ int __mnt_want_write(struct vfsmount *m)
* incremented count after it has set MNT_WRITE_HOLD.
*/
smp_mb();
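The conversion is mechanical; a representative trylock retry (hypothetical locks and label, shape taken from the autofs hunk above) before and after:

	/* Before: cpu_relax() busy-waits; if the d_lock holder was
	 * preempted on RT, this can spin forever. */
	if (!spin_trylock(&parent->d_lock)) {
		spin_unlock(&p->d_lock);
		cpu_relax();
		goto relock;
	}

	/* After: cpu_chill() (an RT helper from <linux/delay.h>, hence
	 * the added include) sleeps briefly so the preempted lock
	 * holder gets a chance to run and release the lock. */
	if (!spin_trylock(&parent->d_lock)) {
		spin_unlock(&p->d_lock);
		cpu_chill();
		goto relock;
	}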
diff --git a/debian/patches-rt/fs-dcache-use-swait_queue-instead-of-waitqueue.patch b/debian/patches-rt/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
index 8b8aa6c48..9e49e32f0 100644
--- a/debian/patches-rt/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
+++ b/debian/patches-rt/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
@@ -1,13 +1,14 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 14 Sep 2016 14:35:49 +0200
Subject: [PATCH] fs/dcache: use swait_queue instead of waitqueue
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
__d_lookup_done() invokes wake_up_all() while holding a hlist_bl_lock()
which disables preemption. As a workaround convert it to swait.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
+ fs/afs/dir_silly.c | 2 +-
fs/cifs/readdir.c | 2 +-
fs/dcache.c | 27 +++++++++++++++------------
fs/fuse/readdir.c | 2 +-
@@ -19,8 +20,19 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
include/linux/dcache.h | 4 ++--
include/linux/nfs_xdr.h | 2 +-
kernel/sched/swait.c | 1 +
- 11 files changed, 29 insertions(+), 25 deletions(-)
+ 12 files changed, 30 insertions(+), 26 deletions(-)
+--- a/fs/afs/dir_silly.c
++++ b/fs/afs/dir_silly.c
+@@ -207,7 +207,7 @@ int afs_silly_iput(struct dentry *dentry
+ struct dentry *alias;
+ int ret;
+
+- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
+
+ _enter("%p{%pd},%llx", dentry, dentry, vnode->fid.vnode);
+
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -80,7 +80,7 @@ cifs_prime_dcache(struct dentry *parent,
@@ -34,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/fs/dcache.c
+++ b/fs/dcache.c
-@@ -2429,21 +2429,24 @@ static inline void end_dir_add(struct in
+@@ -2435,21 +2435,24 @@ static inline void end_dir_add(struct in
static void d_wait_lookup(struct dentry *dentry)
{
@@ -70,7 +82,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
unsigned int hash = name->hash;
struct hlist_bl_head *b = in_lookup_hash(parent, hash);
-@@ -2558,7 +2561,7 @@ void __d_lookup_done(struct dentry *dent
+@@ -2564,7 +2567,7 @@ void __d_lookup_done(struct dentry *dent
hlist_bl_lock(b);
dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
@@ -92,7 +104,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/fs/namei.c
+++ b/fs/namei.c
-@@ -1645,7 +1645,7 @@ static struct dentry *__lookup_slow(cons
+@@ -1643,7 +1643,7 @@ static struct dentry *__lookup_slow(cons
{
struct dentry *dentry, *old;
struct inode *inode = dir->d_inode;
@@ -101,7 +113,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Don't go there if it's already dead */
if (unlikely(IS_DEADDIR(inode)))
-@@ -3135,7 +3135,7 @@ static int lookup_open(struct nameidata
+@@ -3133,7 +3133,7 @@ static int lookup_open(struct nameidata
struct dentry *dentry;
int error, create_error = 0;
umode_t mode = op->mode;
@@ -112,7 +124,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return -ENOENT;
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
-@@ -440,7 +440,7 @@ static
+@@ -448,7 +448,7 @@ static
void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
{
struct qstr filename = QSTR_INIT(entry->name, entry->len);
@@ -121,7 +133,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct dentry *dentry;
struct dentry *alias;
struct inode *dir = d_inode(parent);
-@@ -1490,7 +1490,7 @@ int nfs_atomic_open(struct inode *dir, s
+@@ -1570,7 +1570,7 @@ int nfs_atomic_open(struct inode *dir, s
struct file *file, unsigned open_flags,
umode_t mode)
{
@@ -141,7 +153,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <linux/namei.h>
#include <linux/fsnotify.h>
-@@ -202,7 +202,7 @@ nfs_async_unlink(struct dentry *dentry,
+@@ -203,7 +203,7 @@ nfs_async_unlink(struct dentry *dentry,
data->cred = get_current_cred();
data->res.dir_attr = &data->dir_attr;
@@ -163,7 +175,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto end_instantiate;
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
-@@ -677,7 +677,7 @@ static bool proc_sys_fill_cache(struct f
+@@ -694,7 +694,7 @@ static bool proc_sys_fill_cache(struct f
child = d_lookup(dir, &qname);
if (!child) {
@@ -183,9 +195,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
};
struct list_head d_child; /* child of parent list */
struct list_head d_subdirs; /* our children */
-@@ -237,7 +237,7 @@ extern struct dentry * d_alloc(struct de
+@@ -236,7 +236,7 @@ extern void d_set_d_op(struct dentry *de
+ extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
extern struct dentry * d_alloc_anon(struct super_block *);
- extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *);
extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *,
- wait_queue_head_t *);
+ struct swait_queue_head *);
@@ -194,7 +206,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
-@@ -1559,7 +1559,7 @@ struct nfs_unlinkdata {
+@@ -1594,7 +1594,7 @@ struct nfs_unlinkdata {
struct nfs_removeargs args;
struct nfs_removeres res;
struct dentry *dentry;
diff --git a/debian/patches-rt/fs-jbd-replace-bh_state-lock.patch b/debian/patches-rt/fs-jbd-replace-bh_state-lock.patch
index def0b8000..23810ab9d 100644
--- a/debian/patches-rt/fs-jbd-replace-bh_state-lock.patch
+++ b/debian/patches-rt/fs-jbd-replace-bh_state-lock.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 18 Mar 2011 10:11:25 +0100
Subject: fs: jbd/jbd2: Make state lock and journal head lock rt safe
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
bit_spin_locks break under RT.
@@ -38,7 +38,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
-@@ -347,32 +347,56 @@ static inline struct journal_head *bh2jh
+@@ -344,32 +344,56 @@ static inline struct journal_head *bh2jh
static inline void jbd_lock_bh_state(struct buffer_head *bh)
{
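The wrapped form added by this patch looks roughly like the following sketch; the b_state_lock member is assumed to be added to struct buffer_head elsewhere in the RT tree.

static inline void jbd_lock_bh_state(struct buffer_head *bh)
{
#ifndef CONFIG_PREEMPT_RT_BASE
	bit_spin_lock(BH_State, &bh->b_state);	/* bit spinlock: fine on !RT */
#else
	spin_lock(&bh->b_state_lock);	/* real spinlock, sleepable on RT */
#endif
}

static inline void jbd_unlock_bh_state(struct buffer_head *bh)
{
#ifndef CONFIG_PREEMPT_RT_BASE
	bit_spin_unlock(BH_State, &bh->b_state);
#else
	spin_unlock(&bh->b_state_lock);
#endif
}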
diff --git a/debian/patches-rt/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch b/debian/patches-rt/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch
index 91f088a90..116866c8d 100644
--- a/debian/patches-rt/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch
+++ b/debian/patches-rt/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 15 Sep 2016 10:51:27 +0200
Subject: [PATCH] fs/nfs: turn rmdir_sem into a semaphore
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
The RW semaphore had a reader side which used the _non_owner version
because it most likely took the reader lock in one thread and released it
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
-@@ -1810,7 +1810,11 @@ int nfs_rmdir(struct inode *dir, struct
+@@ -1890,7 +1890,11 @@ int nfs_rmdir(struct inode *dir, struct
trace_nfs_rmdir_enter(dir, dentry);
if (d_really_is_positive(dentry)) {
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
/* Ensure the VFS deletes this inode */
switch (error) {
-@@ -1820,7 +1824,11 @@ int nfs_rmdir(struct inode *dir, struct
+@@ -1900,7 +1904,11 @@ int nfs_rmdir(struct inode *dir, struct
case -ENOENT:
nfs_dentry_handle_enoent(dentry);
}
@@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
trace_nfs_rmdir_exit(dir, dentry, error);
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
-@@ -2088,7 +2088,11 @@ static void init_once(void *foo)
+@@ -2089,7 +2089,11 @@ static void init_once(void *foo)
atomic_long_set(&nfsi->nrequests, 0);
atomic_long_set(&nfsi->commit_info.ncommit, 0);
atomic_set(&nfsi->commit_info.rpcs_out, 0);
@@ -62,7 +62,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
-@@ -52,6 +52,29 @@ static void nfs_async_unlink_done(struct
+@@ -53,6 +53,29 @@ static void nfs_async_unlink_done(struct
rpc_restart_call_prepare(task);
}
@@ -91,8 +91,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
/**
* nfs_async_unlink_release - Release the sillydelete data.
- * @task: rpc_task of the sillydelete
-@@ -65,7 +88,7 @@ static void nfs_async_unlink_release(voi
+ * @calldata: struct nfs_unlinkdata to release
+@@ -66,7 +89,7 @@ static void nfs_async_unlink_release(voi
struct dentry *dentry = data->dentry;
struct super_block *sb = dentry->d_sb;
@@ -101,7 +101,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
d_lookup_done(dentry);
nfs_free_unlinkdata(data);
dput(dentry);
-@@ -118,10 +141,10 @@ static int nfs_call_unlink(struct dentry
+@@ -119,10 +142,10 @@ static int nfs_call_unlink(struct dentry
struct inode *dir = d_inode(dentry->d_parent);
struct dentry *alias;
@@ -114,7 +114,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
}
if (!d_in_lookup(alias)) {
-@@ -143,7 +166,7 @@ static int nfs_call_unlink(struct dentry
+@@ -144,7 +167,7 @@ static int nfs_call_unlink(struct dentry
ret = 0;
spin_unlock(&alias->d_lock);
dput(alias);
@@ -125,7 +125,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* point dentry is definitely not a root, so we won't need
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
-@@ -166,7 +166,11 @@ struct nfs_inode {
+@@ -165,7 +165,11 @@ struct nfs_inode {
/* Readers: in-flight sillydelete RPC calls */
/* Writers: rmdir */
diff --git a/debian/patches-rt/fs-replace-bh_uptodate_lock-for-rt.patch b/debian/patches-rt/fs-replace-bh_uptodate_lock-for-rt.patch
index 79b922e47..74f23ccd9 100644
--- a/debian/patches-rt/fs-replace-bh_uptodate_lock-for-rt.patch
+++ b/debian/patches-rt/fs-replace-bh_uptodate_lock-for-rt.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 18 Mar 2011 09:18:52 +0100
Subject: buffer_head: Replace bh_uptodate_lock for -rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Wrap the bit_spin_lock calls into a separate inline and add the RT
replacements with a real spinlock.
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/fs/buffer.c
+++ b/fs/buffer.c
-@@ -274,8 +274,7 @@ static void end_buffer_async_read(struct
+@@ -275,8 +275,7 @@ static void end_buffer_async_read(struct
* decide that the page is now completely done.
*/
first = page_buffers(page);
@@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_buffer_async_read(bh);
unlock_buffer(bh);
tmp = bh;
-@@ -288,8 +287,7 @@ static void end_buffer_async_read(struct
+@@ -289,8 +288,7 @@ static void end_buffer_async_read(struct
}
tmp = tmp->b_this_page;
} while (tmp != bh);
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* If none of the buffers had errors and they are all
-@@ -301,9 +299,7 @@ static void end_buffer_async_read(struct
+@@ -302,9 +300,7 @@ static void end_buffer_async_read(struct
return;
still_busy:
@@ -47,7 +47,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -330,8 +326,7 @@ void end_buffer_async_write(struct buffe
+@@ -331,8 +327,7 @@ void end_buffer_async_write(struct buffe
}
first = page_buffers(page);
@@ -57,7 +57,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_buffer_async_write(bh);
unlock_buffer(bh);
-@@ -343,15 +338,12 @@ void end_buffer_async_write(struct buffe
+@@ -344,15 +339,12 @@ void end_buffer_async_write(struct buffe
}
tmp = tmp->b_this_page;
}
@@ -75,7 +75,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(end_buffer_async_write);
-@@ -3368,6 +3360,7 @@ struct buffer_head *alloc_buffer_head(gf
+@@ -3372,6 +3364,7 @@ struct buffer_head *alloc_buffer_head(gf
struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
if (ret) {
INIT_LIST_HEAD(&ret->b_assoc_buffers);
@@ -103,11 +103,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
- local_irq_restore(flags);
+ bh_uptodate_unlock_irqrestore(head, flags);
if (!under_io) {
- #ifdef CONFIG_EXT4_FS_ENCRYPTION
+ #ifdef CONFIG_FS_ENCRYPTION
if (data_page)
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
-@@ -106,8 +106,7 @@ static void ntfs_end_buffer_async_read(s
+@@ -92,8 +92,7 @@ static void ntfs_end_buffer_async_read(s
"0x%llx.", (unsigned long long)bh->b_blocknr);
}
first = page_buffers(page);
@@ -117,7 +117,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_buffer_async_read(bh);
unlock_buffer(bh);
tmp = bh;
-@@ -122,8 +121,7 @@ static void ntfs_end_buffer_async_read(s
+@@ -108,8 +107,7 @@ static void ntfs_end_buffer_async_read(s
}
tmp = tmp->b_this_page;
} while (tmp != bh);
@@ -127,7 +127,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* If none of the buffers had errors then we can set the page uptodate,
* but we first have to perform the post read mst fixups, if the
-@@ -156,9 +154,7 @@ static void ntfs_end_buffer_async_read(s
+@@ -142,9 +140,7 @@ static void ntfs_end_buffer_async_read(s
unlock_page(page);
return;
still_busy:
diff --git a/debian/patches-rt/fscache-initialize-cookie-hash-table-raw-spinlocks.patch b/debian/patches-rt/fscache-initialize-cookie-hash-table-raw-spinlocks.patch
index 11ea23fd9..c5937c5ae 100644
--- a/debian/patches-rt/fscache-initialize-cookie-hash-table-raw-spinlocks.patch
+++ b/debian/patches-rt/fscache-initialize-cookie-hash-table-raw-spinlocks.patch
@@ -1,7 +1,7 @@
From: Clark Williams <williams@redhat.com>
Date: Tue, 3 Jul 2018 13:34:30 -0500
Subject: [PATCH] fscache: initialize cookie hash table raw spinlocks
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
The fscache cookie mechanism uses a hash table of hlist_bl_head structures. The
PREEMPT_RT patchset adds a raw spinlock to this structure and so on PREEMPT_RT
@@ -20,7 +20,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
-@@ -962,3 +962,11 @@ int __fscache_check_consistency(struct f
+@@ -958,3 +958,11 @@ int __fscache_check_consistency(struct f
return -ESTALE;
}
EXPORT_SYMBOL(__fscache_check_consistency);
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+}
--- a/fs/fscache/main.c
+++ b/fs/fscache/main.c
-@@ -149,6 +149,7 @@ static int __init fscache_init(void)
+@@ -145,6 +145,7 @@ static int __init fscache_init(void)
ret = -ENOMEM;
goto error_cookie_jar;
}
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!fscache_root)
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
-@@ -230,6 +230,7 @@ extern void __fscache_readpages_cancel(s
+@@ -226,6 +226,7 @@ extern void __fscache_readpages_cancel(s
extern void __fscache_disable_cookie(struct fscache_cookie *, const void *, bool);
extern void __fscache_enable_cookie(struct fscache_cookie *, const void *, loff_t,
bool (*)(void *), void *);
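The body of fscache_cookie_init() is elided in the hunk above; it amounts to initializing every hash bucket so that the raw spinlock RT embeds in struct hlist_bl_head is set up. A sketch, assuming the fscache_cookie_hash array declared in fs/fscache/cookie.c:

void __init fscache_cookie_init(void)
{
	int i;

	/* INIT_HLIST_BL_HEAD() also initializes the RT-added raw
	 * spinlock, which a plain static array definition would
	 * leave untouched. */
	for (i = 0; i < ARRAY_SIZE(fscache_cookie_hash); i++)
		INIT_HLIST_BL_HEAD(&fscache_cookie_hash[i]);
}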
diff --git a/debian/patches-rt/ftrace-Fix-trace-header-alignment.patch b/debian/patches-rt/ftrace-Fix-trace-header-alignment.patch
index df9dda46b..75ea8bd18 100644
--- a/debian/patches-rt/ftrace-Fix-trace-header-alignment.patch
+++ b/debian/patches-rt/ftrace-Fix-trace-header-alignment.patch
@@ -1,7 +1,7 @@
From: Mike Galbraith <umgwanakikbuti@gmail.com>
Date: Sun, 16 Oct 2016 05:08:30 +0200
Subject: [PATCH] ftrace: Fix trace header alignment
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Line up helper arrows to the right column.
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -3349,17 +3349,17 @@ get_total_entries(struct trace_buffer *b
+@@ -3557,17 +3557,17 @@ unsigned long trace_total_entries(struct
static void print_lat_help_header(struct seq_file *m)
{
diff --git a/debian/patches-rt/ftrace-migrate-disable-tracing.patch b/debian/patches-rt/ftrace-migrate-disable-tracing.patch
index f0fbf6984..a381e4e86 100644
--- a/debian/patches-rt/ftrace-migrate-disable-tracing.patch
+++ b/debian/patches-rt/ftrace-migrate-disable-tracing.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 17 Jul 2011 21:56:42 +0200
Subject: trace: Add migrate-disabled counter to tracing output
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define TRACE_EVENT_TYPE_MAX \
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -2146,6 +2146,8 @@ tracing_generic_entry_update(struct trac
+@@ -2330,6 +2330,8 @@ tracing_generic_entry_update(struct trac
((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
@@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
-@@ -3350,9 +3352,10 @@ static void print_lat_help_header(struct
+@@ -3558,9 +3560,10 @@ static void print_lat_help_header(struct
"# | / _----=> need-resched \n"
"# || / _---=> hardirq/softirq \n"
"# ||| / _--=> preempt-depth \n"
diff --git a/debian/patches-rt/futex-Delay-deallocation-of-pi_state.patch b/debian/patches-rt/futex-Delay-deallocation-of-pi_state.patch
new file mode 100644
index 000000000..cf2ba6120
--- /dev/null
+++ b/debian/patches-rt/futex-Delay-deallocation-of-pi_state.patch
@@ -0,0 +1,174 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 26 Jun 2019 13:35:36 +0200
+Subject: [PATCH] futex: Delay deallocation of pi_state
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
+
+On -RT we can't invoke kfree() in a non-preemptible context.
+
+Defer the deallocation of pi_state to preemptible context.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/futex.c | 55 ++++++++++++++++++++++++++++++++++++++++++++-----------
+ 1 file changed, 44 insertions(+), 11 deletions(-)
+
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -814,13 +814,13 @@ static void get_pi_state(struct futex_pi
+ * Drops a reference to the pi_state object and frees or caches it
+ * when the last reference is gone.
+ */
+-static void put_pi_state(struct futex_pi_state *pi_state)
++static struct futex_pi_state *__put_pi_state(struct futex_pi_state *pi_state)
+ {
+ if (!pi_state)
+- return;
++ return NULL;
+
+ if (!refcount_dec_and_test(&pi_state->refcount))
+- return;
++ return NULL;
+
+ /*
+ * If pi_state->owner is NULL, the owner is most probably dying
+@@ -840,9 +840,7 @@ static void put_pi_state(struct futex_pi
+ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+ }
+
+- if (current->pi_state_cache) {
+- kfree(pi_state);
+- } else {
++ if (!current->pi_state_cache) {
+ /*
+ * pi_state->list is already empty.
+ * clear pi_state->owner.
+@@ -851,6 +849,30 @@ static void put_pi_state(struct futex_pi
+ pi_state->owner = NULL;
+ refcount_set(&pi_state->refcount, 1);
+ current->pi_state_cache = pi_state;
++ pi_state = NULL;
++ }
++ return pi_state;
++}
++
++static void put_pi_state(struct futex_pi_state *pi_state)
++{
++ kfree(__put_pi_state(pi_state));
++}
++
++static void put_pi_state_atomic(struct futex_pi_state *pi_state,
++ struct list_head *to_free)
++{
++ if (__put_pi_state(pi_state))
++ list_add(&pi_state->list, to_free);
++}
++
++static void free_pi_state_list(struct list_head *to_free)
++{
++ struct futex_pi_state *p, *next;
++
++ list_for_each_entry_safe(p, next, to_free, list) {
++ list_del(&p->list);
++ kfree(p);
+ }
+ }
+
+@@ -867,6 +889,7 @@ void exit_pi_state_list(struct task_stru
+ struct futex_pi_state *pi_state;
+ struct futex_hash_bucket *hb;
+ union futex_key key = FUTEX_KEY_INIT;
++ LIST_HEAD(to_free);
+
+ if (!futex_cmpxchg_enabled)
+ return;
+@@ -911,7 +934,7 @@ void exit_pi_state_list(struct task_stru
+ /* retain curr->pi_lock for the loop invariant */
+ raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
+ raw_spin_unlock(&hb->lock);
+- put_pi_state(pi_state);
++ put_pi_state_atomic(pi_state, &to_free);
+ continue;
+ }
+
+@@ -930,6 +953,8 @@ void exit_pi_state_list(struct task_stru
+ raw_spin_lock_irq(&curr->pi_lock);
+ }
+ raw_spin_unlock_irq(&curr->pi_lock);
++
++ free_pi_state_list(&to_free);
+ }
+
+ #endif
+@@ -1910,6 +1935,7 @@ static int futex_requeue(u32 __user *uad
+ struct futex_hash_bucket *hb1, *hb2;
+ struct futex_q *this, *next;
+ DEFINE_WAKE_Q(wake_q);
++ LIST_HEAD(to_free);
+
+ if (nr_wake < 0 || nr_requeue < 0)
+ return -EINVAL;
+@@ -2147,7 +2173,7 @@ static int futex_requeue(u32 __user *uad
+ * object.
+ */
+ this->pi_state = NULL;
+- put_pi_state(pi_state);
++ put_pi_state_atomic(pi_state, &to_free);
+ /*
+ * We stop queueing more waiters and let user
+ * space deal with the mess.
+@@ -2164,7 +2190,7 @@ static int futex_requeue(u32 __user *uad
+ * in futex_proxy_trylock_atomic() or in lookup_pi_state(). We
+ * need to drop it here again.
+ */
+- put_pi_state(pi_state);
++ put_pi_state_atomic(pi_state, &to_free);
+
+ out_unlock:
+ double_unlock_hb(hb1, hb2);
+@@ -2185,6 +2211,7 @@ static int futex_requeue(u32 __user *uad
+ out_put_key1:
+ put_futex_key(&key1);
+ out:
++ free_pi_state_list(&to_free);
+ return ret ? ret : task_count;
+ }
+
+@@ -2321,13 +2348,16 @@ static int unqueue_me(struct futex_q *q)
+ static void unqueue_me_pi(struct futex_q *q)
+ __releases(q->lock_ptr)
+ {
++ struct futex_pi_state *ps;
++
+ __unqueue_futex(q);
+
+ BUG_ON(!q->pi_state);
+- put_pi_state(q->pi_state);
++ ps = __put_pi_state(q->pi_state);
+ q->pi_state = NULL;
+
+ raw_spin_unlock(q->lock_ptr);
++ kfree(ps);
+ }
+
+ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+@@ -3279,6 +3309,8 @@ static int futex_wait_requeue_pi(u32 __u
+ * did a lock-steal - fix up the PI-state in that case.
+ */
+ if (q.pi_state && (q.pi_state->owner != current)) {
++ struct futex_pi_state *ps_free;
++
+ raw_spin_lock(q.lock_ptr);
+ ret = fixup_pi_state_owner(uaddr2, &q, current);
+ if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
+@@ -3289,8 +3321,9 @@ static int futex_wait_requeue_pi(u32 __u
+ * Drop the reference to the pi state which
+ * the requeue_pi() code acquired for us.
+ */
+- put_pi_state(q.pi_state);
++ ps_free = __put_pi_state(q.pi_state);
+ raw_spin_unlock(q.lock_ptr);
++ kfree(ps_free);
+ }
+ } else {
+ struct rt_mutex *pi_mutex;
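The two-phase free introduced above is a reusable pattern: park objects on a local list while atomic, free them once preemptible. A minimal standalone sketch with a hypothetical obj type:

struct obj {
	struct list_head list;
	/* ... payload ... */
};

/* Phase 1: may be called with raw locks held / preemption disabled;
 * it only queues the object and never calls kfree(). */
static void obj_put_atomic(struct obj *o, struct list_head *to_free)
{
	list_add(&o->list, to_free);
}

/* Phase 2: called after all locks are dropped, in preemptible
 * context, where kfree() is safe on -RT. */
static void obj_free_list(struct list_head *to_free)
{
	struct obj *o, *next;

	list_for_each_entry_safe(o, next, to_free, list) {
		list_del(&o->list);
		kfree(o);
	}
}

A caller declares LIST_HEAD(to_free) on the stack, threads it through the locked region and calls obj_free_list(&to_free) at the end, exactly as futex_requeue() and exit_pi_state_list() do in the patch above.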
diff --git a/debian/patches-rt/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch b/debian/patches-rt/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
deleted file mode 100644
index 35d06e43a..000000000
--- a/debian/patches-rt/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
+++ /dev/null
@@ -1,43 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Fri, 1 Mar 2013 11:17:42 +0100
-Subject: futex: Ensure lock/unlock symmetry versus pi_lock and hash bucket lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-In exit_pi_state_list() we have the following locking construct:
-
- spin_lock(&hb->lock);
- raw_spin_lock_irq(&curr->pi_lock);
-
- ...
- spin_unlock(&hb->lock);
-
-In !RT this works, but on RT the migrate_enable() function which is
-called from spin_unlock() sees atomic context due to the held pi_lock
-and just decrements the migrate_disable_atomic counter of the
-task. Now the next call to migrate_disable() sees the counter being
-negative and issues a warning. That check should be in
-migrate_enable() already.
-
-Fix this by dropping pi_lock before unlocking hb->lock and reacquiring
-pi_lock after that again. This is safe as the loop code reevaluates
-head again under the pi_lock.
-
-Reported-by: Yong Zhang <yong.zhang@windriver.com>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/futex.c | 2 ++
- 1 file changed, 2 insertions(+)
-
---- a/kernel/futex.c
-+++ b/kernel/futex.c
-@@ -926,7 +926,9 @@ void exit_pi_state_list(struct task_stru
- if (head->next != next) {
- /* retain curr->pi_lock for the loop invariant */
- raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
-+ raw_spin_unlock_irq(&curr->pi_lock);
- spin_unlock(&hb->lock);
-+ raw_spin_lock_irq(&curr->pi_lock);
- put_pi_state(pi_state);
- continue;
- }
diff --git a/debian/patches-rt/futex-Make-the-futex_hash_bucket-lock-raw.patch b/debian/patches-rt/futex-Make-the-futex_hash_bucket-lock-raw.patch
new file mode 100644
index 000000000..93b023d46
--- /dev/null
+++ b/debian/patches-rt/futex-Make-the-futex_hash_bucket-lock-raw.patch
@@ -0,0 +1,331 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 26 Jun 2019 11:59:44 +0200
+Subject: [PATCH] futex: Make the futex_hash_bucket lock raw
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
+
+Since commit 1a1fb985f2e2b ("futex: Handle early deadlock return
+correctly") we can deadlock when we attempt to re-acquire the HB lock
+after failing to acquire the futex lock.
+The RT waiter (for the futex lock) is still enqueued, and acquiring the
+HB lock may build up a lock chain which leads to a deadlock if the owner
+of the futex lock holds the HB lock.
+
+Make the hash bucket lock raw so it does not participate in the
+lock chain.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/futex.c | 86 ++++++++++++++++++++++++++++-----------------------------
+ 1 file changed, 43 insertions(+), 43 deletions(-)
+
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -231,7 +231,7 @@ struct futex_q {
+ struct plist_node list;
+
+ struct task_struct *task;
+- spinlock_t *lock_ptr;
++ raw_spinlock_t *lock_ptr;
+ union futex_key key;
+ struct futex_pi_state *pi_state;
+ struct rt_mutex_waiter *rt_waiter;
+@@ -252,7 +252,7 @@ static const struct futex_q futex_q_init
+ */
+ struct futex_hash_bucket {
+ atomic_t waiters;
+- spinlock_t lock;
++ raw_spinlock_t lock;
+ struct plist_head chain;
+ } ____cacheline_aligned_in_smp;
+
+@@ -900,7 +900,7 @@ void exit_pi_state_list(struct task_stru
+ }
+ raw_spin_unlock_irq(&curr->pi_lock);
+
+- spin_lock(&hb->lock);
++ raw_spin_lock(&hb->lock);
+ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
+ raw_spin_lock(&curr->pi_lock);
+ /*
+@@ -910,7 +910,7 @@ void exit_pi_state_list(struct task_stru
+ if (head->next != next) {
+ /* retain curr->pi_lock for the loop invariant */
+ raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
+- spin_unlock(&hb->lock);
++ raw_spin_unlock(&hb->lock);
+ put_pi_state(pi_state);
+ continue;
+ }
+@@ -922,7 +922,7 @@ void exit_pi_state_list(struct task_stru
+
+ raw_spin_unlock(&curr->pi_lock);
+ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+- spin_unlock(&hb->lock);
++ raw_spin_unlock(&hb->lock);
+
+ rt_mutex_futex_unlock(&pi_state->pi_mutex);
+ put_pi_state(pi_state);
+@@ -1542,21 +1542,21 @@ static inline void
+ double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
+ {
+ if (hb1 <= hb2) {
+- spin_lock(&hb1->lock);
++ raw_spin_lock(&hb1->lock);
+ if (hb1 < hb2)
+- spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
++ raw_spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
+ } else { /* hb1 > hb2 */
+- spin_lock(&hb2->lock);
+- spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
++ raw_spin_lock(&hb2->lock);
++ raw_spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
+ }
+ }
+
+ static inline void
+ double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
+ {
+- spin_unlock(&hb1->lock);
++ raw_spin_unlock(&hb1->lock);
+ if (hb1 != hb2)
+- spin_unlock(&hb2->lock);
++ raw_spin_unlock(&hb2->lock);
+ }
+
+ /*
+@@ -1584,7 +1584,7 @@ futex_wake(u32 __user *uaddr, unsigned i
+ if (!hb_waiters_pending(hb))
+ goto out_put_key;
+
+- spin_lock(&hb->lock);
++ raw_spin_lock(&hb->lock);
+
+ plist_for_each_entry_safe(this, next, &hb->chain, list) {
+ if (match_futex (&this->key, &key)) {
+@@ -1603,7 +1603,7 @@ futex_wake(u32 __user *uaddr, unsigned i
+ }
+ }
+
+- spin_unlock(&hb->lock);
++ raw_spin_unlock(&hb->lock);
+ wake_up_q(&wake_q);
+ out_put_key:
+ put_futex_key(&key);
+@@ -2208,7 +2208,7 @@ static inline struct futex_hash_bucket *
+
+ q->lock_ptr = &hb->lock;
+
+- spin_lock(&hb->lock);
++ raw_spin_lock(&hb->lock);
+ return hb;
+ }
+
+@@ -2216,7 +2216,7 @@ static inline void
+ queue_unlock(struct futex_hash_bucket *hb)
+ __releases(&hb->lock)
+ {
+- spin_unlock(&hb->lock);
++ raw_spin_unlock(&hb->lock);
+ hb_waiters_dec(hb);
+ }
+
+@@ -2255,7 +2255,7 @@ static inline void queue_me(struct futex
+ __releases(&hb->lock)
+ {
+ __queue_me(q, hb);
+- spin_unlock(&hb->lock);
++ raw_spin_unlock(&hb->lock);
+ }
+
+ /**
+@@ -2271,41 +2271,41 @@ static inline void queue_me(struct futex
+ */
+ static int unqueue_me(struct futex_q *q)
+ {
+- spinlock_t *lock_ptr;
++ raw_spinlock_t *lock_ptr;
+ int ret = 0;
+
+ /* In the common case we don't take the spinlock, which is nice. */
+ retry:
+ /*
+- * q->lock_ptr can change between this read and the following spin_lock.
+- * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
+- * optimizing lock_ptr out of the logic below.
++ * q->lock_ptr can change between this read and the following
++ * raw_spin_lock. Use READ_ONCE to forbid the compiler from reloading
++ * q->lock_ptr and optimizing lock_ptr out of the logic below.
+ */
+ lock_ptr = READ_ONCE(q->lock_ptr);
+ if (lock_ptr != NULL) {
+- spin_lock(lock_ptr);
++ raw_spin_lock(lock_ptr);
+ /*
+ * q->lock_ptr can change between reading it and
+- * spin_lock(), causing us to take the wrong lock. This
++ * raw_spin_lock(), causing us to take the wrong lock. This
+ * corrects the race condition.
+ *
+ * Reasoning goes like this: if we have the wrong lock,
+ * q->lock_ptr must have changed (maybe several times)
+- * between reading it and the spin_lock(). It can
+- * change again after the spin_lock() but only if it was
+- * already changed before the spin_lock(). It cannot,
++ * between reading it and the raw_spin_lock(). It can
++ * change again after the raw_spin_lock() but only if it was
++ * already changed before the raw_spin_lock(). It cannot,
+ * however, change back to the original value. Therefore
+ * we can detect whether we acquired the correct lock.
+ */
+ if (unlikely(lock_ptr != q->lock_ptr)) {
+- spin_unlock(lock_ptr);
++ raw_spin_unlock(lock_ptr);
+ goto retry;
+ }
+ __unqueue_futex(q);
+
+ BUG_ON(q->pi_state);
+
+- spin_unlock(lock_ptr);
++ raw_spin_unlock(lock_ptr);
+ ret = 1;
+ }
+
+@@ -2327,7 +2327,7 @@ static void unqueue_me_pi(struct futex_q
+ put_pi_state(q->pi_state);
+ q->pi_state = NULL;
+
+- spin_unlock(q->lock_ptr);
++ raw_spin_unlock(q->lock_ptr);
+ }
+
+ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+@@ -2460,7 +2460,7 @@ static int fixup_pi_state_owner(u32 __us
+ */
+ handle_err:
+ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+- spin_unlock(q->lock_ptr);
++ raw_spin_unlock(q->lock_ptr);
+
+ switch (err) {
+ case -EFAULT:
+@@ -2478,7 +2478,7 @@ static int fixup_pi_state_owner(u32 __us
+ break;
+ }
+
+- spin_lock(q->lock_ptr);
++ raw_spin_lock(q->lock_ptr);
+ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
+
+ /*
+@@ -2574,7 +2574,7 @@ static void futex_wait_queue_me(struct f
+ /*
+ * The task state is guaranteed to be set before another task can
+ * wake it. set_current_state() is implemented using smp_store_mb() and
+- * queue_me() calls spin_unlock() upon completion, both serializing
++ * queue_me() calls raw_spin_unlock() upon completion, both serializing
+ * access to the hash list and forcing another memory barrier.
+ */
+ set_current_state(TASK_INTERRUPTIBLE);
+@@ -2867,7 +2867,7 @@ static int futex_lock_pi(u32 __user *uad
+ * before __rt_mutex_start_proxy_lock() is done.
+ */
+ raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
+- spin_unlock(q.lock_ptr);
++ raw_spin_unlock(q.lock_ptr);
+ /*
+ * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter
+ * such that futex_unlock_pi() is guaranteed to observe the waiter when
+@@ -2888,7 +2888,7 @@ static int futex_lock_pi(u32 __user *uad
+ ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
+
+ cleanup:
+- spin_lock(q.lock_ptr);
++ raw_spin_lock(q.lock_ptr);
+ /*
+ * If we failed to acquire the lock (deadlock/signal/timeout), we must
+ * first acquire the hb->lock before removing the lock from the
+@@ -2989,7 +2989,7 @@ static int futex_unlock_pi(u32 __user *u
+ return ret;
+
+ hb = hash_futex(&key);
+- spin_lock(&hb->lock);
++ raw_spin_lock(&hb->lock);
+
+ /*
+ * Check waiters first. We do not trust user space values at
+@@ -3023,7 +3023,7 @@ static int futex_unlock_pi(u32 __user *u
+ * rt_waiter. Also see the WARN in wake_futex_pi().
+ */
+ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
+- spin_unlock(&hb->lock);
++ raw_spin_unlock(&hb->lock);
+
+ /* drops pi_state->pi_mutex.wait_lock */
+ ret = wake_futex_pi(uaddr, uval, pi_state);
+@@ -3062,7 +3062,7 @@ static int futex_unlock_pi(u32 __user *u
+ * owner.
+ */
+ if ((ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))) {
+- spin_unlock(&hb->lock);
++ raw_spin_unlock(&hb->lock);
+ switch (ret) {
+ case -EFAULT:
+ goto pi_faulted;
+@@ -3082,7 +3082,7 @@ static int futex_unlock_pi(u32 __user *u
+ ret = (curval == uval) ? 0 : -EAGAIN;
+
+ out_unlock:
+- spin_unlock(&hb->lock);
++ raw_spin_unlock(&hb->lock);
+ out_putkey:
+ put_futex_key(&key);
+ return ret;
+@@ -3257,9 +3257,9 @@ static int futex_wait_requeue_pi(u32 __u
+ /* Queue the futex_q, drop the hb lock, wait for wakeup. */
+ futex_wait_queue_me(hb, &q, to);
+
+- spin_lock(&hb->lock);
++ raw_spin_lock(&hb->lock);
+ ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
+- spin_unlock(&hb->lock);
++ raw_spin_unlock(&hb->lock);
+ if (ret)
+ goto out_put_keys;
+
+@@ -3279,7 +3279,7 @@ static int futex_wait_requeue_pi(u32 __u
+ * did a lock-steal - fix up the PI-state in that case.
+ */
+ if (q.pi_state && (q.pi_state->owner != current)) {
+- spin_lock(q.lock_ptr);
++ raw_spin_lock(q.lock_ptr);
+ ret = fixup_pi_state_owner(uaddr2, &q, current);
+ if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
+ pi_state = q.pi_state;
+@@ -3290,7 +3290,7 @@ static int futex_wait_requeue_pi(u32 __u
+ * the requeue_pi() code acquired for us.
+ */
+ put_pi_state(q.pi_state);
+- spin_unlock(q.lock_ptr);
++ raw_spin_unlock(q.lock_ptr);
+ }
+ } else {
+ struct rt_mutex *pi_mutex;
+@@ -3304,7 +3304,7 @@ static int futex_wait_requeue_pi(u32 __u
+ pi_mutex = &q.pi_state->pi_mutex;
+ ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
+
+- spin_lock(q.lock_ptr);
++ raw_spin_lock(q.lock_ptr);
+ if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
+ ret = 0;
+
+@@ -3929,7 +3929,7 @@ static int __init futex_init(void)
+ for (i = 0; i < futex_hashsize; i++) {
+ atomic_set(&futex_queues[i].waiters, 0);
+ plist_head_init(&futex_queues[i].chain);
+- spin_lock_init(&futex_queues[i].lock);
++ raw_spin_lock_init(&futex_queues[i].lock);
+ }
+
+ return 0;
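The semantic point behind the mechanical s/spin_/raw_spin_/ conversion: on RT a spinlock_t is substituted by a sleeping rtmutex that participates in priority-inheritance lock chains, while a raw_spinlock_t remains a true spinning lock outside any chain. A minimal illustration with a hypothetical lock:

static DEFINE_RAW_SPINLOCK(example_lock);

static void example(void)
{
	raw_spin_lock(&example_lock);
	/* The critical section must stay short and must not sleep,
	 * even on RT, since this really spins with preemption off. */
	raw_spin_unlock(&example_lock);
}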
diff --git a/debian/patches-rt/futex-requeue-pi-fix.patch b/debian/patches-rt/futex-requeue-pi-fix.patch
deleted file mode 100644
index b7ab28303..000000000
--- a/debian/patches-rt/futex-requeue-pi-fix.patch
+++ /dev/null
@@ -1,113 +0,0 @@
-From: Steven Rostedt <rostedt@goodmis.org>
-Date: Tue, 14 Jul 2015 14:26:34 +0200
-Subject: futex: Fix bug on when a requeued RT task times out
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-Requeue with timeout causes a bug with PREEMPT_RT_FULL.
-
-The bug comes from a timed out condition.
-
-
- TASK 1 TASK 2
- ------ ------
- futex_wait_requeue_pi()
- futex_wait_queue_me()
- <timed out>
-
- double_lock_hb();
-
- raw_spin_lock(pi_lock);
- if (current->pi_blocked_on) {
- } else {
-	current->pi_blocked_on = PI_WAKEUP_INPROGRESS;
-	raw_spin_unlock(pi_lock);
- spin_lock(hb->lock); <-- blocked!
-
-
- plist_for_each_entry_safe(this) {
- rt_mutex_start_proxy_lock();
- task_blocks_on_rt_mutex();
- BUG_ON(task->pi_blocked_on)!!!!
-
-The BUG_ON() actually has a check for PI_WAKEUP_INPROGRESS, but the
-problem is that, after TASK 1 sets PI_WAKEUP_INPROGRESS, it then tries to
-grab the hb->lock, which it fails to do. As the hb->lock is a mutex,
-it will block and set the "pi_blocked_on" to the hb->lock.
-
-When TASK 2 goes to requeue it, the check for PI_WAKEUP_INPROGRESS fails
-because task1's pi_blocked_on is no longer set to that but is instead
-set to the hb->lock.
-
-The fix:
-
-When calling rt_mutex_start_proxy_lock() a check is made to see
-if the proxy task's pi_blocked_on is set. If so, exit out early.
-Otherwise set it to a new flag PI_REQUEUE_INPROGRESS, which notifies
-the proxy task that it is being requeued, and will handle things
-appropriately.
-
-
-Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- kernel/locking/rtmutex.c | 31 ++++++++++++++++++++++++++++++-
- kernel/locking/rtmutex_common.h | 1 +
- 2 files changed, 31 insertions(+), 1 deletion(-)
-
---- a/kernel/locking/rtmutex.c
-+++ b/kernel/locking/rtmutex.c
-@@ -137,7 +137,8 @@ static void fixup_rt_mutex_waiters(struc
-
- static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
- {
-- return waiter && waiter != PI_WAKEUP_INPROGRESS;
-+ return waiter && waiter != PI_WAKEUP_INPROGRESS &&
-+ waiter != PI_REQUEUE_INPROGRESS;
- }
-
- /*
-@@ -1784,6 +1785,34 @@ int __rt_mutex_start_proxy_lock(struct r
- if (try_to_take_rt_mutex(lock, task, NULL))
- return 1;
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ /*
-+ * In PREEMPT_RT there's an added race.
-+ * If the task, that we are about to requeue, times out,
-+ * it can set the PI_WAKEUP_INPROGRESS. This tells the requeue
-+ * to skip this task. But right after the task sets
-+ * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then
-+ * block on the spin_lock(&hb->lock), which in RT is an rtmutex.
-+ * This will replace the PI_WAKEUP_INPROGRESS with the actual
-+ * lock that it blocks on. We *must not* place this task
-+ * on this proxy lock in that case.
-+ *
-+ * To prevent this race, we first take the task's pi_lock
-+ * and check if it has updated its pi_blocked_on. If it has,
-+ * we assume that it woke up and we return -EAGAIN.
-+ * Otherwise, we set the task's pi_blocked_on to
-+ * PI_REQUEUE_INPROGRESS, so that if the task is waking up
-+ * it will know that we are in the process of requeuing it.
-+ */
-+ raw_spin_lock(&task->pi_lock);
-+ if (task->pi_blocked_on) {
-+ raw_spin_unlock(&task->pi_lock);
-+ return -EAGAIN;
-+ }
-+ task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
-+ raw_spin_unlock(&task->pi_lock);
-+#endif
-+
- /* We enforce deadlock detection for futexes */
- ret = task_blocks_on_rt_mutex(lock, waiter, task,
- RT_MUTEX_FULL_CHAINWALK);
---- a/kernel/locking/rtmutex_common.h
-+++ b/kernel/locking/rtmutex_common.h
-@@ -131,6 +131,7 @@ enum rtmutex_chainwalk {
- * PI-futex support (proxy locking functions, etc.):
- */
- #define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1)
-+#define PI_REQUEUE_INPROGRESS ((struct rt_mutex_waiter *) 2)
-
- extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
- extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
diff --git a/debian/patches-rt/futex-workaround-migrate_disable-enable-in-different.patch b/debian/patches-rt/futex-workaround-migrate_disable-enable-in-different.patch
deleted file mode 100644
index c9f5d342b..000000000
--- a/debian/patches-rt/futex-workaround-migrate_disable-enable-in-different.patch
+++ /dev/null
@@ -1,61 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Wed, 8 Mar 2017 14:23:35 +0100
-Subject: [PATCH] futex: workaround migrate_disable/enable in different context
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-migrate_disable()/migrate_enable() takes a different path in atomic() vs
-!atomic() context. These little hacks ensure that we don't underflow or
-overflow the migrate code counts while we lock the hb lock with interrupts
-enabled and unlock it with interrupts disabled.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/futex.c | 18 ++++++++++++++++++
- 1 file changed, 18 insertions(+)
-
---- a/kernel/futex.c
-+++ b/kernel/futex.c
-@@ -2867,6 +2867,14 @@ static int futex_lock_pi(u32 __user *uad
- * before __rt_mutex_start_proxy_lock() is done.
- */
- raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
-+ /*
-+ * the migrate_disable() here disables migration in the in_atomic() fast
-+ * path which is enabled again in the following spin_unlock(). We have
-+ * one migrate_disable() pending in the slow-path which is reversed
-+ * after the raw_spin_unlock_irq() where we leave the atomic context.
-+ */
-+ migrate_disable();
-+
- spin_unlock(q.lock_ptr);
- /*
- * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter
-@@ -2875,6 +2883,7 @@ static int futex_lock_pi(u32 __user *uad
- */
- ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
- raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
-+ migrate_enable();
-
- if (ret) {
- if (ret == 1)
-@@ -3023,10 +3032,19 @@ static int futex_unlock_pi(u32 __user *u
- * rt_waiter. Also see the WARN in wake_futex_pi().
- */
- raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
-+ /*
-+ * Magic trickery for now to make the RT migrate disable
-+ * logic happy. The following spin_unlock() happens with
-+ * interrupts disabled so the internal migrate_enable()
-+ * won't undo the migrate_disable() which was issued when
-+ * locking hb->lock.
-+ */
-+ migrate_disable();
- spin_unlock(&hb->lock);
-
- /* drops pi_state->pi_mutex.wait_lock */
- ret = wake_futex_pi(uaddr, uval, pi_state);
-+ migrate_enable();
-
- put_pi_state(pi_state);
-
diff --git a/debian/patches-rt/genirq-disable-irqpoll-on-rt.patch b/debian/patches-rt/genirq-disable-irqpoll-on-rt.patch
index 799e71619..0297124ec 100644
--- a/debian/patches-rt/genirq-disable-irqpoll-on-rt.patch
+++ b/debian/patches-rt/genirq-disable-irqpoll-on-rt.patch
@@ -1,7 +1,7 @@
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:57 -0500
Subject: genirq: Disable irqpoll on -rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Creates long latencies for no value
diff --git a/debian/patches-rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch b/debian/patches-rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
deleted file mode 100644
index eb00e1ead..000000000
--- a/debian/patches-rt/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
+++ /dev/null
@@ -1,103 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 21 Aug 2013 17:48:46 +0200
-Subject: genirq: Do not invoke the affinity callback via a workqueue on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-Joe Korty reported that __irq_set_affinity_locked() schedules a
-workqueue while holding a raw lock which results in a might_sleep()
-warning.
-This patch uses kthread_schedule_work() instead.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/interrupt.h | 5 +++++
- kernel/irq/manage.c | 32 +++++++++++++++++++++++++++++---
- 2 files changed, 34 insertions(+), 3 deletions(-)
-
---- a/include/linux/interrupt.h
-+++ b/include/linux/interrupt.h
-@@ -13,6 +13,7 @@
- #include <linux/hrtimer.h>
- #include <linux/kref.h>
- #include <linux/workqueue.h>
-+#include <linux/kthread.h>
-
- #include <linux/atomic.h>
- #include <asm/ptrace.h>
-@@ -236,7 +237,11 @@ extern void resume_device_irqs(void);
- struct irq_affinity_notify {
- unsigned int irq;
- struct kref kref;
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ struct kthread_work work;
-+#else
- struct work_struct work;
-+#endif
- void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
- void (*release)(struct kref *ref);
- };
---- a/kernel/irq/manage.c
-+++ b/kernel/irq/manage.c
-@@ -259,7 +259,12 @@ int irq_set_affinity_locked(struct irq_d
-
- if (desc->affinity_notify) {
- kref_get(&desc->affinity_notify->kref);
-+
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ kthread_schedule_work(&desc->affinity_notify->work);
-+#else
- schedule_work(&desc->affinity_notify->work);
-+#endif
- }
- irqd_set(data, IRQD_AFFINITY_SET);
-
-@@ -297,10 +302,8 @@ int irq_set_affinity_hint(unsigned int i
- }
- EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
-
--static void irq_affinity_notify(struct work_struct *work)
-+static void _irq_affinity_notify(struct irq_affinity_notify *notify)
- {
-- struct irq_affinity_notify *notify =
-- container_of(work, struct irq_affinity_notify, work);
- struct irq_desc *desc = irq_to_desc(notify->irq);
- cpumask_var_t cpumask;
- unsigned long flags;
-@@ -322,6 +325,25 @@ static void irq_affinity_notify(struct w
- kref_put(&notify->kref, notify->release);
- }
-
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+
-+static void irq_affinity_notify(struct kthread_work *work)
-+{
-+ struct irq_affinity_notify *notify =
-+ container_of(work, struct irq_affinity_notify, work);
-+ _irq_affinity_notify(notify);
-+}
-+
-+#else
-+
-+static void irq_affinity_notify(struct work_struct *work)
-+{
-+ struct irq_affinity_notify *notify =
-+ container_of(work, struct irq_affinity_notify, work);
-+ _irq_affinity_notify(notify);
-+}
-+#endif
-+
- /**
- * irq_set_affinity_notifier - control notification of IRQ affinity changes
- * @irq: Interrupt for which to enable/disable notification
-@@ -350,7 +372,11 @@ irq_set_affinity_notifier(unsigned int i
- if (notify) {
- notify->irq = irq;
- kref_init(&notify->kref);
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ kthread_init_work(&notify->work, irq_affinity_notify);
-+#else
- INIT_WORK(&notify->work, irq_affinity_notify);
-+#endif
- }
-
- raw_spin_lock_irqsave(&desc->lock, flags);
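For reference, the mainline kthread_work API the dropped patch built on (kthread_schedule_work() itself was an RT-only wrapper around a global worker); a sketch with hypothetical names:

static void my_affinity_notify(struct kthread_work *work)
{
	/* Runs in the worker's kthread, i.e. in fully preemptible
	 * context, unlike a callback invoked under a raw lock. */
}

static DEFINE_KTHREAD_WORK(my_work, my_affinity_notify);

static void my_queue(struct kthread_worker *worker)
{
	/* kthread_queue_work() returns false if already queued. */
	kthread_queue_work(worker, &my_work);
}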
diff --git a/debian/patches-rt/genirq-force-threading.patch b/debian/patches-rt/genirq-force-threading.patch
index bdb979bdd..6d61a6504 100644
--- a/debian/patches-rt/genirq-force-threading.patch
+++ b/debian/patches-rt/genirq-force-threading.patch
@@ -1,7 +1,7 @@
Subject: genirq: Force interrupt thread on RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 03 Apr 2011 11:57:29 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Force threaded_irqs and optimize the code (force_irqthreads) in regard
to this.
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -443,7 +443,11 @@ extern int irq_set_irqchip_state(unsigne
+@@ -472,7 +472,11 @@ extern int irq_set_irqchip_state(unsigne
bool state);
#ifdef CONFIG_IRQ_FORCED_THREADING
diff --git a/debian/patches-rt/genirq-update-irq_set_irqchip_state-documentation.patch b/debian/patches-rt/genirq-update-irq_set_irqchip_state-documentation.patch
index 8a6f86772..e58d81191 100644
--- a/debian/patches-rt/genirq-update-irq_set_irqchip_state-documentation.patch
+++ b/debian/patches-rt/genirq-update-irq_set_irqchip_state-documentation.patch
@@ -1,7 +1,7 @@
From: Josh Cartwright <joshc@ni.com>
Date: Thu, 11 Feb 2016 11:54:00 -0600
Subject: genirq: update irq_set_irqchip_state documentation
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
On -rt kernels, the use of migrate_disable()/migrate_enable() is
sufficient to guarantee a task isn't moved to another CPU. Update the
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
-@@ -2266,7 +2266,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state)
+@@ -2636,7 +2636,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state)
* This call sets the internal irqchip state of an interrupt,
* depending on the value of @which.
*
diff --git a/debian/patches-rt/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch b/debian/patches-rt/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch
index 5b6b98ed3..c91df1f64 100644
--- a/debian/patches-rt/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch
+++ b/debian/patches-rt/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 4 Aug 2017 18:31:00 +0200
Subject: [PATCH] hotplug: duct-tape RT-rwlock usage for non-RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
This type is only available on -RT. We need to craft something for
non-RT. Since the only migrate_disable() user is -RT only, there is no
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -75,7 +75,7 @@ static DEFINE_PER_CPU(struct cpuhp_cpu_s
+@@ -76,7 +76,7 @@ static DEFINE_PER_CPU(struct cpuhp_cpu_s
.fail = CPUHP_INVALID,
};
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static DEFINE_PER_CPU(struct rt_rw_lock, cpuhp_pin_lock) = \
__RWLOCK_RT_INITIALIZER(cpuhp_pin_lock);
#endif
-@@ -291,6 +291,7 @@ static int cpu_hotplug_disabled;
+@@ -292,6 +292,7 @@ static int cpu_hotplug_disabled;
*/
void pin_current_cpu(void)
{
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct rt_rw_lock *cpuhp_pin;
unsigned int cpu;
int ret;
-@@ -313,6 +314,7 @@ void pin_current_cpu(void)
+@@ -314,6 +315,7 @@ void pin_current_cpu(void)
goto again;
}
current->pinned_on_cpu = cpu;
@@ -39,7 +39,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/**
-@@ -320,6 +322,7 @@ void pin_current_cpu(void)
+@@ -321,6 +323,7 @@ void pin_current_cpu(void)
*/
void unpin_current_cpu(void)
{
@@ -47,7 +47,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct rt_rw_lock *cpuhp_pin = this_cpu_ptr(&cpuhp_pin_lock);
if (WARN_ON(current->pinned_on_cpu != smp_processor_id()))
-@@ -327,6 +330,7 @@ void unpin_current_cpu(void)
+@@ -328,6 +331,7 @@ void unpin_current_cpu(void)
current->pinned_on_cpu = -1;
__read_rt_unlock(cpuhp_pin);
@@ -55,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
-@@ -915,7 +919,9 @@ static int take_cpu_down(void *_param)
+@@ -918,7 +922,9 @@ static int take_cpu_down(void *_param)
static int takedown_cpu(unsigned int cpu)
{
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int err;
-@@ -928,14 +934,18 @@ static int takedown_cpu(unsigned int cpu
+@@ -931,14 +937,18 @@ static int takedown_cpu(unsigned int cpu
*/
irq_lock_sparse();
@@ -84,7 +84,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* CPU refused to die */
irq_unlock_sparse();
/* Unpark the hotplug thread so we can rollback there */
-@@ -954,7 +964,9 @@ static int takedown_cpu(unsigned int cpu
+@@ -957,7 +967,9 @@ static int takedown_cpu(unsigned int cpu
wait_for_ap_thread(st, false);
BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
diff --git a/debian/patches-rt/hotplug-light-get-online-cpus.patch b/debian/patches-rt/hotplug-light-get-online-cpus.patch
index 297332b6b..d516a47cf 100644
--- a/debian/patches-rt/hotplug-light-get-online-cpus.patch
+++ b/debian/patches-rt/hotplug-light-get-online-cpus.patch
@@ -1,7 +1,7 @@
Subject: hotplug: Lightweight get online cpus
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 15 Jun 2011 12:36:06 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
get_online_cpus() is a heavy weight function which involves a global
mutex. migrate_disable() wants a simpler construct which prevents only
@@ -15,12 +15,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/cpu.h | 5 +++++
kernel/cpu.c | 15 +++++++++++++++
- kernel/sched/core.c | 4 ++++
- 3 files changed, 24 insertions(+)
+ kernel/sched/core.c | 3 +++
+ 3 files changed, 23 insertions(+)
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
-@@ -111,6 +111,8 @@ extern void cpu_hotplug_disable(void);
+@@ -113,6 +113,8 @@ extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
void clear_tasks_mm_cpumask(int cpu);
int cpu_down(unsigned int cpu);
@@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#else /* CONFIG_HOTPLUG_CPU */
-@@ -122,6 +124,9 @@ static inline int cpus_read_trylock(voi
+@@ -124,6 +126,9 @@ static inline int cpus_read_trylock(voi
static inline void lockdep_assert_cpus_held(void) { }
static inline void cpu_hotplug_disable(void) { }
static inline void cpu_hotplug_enable(void) { }
@@ -41,7 +41,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Wrappers which go away once all code is converted */
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -281,6 +281,21 @@ static int cpu_hotplug_disabled;
+@@ -282,6 +282,21 @@ static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU
@@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void cpus_read_lock(void)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7216,6 +7216,7 @@ void migrate_disable(void)
+@@ -7223,6 +7223,7 @@ void migrate_disable(void)
}
preempt_disable();
@@ -73,15 +73,13 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
migrate_disable_update_cpus_allowed(p);
p->migrate_disable = 1;
-@@ -7281,12 +7282,15 @@ void migrate_enable(void)
+@@ -7288,11 +7289,13 @@ void migrate_enable(void)
arg.task = p;
arg.dest_cpu = dest_cpu;
+ unpin_current_cpu();
preempt_enable();
stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
- tlb_migrate_finish(p->mm);
-+
return;
}
}
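
Taken together, pin_current_cpu()/unpin_current_cpu() behave like a per-CPU
reader lock: a migrate-disabled task holds it for reading, and the hotplug
path must take it for writing before the CPU may go down. A minimal userspace
sketch of that idea, using one plain pthread rwlock in place of the kernel's
per-CPU cpuhp_pin rwsem (the names and the single-lock simplification are
illustrative only, not the kernel API):

#include <pthread.h>
#include <stdio.h>

/* Stand-in for the per-CPU cpuhp_pin rwsem; one global lock keeps the
 * sketch simple. */
static pthread_rwlock_t cpu_pin_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Task side: cheap and shared -- many tasks may pin concurrently. */
static void pin_current_cpu(void)   { pthread_rwlock_rdlock(&cpu_pin_lock); }
static void unpin_current_cpu(void) { pthread_rwlock_unlock(&cpu_pin_lock); }

/* Hotplug side: exclusive -- blocks until every pinned task has unpinned. */
static void take_cpu_down_stub(void)
{
    pthread_rwlock_wrlock(&cpu_pin_lock);
    puts("no task is pinned, CPU may go down");
    pthread_rwlock_unlock(&cpu_pin_lock);
}

int main(void)
{
    pin_current_cpu();
    /* ... migrate-disabled section: the CPU cannot be unplugged here ... */
    unpin_current_cpu();
    take_cpu_down_stub();
    return 0;
}
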
diff --git a/debian/patches-rt/hrtimer-Introduce-expiry-spin-lock.patch b/debian/patches-rt/hrtimer-Introduce-expiry-spin-lock.patch
new file mode 100644
index 000000000..ad9478dc6
--- /dev/null
+++ b/debian/patches-rt/hrtimer-Introduce-expiry-spin-lock.patch
@@ -0,0 +1,103 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Mon, 27 May 2019 16:54:04 +0200
+Subject: [PATCH] hrtimer: Introduce expiry spin lock
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
+
+When deleting an hrtimer it is possible that the CPU has to spin, because
+the hrtimer is marked as running. The deletion is retried in a loop with
+cpu_relax() in between. When doing this in a virtual machine, the CPU
+wastes vcpu time by spinning for as long as the callback is running.
+
+The spinning and time wasting can be prevented by using PARAVIRT_SPINLOCKS
+and introducing a per timer base spin lock for expiry. The lock is held
+while the timers of a base are expired. When the deletion of a timer was
+not successful because the timer is currently running, the expiry lock is
+taken instead of calling cpu_relax(). The lock is already held by the CPU
+expiring the timers, so the PARAVIRT_SPINLOCKS code lets the CPU be
+scheduled out instead of spinning until the lock is released. This prevents
+the time wasted by spinning.
+
+The code is not made conditional on PARAVIRT_SPINLOCKS. The lock is taken
+in only two places, and in one of them it is dropped directly after being
+taken, so the probability of hitting the slowpath when taking the lock is
+very low. But this keeps the code cleaner than introducing several
+CONFIG_PARAVIRT_SPINLOCKS dependent code paths and struct members.
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/hrtimer.h | 3 +++
+ kernel/time/hrtimer.c | 15 ++++++++++++++-
+ 2 files changed, 17 insertions(+), 1 deletion(-)
+
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -183,6 +183,8 @@ enum hrtimer_base_type {
+ * @nr_retries: Total number of hrtimer interrupt retries
+ * @nr_hangs: Total number of hrtimer interrupt hangs
+ * @max_hang_time: Maximum time spent in hrtimer_interrupt
++ * @softirq_expiry_lock: Lock which is taken while softirq based hrtimer are
++ * expired
+ * @expires_next: absolute time of the next event, is required for remote
+ * hrtimer enqueue; it is the total first expiry time (hard
+ * and soft hrtimer are taken into account)
+@@ -210,6 +212,7 @@ struct hrtimer_cpu_base {
+ unsigned short nr_hangs;
+ unsigned int max_hang_time;
+ #endif
++ spinlock_t softirq_expiry_lock;
+ ktime_t expires_next;
+ struct hrtimer *next_timer;
+ ktime_t softirq_expires_next;
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -930,6 +930,16 @@ u64 hrtimer_forward(struct hrtimer *time
+ }
+ EXPORT_SYMBOL_GPL(hrtimer_forward);
+
++static void hrtimer_grab_expiry_lock(const struct hrtimer *timer)
++{
++ struct hrtimer_clock_base *base = timer->base;
++
++ if (base && base->cpu_base) {
++ spin_lock(&base->cpu_base->softirq_expiry_lock);
++ spin_unlock(&base->cpu_base->softirq_expiry_lock);
++ }
++}
++
+ /*
+ * enqueue_hrtimer - internal function to (re)start a timer
+ *
+@@ -1162,7 +1172,7 @@ int hrtimer_cancel(struct hrtimer *timer
+
+ if (ret >= 0)
+ return ret;
+- cpu_relax();
++ hrtimer_grab_expiry_lock(timer);
+ }
+ }
+ EXPORT_SYMBOL_GPL(hrtimer_cancel);
+@@ -1459,6 +1469,7 @@ static __latent_entropy void hrtimer_run
+ unsigned long flags;
+ ktime_t now;
+
++ spin_lock(&cpu_base->softirq_expiry_lock);
+ raw_spin_lock_irqsave(&cpu_base->lock, flags);
+
+ now = hrtimer_update_base(cpu_base);
+@@ -1468,6 +1479,7 @@ static __latent_entropy void hrtimer_run
+ hrtimer_update_softirq_timer(cpu_base, true);
+
+ raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
++ spin_unlock(&cpu_base->softirq_expiry_lock);
+ }
+
+ #ifdef CONFIG_HIGH_RES_TIMERS
+@@ -1809,6 +1821,7 @@ int hrtimers_prepare_cpu(unsigned int cp
+ cpu_base->softirq_next_timer = NULL;
+ cpu_base->expires_next = KTIME_MAX;
+ cpu_base->softirq_expires_next = KTIME_MAX;
++ spin_lock_init(&cpu_base->softirq_expiry_lock);
+ return 0;
+ }
+
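
The core of the patch is the grab-and-drop in hrtimer_grab_expiry_lock(): a
canceller that observes a running callback blocks on the lock held by the
expiry path and releases it immediately, which turns the busy-wait into a
sleepable wait. A rough userspace analog of that pattern (a pthread mutex in
place of the kernel spinlock, all names illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t expiry_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool timer_running = false;

/* Expiry path: holds the lock for the whole callback run. */
static void run_expired_timers(void)
{
    pthread_mutex_lock(&expiry_lock);
    atomic_store(&timer_running, true);
    /* ... invoke the timer callback here ... */
    atomic_store(&timer_running, false);
    pthread_mutex_unlock(&expiry_lock);
}

static void grab_expiry_lock(void)
{
    /* Block until the expiry pass is done, then release immediately. */
    pthread_mutex_lock(&expiry_lock);
    pthread_mutex_unlock(&expiry_lock);
}

static void cancel_timer(void)
{
    while (atomic_load(&timer_running))
        grab_expiry_lock();      /* sleep here instead of cpu_relax() */
    /* ... timer is not running: safe to remove it ... */
}

int main(void)
{
    run_expired_timers();
    cancel_timer();
    puts("cancelled");
    return 0;
}

Taking and immediately dropping the lock acts purely as a barrier: by the
time the canceller obtains it, the expiry pass that was running the callback
has finished.
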
diff --git a/debian/patches-rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch b/debian/patches-rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch
deleted file mode 100644
index df9ada6ce..000000000
--- a/debian/patches-rt/hrtimer-Move-schedule_work-call-to-helper-thread.patch
+++ /dev/null
@@ -1,85 +0,0 @@
-From: Yang Shi <yang.shi@windriver.com>
-Date: Mon, 16 Sep 2013 14:09:19 -0700
-Subject: hrtimer: Move schedule_work call to helper thread
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-When run ltp leapsec_timer test, the following call trace is caught:
-
-BUG: sleeping function called from invalid context at kernel/rtmutex.c:659
-in_atomic(): 1, irqs_disabled(): 1, pid: 0, name: swapper/1
-Preemption disabled at:[<ffffffff810857f3>] cpu_startup_entry+0x133/0x310
-
-CPU: 1 PID: 0 Comm: swapper/1 Not tainted 3.10.10-rt3 #2
-Hardware name: Intel Corporation Calpella platform/MATXM-CORE-411-B, BIOS 4.6.3 08/18/2010
-ffffffff81c2f800 ffff880076843e40 ffffffff8169918d ffff880076843e58
-ffffffff8106db31 ffff88007684b4a0 ffff880076843e70 ffffffff8169d9c0
-ffff88007684b4a0 ffff880076843eb0 ffffffff81059da1 0000001876851200
-Call Trace:
-<IRQ> [<ffffffff8169918d>] dump_stack+0x19/0x1b
-[<ffffffff8106db31>] __might_sleep+0xf1/0x170
-[<ffffffff8169d9c0>] rt_spin_lock+0x20/0x50
-[<ffffffff81059da1>] queue_work_on+0x61/0x100
-[<ffffffff81065aa1>] clock_was_set_delayed+0x21/0x30
-[<ffffffff810883be>] do_timer+0x40e/0x660
-[<ffffffff8108f487>] tick_do_update_jiffies64+0xf7/0x140
-[<ffffffff8108fe42>] tick_check_idle+0x92/0xc0
-[<ffffffff81044327>] irq_enter+0x57/0x70
-[<ffffffff816a040e>] smp_apic_timer_interrupt+0x3e/0x9b
-[<ffffffff8169f80a>] apic_timer_interrupt+0x6a/0x70
-<EOI> [<ffffffff8155ea1c>] ? cpuidle_enter_state+0x4c/0xc0
-[<ffffffff8155eb68>] cpuidle_idle_call+0xd8/0x2d0
-[<ffffffff8100b59e>] arch_cpu_idle+0xe/0x30
-[<ffffffff8108585e>] cpu_startup_entry+0x19e/0x310
-[<ffffffff8168efa2>] start_secondary+0x1ad/0x1b0
-
-The clock_was_set_delayed is called in hard IRQ handler (timer interrupt), which
-calls schedule_work.
-
-Under PREEMPT_RT_FULL, schedule_work calls spinlocks which could sleep, so it's
-not safe to call schedule_work in interrupt context.
-
-Reference upstream commit b68d61c705ef02384c0538b8d9374545097899ca
-(rt,ntp: Move call to schedule_delayed_work() to helper thread)
-from git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-stable-rt.git, which
-makes a similar change.
-
-Signed-off-by: Yang Shi <yang.shi@windriver.com>
-[bigeasy: use kthread_schedule_work() instead a helper thread]
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/time/hrtimer.c | 17 +++++++++++++++++
- 1 file changed, 17 insertions(+)
-
---- a/kernel/time/hrtimer.c
-+++ b/kernel/time/hrtimer.c
-@@ -721,6 +721,22 @@ static void hrtimer_switch_to_hres(void)
- retrigger_next_event(NULL);
- }
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+
-+static void run_clock_set_delay(struct kthread_work *work)
-+{
-+ clock_was_set();
-+}
-+
-+static DEFINE_KTHREAD_WORK(clock_set_delay_work, run_clock_set_delay);
-+
-+void clock_was_set_delayed(void)
-+{
-+ kthread_schedule_work(&clock_set_delay_work);
-+}
-+
-+#else /* PREEMPT_RT_FULL */
-+
- static void clock_was_set_work(struct work_struct *work)
- {
- clock_was_set();
-@@ -736,6 +752,7 @@ void clock_was_set_delayed(void)
- {
- schedule_work(&hrtimer_work);
- }
-+#endif
-
- #else
-
diff --git a/debian/patches-rt/hrtimer-by-timers-by-default-into-the-softirq-context.patch b/debian/patches-rt/hrtimer-by-timers-by-default-into-the-softirq-context.patch
index a88c1fa55..e7322219c 100644
--- a/debian/patches-rt/hrtimer-by-timers-by-default-into-the-softirq-context.patch
+++ b/debian/patches-rt/hrtimer-by-timers-by-default-into-the-softirq-context.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 3 Jul 2009 08:44:31 -0500
Subject: hrtimer: by timers by default into the softirq context
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
We can't have hrtimer callbacks running in hardirq context on RT. Therefore
the timers are deferred to the softirq context by default.
@@ -28,18 +28,18 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
-@@ -2274,7 +2274,7 @@ int kvm_create_lapic(struct kvm_vcpu *vc
+@@ -2308,7 +2308,7 @@ int kvm_create_lapic(struct kvm_vcpu *vc
apic->vcpu = vcpu;
hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_ABS_PINNED);
+ HRTIMER_MODE_ABS_PINNED_HARD);
apic->lapic_timer.timer.function = apic_timer_fn;
-
- /*
+ if (timer_advance_ns == -1) {
+ apic->lapic_timer.timer_advance_ns = 1000;
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
-@@ -39,6 +39,7 @@ enum hrtimer_mode {
+@@ -38,6 +38,7 @@ enum hrtimer_mode {
HRTIMER_MODE_REL = 0x01,
HRTIMER_MODE_PINNED = 0x02,
HRTIMER_MODE_SOFT = 0x04,
@@ -47,7 +47,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
HRTIMER_MODE_ABS_PINNED = HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED,
HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL | HRTIMER_MODE_PINNED,
-@@ -49,6 +50,11 @@ enum hrtimer_mode {
+@@ -48,6 +49,11 @@ enum hrtimer_mode {
HRTIMER_MODE_ABS_PINNED_SOFT = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_SOFT,
HRTIMER_MODE_REL_PINNED_SOFT = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_SOFT,
@@ -61,7 +61,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
-@@ -1102,7 +1102,7 @@ static void __perf_mux_hrtimer_init(stru
+@@ -1103,7 +1103,7 @@ static void __perf_mux_hrtimer_init(stru
cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);
raw_spin_lock_init(&cpuctx->hrtimer_lock);
@@ -70,7 +70,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
timer->function = perf_mux_hrtimer_handler;
}
-@@ -9212,7 +9212,7 @@ static void perf_swevent_init_hrtimer(st
+@@ -9488,7 +9488,7 @@ static void perf_swevent_init_hrtimer(st
if (!is_sampling_event(event))
return;
@@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -315,7 +315,7 @@ static void hrtick_rq_init(struct rq *rq
+@@ -317,7 +317,7 @@ static void hrtick_rq_init(struct rq *rq
rq->hrtick_csd.info = rq;
#endif
@@ -92,7 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#else /* CONFIG_SCHED_HRTICK */
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
-@@ -1054,7 +1054,7 @@ void init_dl_task_timer(struct sched_dl_
+@@ -1053,7 +1053,7 @@ void init_dl_task_timer(struct sched_dl_
{
struct hrtimer *timer = &dl_se->dl_timer;
@@ -103,7 +103,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
-@@ -4916,9 +4916,9 @@ void init_cfs_bandwidth(struct cfs_bandw
+@@ -4945,9 +4945,9 @@ void init_cfs_bandwidth(struct cfs_bandw
cfs_b->period = ns_to_ktime(default_cfs_period());
INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
@@ -130,7 +130,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -1126,7 +1126,9 @@ void hrtimer_start_range_ns(struct hrtim
+@@ -1109,7 +1109,9 @@ void hrtimer_start_range_ns(struct hrtim
* Check whether the HRTIMER_MODE_SOFT bit and hrtimer.is_soft
* match.
*/
@@ -140,7 +140,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
base = lock_hrtimer_base(timer, &flags);
-@@ -1286,10 +1288,17 @@ static inline int hrtimer_clockid_to_bas
+@@ -1269,10 +1271,17 @@ static inline int hrtimer_clockid_to_bas
static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
enum hrtimer_mode mode)
{
@@ -160,7 +160,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
memset(timer, 0, sizeof(struct hrtimer));
cpu_base = raw_cpu_ptr(&hrtimer_bases);
-@@ -1672,6 +1681,14 @@ static void __hrtimer_init_sleeper(struc
+@@ -1656,6 +1665,14 @@ static void __hrtimer_init_sleeper(struc
enum hrtimer_mode mode,
struct task_struct *task)
{
@@ -188,7 +188,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
-@@ -1307,7 +1307,7 @@ void tick_setup_sched_timer(void)
+@@ -1327,7 +1327,7 @@ void tick_setup_sched_timer(void)
/*
* Emulate tick processing via per-CPU hrtimers:
*/
@@ -199,7 +199,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Get the next period (per-CPU) */
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
-@@ -483,7 +483,7 @@ static void watchdog_enable(unsigned int
+@@ -490,7 +490,7 @@ static void watchdog_enable(unsigned int
* Start the timer first to prevent the NMI watchdog triggering
* before the timer has a chance to fire.
*/
diff --git a/debian/patches-rt/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch b/debian/patches-rt/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
index 820f7fe1c..dd5f95947 100644
--- a/debian/patches-rt/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
+++ b/debian/patches-rt/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 3 Jul 2018 11:25:41 +0200
Subject: [PATCH v2] hrtimer: consolidate hrtimer_init() + hrtimer_init_sleeper() calls
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
hrtimer_init_sleeper() calls require a prior initialisation of the
hrtimer object with hrtimer_init(). Let's make the initialisation of
@@ -27,7 +27,7 @@ Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
-@@ -3377,10 +3377,9 @@ static bool blk_mq_poll_hybrid_sleep(str
+@@ -3425,10 +3425,9 @@ static bool blk_mq_poll_hybrid_sleep(str
kt = nsecs;
mode = HRTIMER_MODE_REL;
@@ -58,7 +58,7 @@ Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
while (1) {
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
-@@ -361,10 +361,17 @@ DECLARE_PER_CPU(struct tick_device, tick
+@@ -364,10 +364,17 @@ DECLARE_PER_CPU(struct tick_device, tick
/* Initialize timers: */
extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock,
enum hrtimer_mode mode);
@@ -76,7 +76,7 @@ Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
extern void destroy_hrtimer_on_stack(struct hrtimer *timer);
#else
-@@ -374,6 +381,15 @@ static inline void hrtimer_init_on_stack
+@@ -377,6 +384,15 @@ static inline void hrtimer_init_on_stack
{
hrtimer_init(timer, which_clock, mode);
}
@@ -92,7 +92,7 @@ Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
#endif
-@@ -477,9 +493,6 @@ extern long hrtimer_nanosleep(const stru
+@@ -481,9 +497,6 @@ extern long hrtimer_nanosleep(const stru
const enum hrtimer_mode mode,
const clockid_t clockid);
@@ -117,7 +117,7 @@ Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
current->timer_slack_ns, \
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -2692,10 +2692,9 @@ static int futex_wait(u32 __user *uaddr,
+@@ -2722,10 +2722,9 @@ static int futex_wait(u32 __user *uaddr,
if (abs_time) {
to = &timeout;
@@ -131,7 +131,7 @@ Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
hrtimer_set_expires_range_ns(&to->timer, *abs_time,
current->timer_slack_ns);
}
-@@ -2794,9 +2793,8 @@ static int futex_lock_pi(u32 __user *uad
+@@ -2824,9 +2823,8 @@ static int futex_lock_pi(u32 __user *uad
if (time) {
to = &timeout;
@@ -143,7 +143,7 @@ Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
hrtimer_set_expires(&to->timer, *time);
}
-@@ -3219,10 +3217,9 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3244,10 +3242,9 @@ static int futex_wait_requeue_pi(u32 __u
if (abs_time) {
to = &timeout;
@@ -159,7 +159,7 @@ Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
}
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -1639,13 +1639,44 @@ static enum hrtimer_restart hrtimer_wake
+@@ -1651,13 +1651,44 @@ static enum hrtimer_restart hrtimer_wake
return HRTIMER_NORESTART;
}
@@ -205,7 +205,7 @@ Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts)
{
switch(restart->nanosleep.type) {
-@@ -1669,8 +1700,6 @@ static int __sched do_nanosleep(struct h
+@@ -1681,8 +1712,6 @@ static int __sched do_nanosleep(struct h
{
struct restart_block *restart;
@@ -214,7 +214,7 @@ Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
do {
set_current_state(TASK_INTERRUPTIBLE);
hrtimer_start_expires(&t->timer, mode);
-@@ -1707,10 +1736,9 @@ static long __sched hrtimer_nanosleep_re
+@@ -1719,10 +1748,9 @@ static long __sched hrtimer_nanosleep_re
struct hrtimer_sleeper t;
int ret;
@@ -227,7 +227,7 @@ Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
ret = do_nanosleep(&t, HRTIMER_MODE_ABS);
destroy_hrtimer_on_stack(&t.timer);
return ret;
-@@ -1728,7 +1756,7 @@ long hrtimer_nanosleep(const struct time
+@@ -1740,7 +1768,7 @@ long hrtimer_nanosleep(const struct time
if (dl_task(current) || rt_task(current))
slack = 0;
@@ -236,7 +236,7 @@ Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
hrtimer_set_expires_range_ns(&t.timer, timespec64_to_ktime(*rqtp), slack);
ret = do_nanosleep(&t, mode);
if (ret != -ERESTART_RESTARTBLOCK)
-@@ -1927,11 +1955,9 @@ schedule_hrtimeout_range_clock(ktime_t *
+@@ -1940,11 +1968,9 @@ schedule_hrtimeout_range_clock(ktime_t *
return -EINTR;
}
@@ -251,7 +251,7 @@ Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
if (likely(t.task))
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
-@@ -2160,7 +2160,8 @@ static void spin(struct pktgen_dev *pkt_
+@@ -2154,7 +2154,8 @@ static void spin(struct pktgen_dev *pkt_
s64 remaining;
struct hrtimer_sleeper t;
@@ -261,7 +261,7 @@ Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
hrtimer_set_expires(&t.timer, spin_until);
remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer));
-@@ -2175,7 +2176,6 @@ static void spin(struct pktgen_dev *pkt_
+@@ -2169,7 +2170,6 @@ static void spin(struct pktgen_dev *pkt_
} while (ktime_compare(end_time, spin_until) < 0);
} else {
/* see do_nanosleep */
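
The shape of the change: where callers previously had to call hrtimer_init()
and then hrtimer_init_sleeper() themselves, one initialiser now does both and
wires up the wakeup function. A stub C sketch of that consolidation (stub
types and names, not the kernel API):

#include <stdio.h>

/* Stubs standing in for struct hrtimer / struct hrtimer_sleeper. */
struct timer_stub   { int clock_id; int mode; void (*function)(void *); void *arg; };
struct sleeper_stub { struct timer_stub timer; const char *task; };

static void wakeup_stub(void *arg)
{
    printf("waking %s\n", ((struct sleeper_stub *)arg)->task);
}

/* One call initialises the timer *and* the sleeper; previously the caller
 * had to do the timer setup in a separate step and could get it wrong. */
static void init_sleeper_stub(struct sleeper_stub *sl, int clock_id, int mode)
{
    sl->timer.clock_id = clock_id;
    sl->timer.mode     = mode;
    sl->timer.function = wakeup_stub;   /* wired up automatically */
    sl->timer.arg      = sl;
    sl->task           = "current";
}

int main(void)
{
    struct sleeper_stub sl;
    init_sleeper_stub(&sl, 1 /* e.g. CLOCK_MONOTONIC */, 0 /* e.g. ABS */);
    sl.timer.function(sl.timer.arg);    /* simulate the timer expiring */
    return 0;
}
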
diff --git a/debian/patches-rt/hrtimer-move-state-change-before-hrtimer_cancel-in-d.patch b/debian/patches-rt/hrtimer-move-state-change-before-hrtimer_cancel-in-d.patch
index 55ae498fc..f82416a5d 100644
--- a/debian/patches-rt/hrtimer-move-state-change-before-hrtimer_cancel-in-d.patch
+++ b/debian/patches-rt/hrtimer-move-state-change-before-hrtimer_cancel-in-d.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 6 Dec 2018 10:15:13 +0100
Subject: [PATCH] hrtimer: move state change before hrtimer_cancel in
do_nanosleep()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
There is a small window between setting t->task to NULL and waking the
task up (which would set TASK_RUNNING). So the timer would fire, run and
@@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -1769,12 +1769,12 @@ static int __sched do_nanosleep(struct h
+@@ -1736,12 +1736,12 @@ static int __sched do_nanosleep(struct h
if (likely(t->task))
freezable_schedule();
diff --git a/debian/patches-rt/hrtimers-prepare-full-preemption.patch b/debian/patches-rt/hrtimers-prepare-full-preemption.patch
deleted file mode 100644
index a37bd0a4e..000000000
--- a/debian/patches-rt/hrtimers-prepare-full-preemption.patch
+++ /dev/null
@@ -1,273 +0,0 @@
-From: Ingo Molnar <mingo@elte.hu>
-Date: Fri, 3 Jul 2009 08:29:34 -0500
-Subject: hrtimers: Prepare full preemption
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-Make cancellation of a running callback in softirq context safe
-against preemption.
-
-Signed-off-by: Ingo Molnar <mingo@elte.hu>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
----
- fs/timerfd.c | 5 ++++-
- include/linux/hrtimer.h | 13 ++++++++++++-
- include/linux/posix-timers.h | 2 +-
- kernel/time/alarmtimer.c | 2 +-
- kernel/time/hrtimer.c | 33 ++++++++++++++++++++++++++++++++-
- kernel/time/itimer.c | 1 +
- kernel/time/posix-timers.c | 39 +++++++++++++++++++++++++++++++++++++--
- 7 files changed, 88 insertions(+), 7 deletions(-)
-
---- a/fs/timerfd.c
-+++ b/fs/timerfd.c
-@@ -471,7 +471,10 @@ static int do_timerfd_settime(int ufd, i
- break;
- }
- spin_unlock_irq(&ctx->wqh.lock);
-- cpu_relax();
-+ if (isalarm(ctx))
-+ hrtimer_wait_for_timer(&ctx->t.alarm.timer);
-+ else
-+ hrtimer_wait_for_timer(&ctx->t.tmr);
- }
-
- /*
---- a/include/linux/hrtimer.h
-+++ b/include/linux/hrtimer.h
-@@ -19,6 +19,7 @@
- #include <linux/percpu.h>
- #include <linux/timer.h>
- #include <linux/timerqueue.h>
-+#include <linux/wait.h>
-
- struct hrtimer_clock_base;
- struct hrtimer_cpu_base;
-@@ -213,6 +214,9 @@ struct hrtimer_cpu_base {
- ktime_t expires_next;
- struct hrtimer *next_timer;
- ktime_t softirq_expires_next;
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ wait_queue_head_t wait;
-+#endif
- struct hrtimer *softirq_next_timer;
- struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
- } ____cacheline_aligned;
-@@ -430,6 +434,13 @@ static inline void hrtimer_restart(struc
- hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
- }
-
-+/* Softirq preemption could deadlock timer removal */
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ extern void hrtimer_wait_for_timer(const struct hrtimer *timer);
-+#else
-+# define hrtimer_wait_for_timer(timer) do { cpu_relax(); } while (0)
-+#endif
-+
- /* Query timers: */
- extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);
-
-@@ -455,7 +466,7 @@ static inline int hrtimer_is_queued(stru
- * Helper function to check, whether the timer is running the callback
- * function
- */
--static inline int hrtimer_callback_running(struct hrtimer *timer)
-+static inline int hrtimer_callback_running(const struct hrtimer *timer)
- {
- return timer->base->running == timer;
- }
---- a/include/linux/posix-timers.h
-+++ b/include/linux/posix-timers.h
-@@ -114,8 +114,8 @@ struct k_itimer {
- struct {
- struct alarm alarmtimer;
- } alarm;
-- struct rcu_head rcu;
- } it;
-+ struct rcu_head rcu;
- };
-
- void run_posix_cpu_timers(struct task_struct *task);
---- a/kernel/time/alarmtimer.c
-+++ b/kernel/time/alarmtimer.c
-@@ -433,7 +433,7 @@ int alarm_cancel(struct alarm *alarm)
- int ret = alarm_try_to_cancel(alarm);
- if (ret >= 0)
- return ret;
-- cpu_relax();
-+ hrtimer_wait_for_timer(&alarm->timer);
- }
- }
- EXPORT_SYMBOL_GPL(alarm_cancel);
---- a/kernel/time/hrtimer.c
-+++ b/kernel/time/hrtimer.c
-@@ -930,6 +930,33 @@ u64 hrtimer_forward(struct hrtimer *time
- }
- EXPORT_SYMBOL_GPL(hrtimer_forward);
-
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+# define wake_up_timer_waiters(b) wake_up(&(b)->wait)
-+
-+/**
-+ * hrtimer_wait_for_timer - Wait for a running timer
-+ *
-+ * @timer: timer to wait for
-+ *
-+ * The function waits in case the timers callback function is
-+ * currently executed on the waitqueue of the timer base. The
-+ * waitqueue is woken up after the timer callback function has
-+ * finished execution.
-+ */
-+void hrtimer_wait_for_timer(const struct hrtimer *timer)
-+{
-+ struct hrtimer_clock_base *base = timer->base;
-+
-+ if (base && base->cpu_base &&
-+ base->index >= HRTIMER_BASE_MONOTONIC_SOFT)
-+ wait_event(base->cpu_base->wait,
-+ !(hrtimer_callback_running(timer)));
-+}
-+
-+#else
-+# define wake_up_timer_waiters(b) do { } while (0)
-+#endif
-+
- /*
- * enqueue_hrtimer - internal function to (re)start a timer
- *
-@@ -1162,7 +1189,7 @@ int hrtimer_cancel(struct hrtimer *timer
-
- if (ret >= 0)
- return ret;
-- cpu_relax();
-+ hrtimer_wait_for_timer(timer);
- }
- }
- EXPORT_SYMBOL_GPL(hrtimer_cancel);
-@@ -1468,6 +1495,7 @@ static __latent_entropy void hrtimer_run
- hrtimer_update_softirq_timer(cpu_base, true);
-
- raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
-+ wake_up_timer_waiters(cpu_base);
- }
-
- #ifdef CONFIG_HIGH_RES_TIMERS
-@@ -1837,6 +1865,9 @@ int hrtimers_prepare_cpu(unsigned int cp
- cpu_base->softirq_next_timer = NULL;
- cpu_base->expires_next = KTIME_MAX;
- cpu_base->softirq_expires_next = KTIME_MAX;
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ init_waitqueue_head(&cpu_base->wait);
-+#endif
- return 0;
- }
-
---- a/kernel/time/itimer.c
-+++ b/kernel/time/itimer.c
-@@ -213,6 +213,7 @@ int do_setitimer(int which, struct itime
- /* We are sharing ->siglock with it_real_fn() */
- if (hrtimer_try_to_cancel(timer) < 0) {
- spin_unlock_irq(&tsk->sighand->siglock);
-+ hrtimer_wait_for_timer(&tsk->signal->real_timer);
- goto again;
- }
- expires = timeval_to_ktime(value->it_value);
---- a/kernel/time/posix-timers.c
-+++ b/kernel/time/posix-timers.c
-@@ -442,7 +442,7 @@ static struct k_itimer * alloc_posix_tim
-
- static void k_itimer_rcu_free(struct rcu_head *head)
- {
-- struct k_itimer *tmr = container_of(head, struct k_itimer, it.rcu);
-+ struct k_itimer *tmr = container_of(head, struct k_itimer, rcu);
-
- kmem_cache_free(posix_timers_cache, tmr);
- }
-@@ -459,7 +459,7 @@ static void release_posix_timer(struct k
- }
- put_pid(tmr->it_pid);
- sigqueue_free(tmr->sigq);
-- call_rcu(&tmr->it.rcu, k_itimer_rcu_free);
-+ call_rcu(&tmr->rcu, k_itimer_rcu_free);
- }
-
- static int common_timer_create(struct k_itimer *new_timer)
-@@ -800,6 +800,22 @@ static void common_hrtimer_arm(struct k_
- hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
- }
-
-+/*
-+ * Protected by RCU!
-+ */
-+static void timer_wait_for_callback(const struct k_clock *kc, struct k_itimer *timr)
-+{
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ if (kc->timer_arm == common_hrtimer_arm)
-+ hrtimer_wait_for_timer(&timr->it.real.timer);
-+ else if (kc == &alarm_clock)
-+ hrtimer_wait_for_timer(&timr->it.alarm.alarmtimer.timer);
-+ else
-+ /* FIXME: Whacky hack for posix-cpu-timers */
-+ schedule_timeout(1);
-+#endif
-+}
-+
- static int common_hrtimer_try_to_cancel(struct k_itimer *timr)
- {
- return hrtimer_try_to_cancel(&timr->it.real.timer);
-@@ -864,6 +880,7 @@ static int do_timer_settime(timer_t time
- if (!timr)
- return -EINVAL;
-
-+ rcu_read_lock();
- kc = timr->kclock;
- if (WARN_ON_ONCE(!kc || !kc->timer_set))
- error = -EINVAL;
-@@ -872,9 +889,12 @@ static int do_timer_settime(timer_t time
-
- unlock_timer(timr, flag);
- if (error == TIMER_RETRY) {
-+ timer_wait_for_callback(kc, timr);
- old_spec64 = NULL; // We already got the old time...
-+ rcu_read_unlock();
- goto retry;
- }
-+ rcu_read_unlock();
-
- return error;
- }
-@@ -956,10 +976,15 @@ SYSCALL_DEFINE1(timer_delete, timer_t, t
- if (!timer)
- return -EINVAL;
-
-+ rcu_read_lock();
- if (timer_delete_hook(timer) == TIMER_RETRY) {
- unlock_timer(timer, flags);
-+ timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
-+ timer);
-+ rcu_read_unlock();
- goto retry_delete;
- }
-+ rcu_read_unlock();
-
- spin_lock(&current->sighand->siglock);
- list_del(&timer->list);
-@@ -985,8 +1010,18 @@ static void itimer_delete(struct k_itime
- retry_delete:
- spin_lock_irqsave(&timer->it_lock, flags);
-
-+ /* On RT we can race with a deletion */
-+ if (!timer->it_signal) {
-+ unlock_timer(timer, flags);
-+ return;
-+ }
-+
- if (timer_delete_hook(timer) == TIMER_RETRY) {
-+ rcu_read_lock();
- unlock_timer(timer, flags);
-+ timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
-+ timer);
-+ rcu_read_unlock();
- goto retry_delete;
- }
- list_del(&timer->list);
diff --git a/debian/patches-rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch b/debian/patches-rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
deleted file mode 100644
index 27459290b..000000000
--- a/debian/patches-rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
+++ /dev/null
@@ -1,147 +0,0 @@
-Subject: genirq: Allow disabling of softirq processing in irq thread context
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Tue, 31 Jan 2012 13:01:27 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-The processing of softirqs in irq thread context is a performance gain
-for the non-rt workloads of a system, but it's counterproductive for
-interrupts which are explicitely related to the realtime
-workload. Allow such interrupts to prevent softirq processing in their
-thread context.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
----
- include/linux/interrupt.h | 2 ++
- include/linux/irq.h | 4 +++-
- kernel/irq/manage.c | 13 ++++++++++++-
- kernel/irq/settings.h | 12 ++++++++++++
- kernel/softirq.c | 9 +++++++++
- 5 files changed, 38 insertions(+), 2 deletions(-)
-
---- a/include/linux/interrupt.h
-+++ b/include/linux/interrupt.h
-@@ -62,6 +62,7 @@
- * interrupt handler after suspending interrupts. For system
- * wakeup devices users need to implement wakeup detection in
- * their interrupt handlers.
-+ * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT)
- */
- #define IRQF_SHARED 0x00000080
- #define IRQF_PROBE_SHARED 0x00000100
-@@ -75,6 +76,7 @@
- #define IRQF_NO_THREAD 0x00010000
- #define IRQF_EARLY_RESUME 0x00020000
- #define IRQF_COND_SUSPEND 0x00040000
-+#define IRQF_NO_SOFTIRQ_CALL 0x00080000
-
- #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
-
---- a/include/linux/irq.h
-+++ b/include/linux/irq.h
-@@ -70,6 +70,7 @@ enum irqchip_irq_state;
- * IRQ_IS_POLLED - Always polled by another interrupt. Exclude
- * it from the spurious interrupt detection
- * mechanism and from core side polling.
-+ * IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT)
- * IRQ_DISABLE_UNLAZY - Disable lazy irq disable
- */
- enum {
-@@ -97,13 +98,14 @@ enum {
- IRQ_PER_CPU_DEVID = (1 << 17),
- IRQ_IS_POLLED = (1 << 18),
- IRQ_DISABLE_UNLAZY = (1 << 19),
-+ IRQ_NO_SOFTIRQ_CALL = (1 << 20),
- };
-
- #define IRQF_MODIFY_MASK \
- (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
- IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
- IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
-- IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY)
-+ IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_NO_SOFTIRQ_CALL)
-
- #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
-
---- a/kernel/irq/manage.c
-+++ b/kernel/irq/manage.c
-@@ -962,7 +962,15 @@ irq_forced_thread_fn(struct irq_desc *de
- atomic_inc(&desc->threads_handled);
-
- irq_finalize_oneshot(desc, action);
-- local_bh_enable();
-+ /*
-+ * Interrupts which have real time requirements can be set up
-+ * to avoid softirq processing in the thread handler. This is
-+ * safe as these interrupts do not raise soft interrupts.
-+ */
-+ if (irq_settings_no_softirq_call(desc))
-+ _local_bh_enable();
-+ else
-+ local_bh_enable();
- return ret;
- }
-
-@@ -1472,6 +1480,9 @@ static int
- irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
- }
-
-+ if (new->flags & IRQF_NO_SOFTIRQ_CALL)
-+ irq_settings_set_no_softirq_call(desc);
-+
- if (irq_settings_can_autoenable(desc)) {
- irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
- } else {
---- a/kernel/irq/settings.h
-+++ b/kernel/irq/settings.h
-@@ -17,6 +17,7 @@ enum {
- _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
- _IRQ_IS_POLLED = IRQ_IS_POLLED,
- _IRQ_DISABLE_UNLAZY = IRQ_DISABLE_UNLAZY,
-+ _IRQ_NO_SOFTIRQ_CALL = IRQ_NO_SOFTIRQ_CALL,
- _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
- };
-
-@@ -31,6 +32,7 @@ enum {
- #define IRQ_PER_CPU_DEVID GOT_YOU_MORON
- #define IRQ_IS_POLLED GOT_YOU_MORON
- #define IRQ_DISABLE_UNLAZY GOT_YOU_MORON
-+#define IRQ_NO_SOFTIRQ_CALL GOT_YOU_MORON
- #undef IRQF_MODIFY_MASK
- #define IRQF_MODIFY_MASK GOT_YOU_MORON
-
-@@ -41,6 +43,16 @@ irq_settings_clr_and_set(struct irq_desc
- desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK);
- }
-
-+static inline bool irq_settings_no_softirq_call(struct irq_desc *desc)
-+{
-+ return desc->status_use_accessors & _IRQ_NO_SOFTIRQ_CALL;
-+}
-+
-+static inline void irq_settings_set_no_softirq_call(struct irq_desc *desc)
-+{
-+ desc->status_use_accessors |= _IRQ_NO_SOFTIRQ_CALL;
-+}
-+
- static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
- {
- return desc->status_use_accessors & _IRQ_PER_CPU;
---- a/kernel/softirq.c
-+++ b/kernel/softirq.c
-@@ -627,6 +627,15 @@ void __local_bh_enable(void)
- }
- EXPORT_SYMBOL(__local_bh_enable);
-
-+void _local_bh_enable(void)
-+{
-+ if (WARN_ON(current->softirq_nestcnt == 0))
-+ return;
-+ if (--current->softirq_nestcnt == 0)
-+ migrate_enable();
-+}
-+EXPORT_SYMBOL(_local_bh_enable);
-+
- int in_serving_softirq(void)
- {
- return current->flags & PF_IN_SOFTIRQ;
diff --git a/debian/patches-rt/irqwork-push_most_work_into_softirq_context.patch b/debian/patches-rt/irqwork-push_most_work_into_softirq_context.patch
index 0096c7452..e7ebb59a6 100644
--- a/debian/patches-rt/irqwork-push_most_work_into_softirq_context.patch
+++ b/debian/patches-rt/irqwork-push_most_work_into_softirq_context.patch
@@ -1,7 +1,7 @@
Subject: irqwork: push most work into softirq context
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 23 Jun 2015 15:32:51 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Initially we deferred all irqwork into softirq because we didn't want the
latency spikes if perf or another user was busy and delayed the RT task.
@@ -23,12 +23,12 @@ Mike Galbraith,
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/irq_work.h | 8 ++++++
- kernel/irq_work.c | 60 ++++++++++++++++++++++++++++++++++++-----------
+ kernel/irq_work.c | 59 +++++++++++++++++++++++++++++++++++++----------
kernel/rcu/tree.c | 1
kernel/sched/topology.c | 1
kernel/time/tick-sched.c | 1
kernel/time/timer.c | 2 +
- 6 files changed, 60 insertions(+), 13 deletions(-)
+ 6 files changed, 60 insertions(+), 12 deletions(-)
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* _LINUX_IRQ_WORK_H */
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
-@@ -17,6 +17,7 @@
+@@ -18,6 +18,7 @@
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
@@ -62,52 +62,22 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <asm/processor.h>
-@@ -64,6 +65,8 @@ void __weak arch_irq_work_raise(void)
- */
- bool irq_work_queue_on(struct irq_work *work, int cpu)
- {
-+ struct llist_head *list;
-+
- /* All work should have been flushed before going offline */
- WARN_ON_ONCE(cpu_is_offline(cpu));
-
-@@ -76,7 +79,12 @@ bool irq_work_queue_on(struct irq_work *
- if (!irq_work_claim(work))
- return false;
-
-- if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
-+ if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ))
-+ list = &per_cpu(lazy_list, cpu);
-+ else
-+ list = &per_cpu(raised_list, cpu);
-+
-+ if (llist_add(&work->llnode, list))
- arch_send_call_function_single_ipi(cpu);
-
- #else /* #ifdef CONFIG_SMP */
-@@ -89,6 +97,9 @@ bool irq_work_queue_on(struct irq_work *
- /* Enqueue the irq work @work on the current CPU */
- bool irq_work_queue(struct irq_work *work)
+@@ -60,13 +61,19 @@ void __weak arch_irq_work_raise(void)
+ /* Enqueue on current CPU, work must already be claimed and preempt disabled */
+ static void __irq_work_queue_local(struct irq_work *work)
{
+ struct llist_head *list;
+ bool lazy_work, realtime = IS_ENABLED(CONFIG_PREEMPT_RT_FULL);
+
- /* Only queue if not already pending */
- if (!irq_work_claim(work))
- return false;
-@@ -96,13 +107,15 @@ bool irq_work_queue(struct irq_work *wor
- /* Queue the entry and raise the IPI if needed. */
- preempt_disable();
-
-- /* If the work is "lazy", handle it from next tick if any */
++ lazy_work = work->flags & IRQ_WORK_LAZY;
++
+ /* If the work is "lazy", handle it from next tick if any */
- if (work->flags & IRQ_WORK_LAZY) {
- if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
- tick_nohz_tick_stopped())
- arch_irq_work_raise();
- } else {
- if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
-+ lazy_work = work->flags & IRQ_WORK_LAZY;
-+
+ if (lazy_work || (realtime && !(work->flags & IRQ_WORK_HARD_IRQ)))
+ list = this_cpu_ptr(&lazy_list);
+ else
@@ -117,8 +87,26 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ if (!lazy_work || tick_nohz_tick_stopped())
arch_irq_work_raise();
}
+ }
+@@ -108,9 +115,16 @@ bool irq_work_queue_on(struct irq_work *
-@@ -119,9 +132,8 @@ bool irq_work_needs_cpu(void)
+ preempt_disable();
+ if (cpu != smp_processor_id()) {
++ struct llist_head *list;
++
+ /* Arch remote IPI send/receive backend aren't NMI safe */
+ WARN_ON_ONCE(in_nmi());
+- if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
++ if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ))
++ list = &per_cpu(lazy_list, cpu);
++ else
++ list = &per_cpu(raised_list, cpu);
++
++ if (llist_add(&work->llnode, list))
+ arch_send_call_function_single_ipi(cpu);
+ } else {
+ __irq_work_queue_local(work);
+@@ -129,9 +143,8 @@ bool irq_work_needs_cpu(void)
raised = this_cpu_ptr(&raised_list);
lazy = this_cpu_ptr(&lazy_list);
@@ -130,7 +118,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* All work should have been flushed before going offline */
WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
-@@ -135,8 +147,12 @@ static void irq_work_run_list(struct lli
+@@ -145,8 +158,12 @@ static void irq_work_run_list(struct lli
struct llist_node *llnode;
unsigned long flags;
@@ -144,7 +132,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (llist_empty(list))
return;
-@@ -168,7 +184,16 @@ static void irq_work_run_list(struct lli
+@@ -178,7 +195,16 @@ static void irq_work_run_list(struct lli
void irq_work_run(void)
{
irq_work_run_list(this_cpu_ptr(&raised_list));
@@ -162,7 +150,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
EXPORT_SYMBOL_GPL(irq_work_run);
-@@ -178,8 +203,17 @@ void irq_work_tick(void)
+@@ -188,8 +214,17 @@ void irq_work_tick(void)
if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
irq_work_run_list(raised);
@@ -182,7 +170,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* Synchronize against the irq_work @entry, ensures the entry is not
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
-@@ -1152,6 +1152,7 @@ static int rcu_implicit_dynticks_qs(stru
+@@ -1074,6 +1074,7 @@ static int rcu_implicit_dynticks_qs(stru
!rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
(rnp->ffmask & rdp->grpmask)) {
init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
@@ -192,7 +180,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
-@@ -473,6 +473,7 @@ static int init_rootdomain(struct root_d
+@@ -502,6 +502,7 @@ static int init_rootdomain(struct root_d
rd->rto_cpu = -1;
raw_spin_lock_init(&rd->rto_lock);
init_irq_work(&rd->rto_push_work, rto_push_irq_work_func);
@@ -202,7 +190,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
init_dl_bw(&rd->dl_bw);
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
-@@ -229,6 +229,7 @@ static void nohz_full_kick_func(struct i
+@@ -235,6 +235,7 @@ static void nohz_full_kick_func(struct i
static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
.func = nohz_full_kick_func,
@@ -212,7 +200,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
-@@ -1723,6 +1723,8 @@ static __latent_entropy void run_timer_s
+@@ -1727,6 +1727,8 @@ static __latent_entropy void run_timer_s
{
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
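
The queueing rule the refreshed hunks implement can be read in isolation: on
PREEMPT_RT_FULL, every item not flagged hard-irq-safe is diverted to the lazy
list and run from softirq, while the raised list keeps only hard-irq work. A
compilable restatement of just that decision (flag values and names are
illustrative, not the kernel definitions):

#include <stdbool.h>
#include <stdio.h>

#define IRQ_WORK_LAZY     (1u << 0)
#define IRQ_WORK_HARD_IRQ (1u << 1)

static const char *pick_list(unsigned int flags, bool rt_full)
{
    bool lazy_work = flags & IRQ_WORK_LAZY;

    /* On RT, non-hard-irq work joins the lazy work on the softirq path. */
    if (lazy_work || (rt_full && !(flags & IRQ_WORK_HARD_IRQ)))
        return "lazy_list";   /* run later from softirq / timer tick */
    return "raised_list";     /* run directly from the hard interrupt */
}

int main(void)
{
    printf("%s\n", pick_list(0, true));                 /* lazy_list on RT */
    printf("%s\n", pick_list(IRQ_WORK_HARD_IRQ, true)); /* raised_list */
    printf("%s\n", pick_list(0, false));                /* raised_list */
    return 0;
}
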
diff --git a/debian/patches-rt/jump-label-rt.patch b/debian/patches-rt/jump-label-rt.patch
index 486ebb22f..74f63c2b3 100644
--- a/debian/patches-rt/jump-label-rt.patch
+++ b/debian/patches-rt/jump-label-rt.patch
@@ -1,7 +1,7 @@
Subject: jump-label: disable if stop_machine() is used
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 08 Jul 2015 17:14:48 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Some architectures are using stop_machine() while switching the opcode which
leads to latency spikes.
@@ -25,7 +25,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -52,7 +52,7 @@ config ARM
+@@ -57,7 +57,7 @@ config ARM
select HARDIRQS_SW_RESEND
select HAVE_ARCH_AUDITSYSCALL if AEABI && !OABI_COMPAT
select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
diff --git a/debian/patches-rt/kconfig-disable-a-few-options-rt.patch b/debian/patches-rt/kconfig-disable-a-few-options-rt.patch
index 1d6ecc94a..3a2cb3559 100644
--- a/debian/patches-rt/kconfig-disable-a-few-options-rt.patch
+++ b/debian/patches-rt/kconfig-disable-a-few-options-rt.patch
@@ -1,7 +1,7 @@
Subject: kconfig: Disable config options which are not RT compatible
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 24 Jul 2011 12:11:43 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Disable stuff which is known to have issues on RT
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
help
--- a/mm/Kconfig
+++ b/mm/Kconfig
-@@ -372,7 +372,7 @@ config NOMMU_INITIAL_TRIM_EXCESS
+@@ -373,7 +373,7 @@ config NOMMU_INITIAL_TRIM_EXCESS
config TRANSPARENT_HUGEPAGE
bool "Transparent Hugepage Support"
diff --git a/debian/patches-rt/kconfig-preempt-rt-full.patch b/debian/patches-rt/kconfig-preempt-rt-full.patch
index 323ebc385..6bbb5a64e 100644
--- a/debian/patches-rt/kconfig-preempt-rt-full.patch
+++ b/debian/patches-rt/kconfig-preempt-rt-full.patch
@@ -1,7 +1,7 @@
Subject: kconfig: Add PREEMPT_RT_FULL
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 29 Jun 2011 14:58:57 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Introduce the final symbol for PREEMPT_RT_FULL.
@@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)"
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
-@@ -69,6 +69,14 @@ config PREEMPT_RTB
+@@ -70,6 +70,14 @@ config PREEMPT_RTB
enables changes which are preliminary for the full preemptible
RT kernel.
diff --git a/debian/patches-rt/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch b/debian/patches-rt/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
index 598a3f689..3847d2c88 100644
--- a/debian/patches-rt/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
+++ b/debian/patches-rt/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
@@ -4,7 +4,7 @@ Subject: [PATCH] kernel: sched: Provide a pointer to the valid CPU mask
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
In commit 4b53a3412d66 ("sched/core: Remove the tsk_nr_cpus_allowed()
wrapper") the tsk_nr_cpus_allowed() wrapper was removed. There was not
@@ -84,7 +84,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
-@@ -1824,7 +1824,7 @@ format_mca_init_stack(void *mca_data, un
+@@ -1831,7 +1831,7 @@ format_mca_init_stack(void *mca_data, un
ti->cpu = cpu;
p->stack = ti;
p->state = TASK_UNINTERRUPTIBLE;
@@ -144,7 +144,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
set_thread_flag(TIF_FPUBOUND);
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
-@@ -141,7 +141,7 @@ void __spu_update_sched_info(struct spu_
+@@ -128,7 +128,7 @@ void __spu_update_sched_info(struct spu_
* runqueue. The context will be rescheduled on the proper node
* if it is timesliced or preempted.
*/
@@ -155,7 +155,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
ctx->last_ran = raw_smp_processor_id();
--- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
-@@ -1510,7 +1510,7 @@ static int pseudo_lock_dev_mmap(struct f
+@@ -1503,7 +1503,7 @@ static int pseudo_lock_dev_mmap(struct f
* may be scheduled elsewhere and invalidate entries in the
* pseudo-locked region.
*/
@@ -166,7 +166,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
-@@ -1037,7 +1037,7 @@ int hfi1_get_proc_affinity(int node)
+@@ -1038,7 +1038,7 @@ int hfi1_get_proc_affinity(int node)
struct hfi1_affinity_node *entry;
cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask;
const struct cpumask *node_mask,
@@ -175,7 +175,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct hfi1_affinity_node_list *affinity = &node_affinity;
struct cpu_mask_set *set = &affinity->proc;
-@@ -1045,7 +1045,7 @@ int hfi1_get_proc_affinity(int node)
+@@ -1046,7 +1046,7 @@ int hfi1_get_proc_affinity(int node)
* check whether process/context affinity has already
* been set
*/
@@ -184,7 +184,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
current->pid, current->comm,
cpumask_pr_args(proc_mask));
-@@ -1056,7 +1056,7 @@ int hfi1_get_proc_affinity(int node)
+@@ -1057,7 +1057,7 @@ int hfi1_get_proc_affinity(int node)
cpu = cpumask_first(proc_mask);
cpumask_set_cpu(cpu, &set->used);
goto done;
@@ -195,7 +195,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
cpumask_pr_args(proc_mask));
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
-@@ -855,14 +855,13 @@ struct sdma_engine *sdma_select_user_eng
+@@ -869,14 +869,13 @@ struct sdma_engine *sdma_select_user_eng
{
struct sdma_rht_node *rht_node;
struct sdma_engine *sde = NULL;
@@ -250,7 +250,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline void task_core_dumping(struct seq_file *m, struct mm_struct *mm)
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -659,7 +659,8 @@ struct task_struct {
+@@ -651,7 +651,8 @@ struct task_struct {
unsigned int policy;
int nr_cpus_allowed;
@@ -260,18 +260,18 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
-@@ -1401,7 +1402,7 @@ extern struct pid *cad_pid;
+@@ -1402,7 +1403,7 @@ extern struct pid *cad_pid;
#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
#define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */
#define PF_UMH 0x02000000 /* I'm an Usermodehelper process */
-#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
-+#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */
++#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
- #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
+ #define PF_MEMALLOC_NOCMA 0x10000000 /* All allocation request will have _GFP_MOVABLE cleared */
#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
--- a/init/init_task.c
+++ b/init/init_task.c
-@@ -71,7 +71,8 @@ struct task_struct init_task
+@@ -72,7 +72,8 @@ struct task_struct init_task
.static_prio = MAX_PRIO - 20,
.normal_prio = MAX_PRIO - 20,
.policy = SCHED_NORMAL,
@@ -283,7 +283,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
.active_mm = &init_mm,
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
-@@ -2815,7 +2815,7 @@ static void cpuset_fork(struct task_stru
+@@ -2829,7 +2829,7 @@ static void cpuset_fork(struct task_stru
if (task_css_is_root(task, cpuset_cgrp_id))
return;
@@ -294,7 +294,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -891,6 +891,8 @@ static struct task_struct *dup_task_stru
+@@ -898,6 +898,8 @@ static struct task_struct *dup_task_stru
#ifdef CONFIG_STACKPROTECTOR
tsk->stack_canary = get_random_canary();
#endif
@@ -305,7 +305,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* One for us, one for whoever does the "release_task()" (usually
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -896,7 +896,7 @@ static inline bool is_per_cpu_kthread(st
+@@ -930,7 +930,7 @@ static inline bool is_per_cpu_kthread(st
*/
static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
{
@@ -314,7 +314,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return false;
if (is_per_cpu_kthread(p))
-@@ -991,7 +991,7 @@ static int migration_cpu_stop(void *data
+@@ -1025,7 +1025,7 @@ static int migration_cpu_stop(void *data
local_irq_disable();
/*
* We need to explicitly wake pending tasks before running
@@ -323,7 +323,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
*/
sched_ttwu_pending();
-@@ -1022,7 +1022,7 @@ static int migration_cpu_stop(void *data
+@@ -1056,7 +1056,7 @@ static int migration_cpu_stop(void *data
*/
void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
{
@@ -332,7 +332,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
p->nr_cpus_allowed = cpumask_weight(new_mask);
}
-@@ -1092,7 +1092,7 @@ static int __set_cpus_allowed_ptr(struct
+@@ -1126,7 +1126,7 @@ static int __set_cpus_allowed_ptr(struct
goto out;
}
@@ -341,7 +341,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto out;
if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
-@@ -1255,10 +1255,10 @@ static int migrate_swap_stop(void *data)
+@@ -1286,10 +1286,10 @@ static int migrate_swap_stop(void *data)
if (task_cpu(arg->src_task) != arg->src_cpu)
goto unlock;
@@ -354,7 +354,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto unlock;
__migrate_swap_task(arg->src_task, arg->dst_cpu);
-@@ -1300,10 +1300,10 @@ int migrate_swap(struct task_struct *cur
+@@ -1331,10 +1331,10 @@ int migrate_swap(struct task_struct *cur
if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
goto out;
@@ -367,7 +367,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto out;
trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
-@@ -1448,7 +1448,7 @@ void kick_process(struct task_struct *p)
+@@ -1479,7 +1479,7 @@ void kick_process(struct task_struct *p)
EXPORT_SYMBOL_GPL(kick_process);
/*
@@ -376,7 +376,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
* A few notes on cpu_active vs cpu_online:
*
-@@ -1488,14 +1488,14 @@ static int select_fallback_rq(int cpu, s
+@@ -1519,14 +1519,14 @@ static int select_fallback_rq(int cpu, s
for_each_cpu(dest_cpu, nodemask) {
if (!cpu_active(dest_cpu))
continue;
@@ -393,7 +393,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!is_cpu_allowed(p, dest_cpu))
continue;
-@@ -1539,7 +1539,7 @@ static int select_fallback_rq(int cpu, s
+@@ -1570,7 +1570,7 @@ static int select_fallback_rq(int cpu, s
}
/*
@@ -402,7 +402,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
static inline
int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
-@@ -1549,11 +1549,11 @@ int select_task_rq(struct task_struct *p
+@@ -1580,11 +1580,11 @@ int select_task_rq(struct task_struct *p
if (p->nr_cpus_allowed > 1)
cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
else
@@ -416,7 +416,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* CPU.
*
* Since this is common to all placement strategies, this lives here.
-@@ -2421,7 +2421,7 @@ void wake_up_new_task(struct task_struct
+@@ -2395,7 +2395,7 @@ void wake_up_new_task(struct task_struct
#ifdef CONFIG_SMP
/*
* Fork balancing, do it here and not earlier because:
@@ -425,7 +425,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* - any previously selected CPU might disappear through hotplug
*
* Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
-@@ -4287,7 +4287,7 @@ static int __sched_setscheduler(struct t
+@@ -4270,7 +4270,7 @@ static int __sched_setscheduler(struct t
* the entire root_domain to become SCHED_DEADLINE. We
* will also fail if there's no bandwidth available.
*/
@@ -434,7 +434,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
rq->rd->dl_bw.bw == 0) {
task_rq_unlock(rq, p, &rf);
return -EPERM;
-@@ -4886,7 +4886,7 @@ long sched_getaffinity(pid_t pid, struct
+@@ -4869,7 +4869,7 @@ long sched_getaffinity(pid_t pid, struct
goto out_unlock;
raw_spin_lock_irqsave(&p->pi_lock, flags);
@@ -443,7 +443,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
out_unlock:
-@@ -5464,7 +5464,7 @@ int task_can_attach(struct task_struct *
+@@ -5446,7 +5446,7 @@ int task_can_attach(struct task_struct *
* allowed nodes is unnecessary. Thus, cpusets are not
* applicable for such threads. This prevents checking for
* success of set_cpus_allowed_ptr() on all attached tasks
@@ -452,7 +452,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
if (p->flags & PF_NO_SETAFFINITY) {
ret = -EINVAL;
-@@ -5491,7 +5491,7 @@ int migrate_task_to(struct task_struct *
+@@ -5473,7 +5473,7 @@ int migrate_task_to(struct task_struct *
if (curr_cpu == target_cpu)
return 0;
@@ -461,7 +461,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return -EINVAL;
/* TODO: This is not properly updating schedstats */
-@@ -5629,7 +5629,7 @@ static void migrate_tasks(struct rq *dea
+@@ -5611,7 +5611,7 @@ static void migrate_tasks(struct rq *dea
put_prev_task(rq, next);
/*
@@ -472,7 +472,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
-@@ -124,14 +124,14 @@ int cpudl_find(struct cpudl *cp, struct
+@@ -120,14 +120,14 @@ int cpudl_find(struct cpudl *cp, struct
const struct sched_dl_entity *dl_se = &p->dl;
if (later_mask &&
@@ -491,7 +491,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
cpumask_set_cpu(best_cpu, later_mask);
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
-@@ -98,11 +98,11 @@ int cpupri_find(struct cpupri *cp, struc
+@@ -94,11 +94,11 @@ int cpupri_find(struct cpupri *cp, struc
if (skip)
continue;
@@ -507,7 +507,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* We have to ensure that we have at least one bit
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
-@@ -539,7 +539,7 @@ static struct rq *dl_task_offline_migrat
+@@ -538,7 +538,7 @@ static struct rq *dl_task_offline_migrat
* If we cannot preempt any rq, fall back to pick any
* online CPU:
*/
@@ -516,7 +516,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (cpu >= nr_cpu_ids) {
/*
* Failed to find any suitable CPU.
-@@ -1825,7 +1825,7 @@ static void set_curr_task_dl(struct rq *
+@@ -1824,7 +1824,7 @@ static void set_curr_task_dl(struct rq *
static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
{
if (!task_running(rq, p) &&
@@ -525,7 +525,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 1;
return 0;
}
-@@ -1975,7 +1975,7 @@ static struct rq *find_lock_later_rq(str
+@@ -1974,7 +1974,7 @@ static struct rq *find_lock_later_rq(str
/* Retry if something changed. */
if (double_lock_balance(rq, later_rq)) {
if (unlikely(task_rq(task) != rq ||
@@ -536,7 +536,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
!task_on_rq_queued(task))) {
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
-@@ -1608,7 +1608,7 @@ static void task_numa_compare(struct tas
+@@ -1621,7 +1621,7 @@ static void task_numa_compare(struct tas
* be incurred if the tasks were swapped.
*/
/* Skip this swap candidate if cannot move to the source cpu */
@@ -545,7 +545,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto unlock;
/*
-@@ -1705,7 +1705,7 @@ static void task_numa_find_cpu(struct ta
+@@ -1718,7 +1718,7 @@ static void task_numa_find_cpu(struct ta
for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
/* Skip this CPU if the source task cannot migrate */
@@ -554,7 +554,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
env->dst_cpu = cpu;
-@@ -5785,7 +5785,7 @@ find_idlest_group(struct sched_domain *s
+@@ -5831,7 +5831,7 @@ find_idlest_group(struct sched_domain *s
/* Skip over this group if it has no CPUs allowed */
if (!cpumask_intersects(sched_group_span(group),
@@ -563,7 +563,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
local_group = cpumask_test_cpu(this_cpu,
-@@ -5917,7 +5917,7 @@ find_idlest_group_cpu(struct sched_group
+@@ -5963,7 +5963,7 @@ find_idlest_group_cpu(struct sched_group
return cpumask_first(sched_group_span(group));
/* Traverse only the allowed CPUs */
@@ -572,7 +572,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (available_idle_cpu(i)) {
struct rq *rq = cpu_rq(i);
struct cpuidle_state *idle = idle_get_state(rq);
-@@ -5957,7 +5957,7 @@ static inline int find_idlest_cpu(struct
+@@ -6003,7 +6003,7 @@ static inline int find_idlest_cpu(struct
{
int new_cpu = cpu;
@@ -581,7 +581,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return prev_cpu;
/*
-@@ -6074,7 +6074,7 @@ static int select_idle_core(struct task_
+@@ -6120,7 +6120,7 @@ static int select_idle_core(struct task_
if (!test_idle_cores(target, false))
return -1;
@@ -590,7 +590,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for_each_cpu_wrap(core, cpus, target) {
bool idle = true;
-@@ -6108,7 +6108,7 @@ static int select_idle_smt(struct task_s
+@@ -6154,7 +6154,7 @@ static int select_idle_smt(struct task_s
return -1;
for_each_cpu(cpu, cpu_smt_mask(target)) {
@@ -599,7 +599,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
if (available_idle_cpu(cpu))
return cpu;
-@@ -6171,7 +6171,7 @@ static int select_idle_cpu(struct task_s
+@@ -6217,7 +6217,7 @@ static int select_idle_cpu(struct task_s
for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
if (!--nr)
return -1;
@@ -608,7 +608,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
if (available_idle_cpu(cpu))
break;
-@@ -6208,7 +6208,7 @@ static int select_idle_sibling(struct ta
+@@ -6254,7 +6254,7 @@ static int select_idle_sibling(struct ta
recent_used_cpu != target &&
cpus_share_cache(recent_used_cpu, target) &&
available_idle_cpu(recent_used_cpu) &&
@@ -617,7 +617,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Replace recent_used_cpu with prev as it is a potential
* candidate for the next wake:
-@@ -6554,7 +6554,7 @@ static int find_energy_efficient_cpu(str
+@@ -6600,7 +6600,7 @@ static int find_energy_efficient_cpu(str
int max_spare_cap_cpu = -1;
for_each_cpu_and(cpu, perf_domain_span(pd), sched_domain_span(sd)) {
@@ -626,7 +626,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
/* Skip CPUs that will be overutilized. */
-@@ -6643,7 +6643,7 @@ select_task_rq_fair(struct task_struct *
+@@ -6689,7 +6689,7 @@ select_task_rq_fair(struct task_struct *
}
want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu) &&
@@ -635,7 +635,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
rcu_read_lock();
-@@ -7393,14 +7393,14 @@ int can_migrate_task(struct task_struct
+@@ -7445,14 +7445,14 @@ int can_migrate_task(struct task_struct
/*
* We do not migrate tasks that are:
* 1) throttled_lb_pair, or
@@ -652,7 +652,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int cpu;
schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
-@@ -7420,7 +7420,7 @@ int can_migrate_task(struct task_struct
+@@ -7472,7 +7472,7 @@ int can_migrate_task(struct task_struct
/* Prevent to re-select dst_cpu via env's CPUs: */
for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
@@ -661,7 +661,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
env->flags |= LBF_DST_PINNED;
env->new_dst_cpu = cpu;
break;
-@@ -8017,7 +8017,7 @@ check_cpu_capacity(struct rq *rq, struct
+@@ -8099,7 +8099,7 @@ static inline int check_misfit_status(st
/*
* Group imbalance indicates (and tries to solve) the problem where balancing
@@ -670,7 +670,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
* Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a
* cpumask covering 1 CPU of the first group and 3 CPUs of the second group.
-@@ -8688,7 +8688,7 @@ static struct sched_group *find_busiest_
+@@ -8768,7 +8768,7 @@ static struct sched_group *find_busiest_
/*
* If the busiest group is imbalanced the below checks don't
* work because they assume all things are equal, which typically
@@ -679,7 +679,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
if (busiest->group_type == group_imbalanced)
goto force_balance;
-@@ -9116,7 +9116,7 @@ static int load_balance(int this_cpu, st
+@@ -9210,7 +9210,7 @@ static int load_balance(int this_cpu, st
* if the curr task on busiest CPU can't be
* moved to this_cpu:
*/
@@ -721,7 +721,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
get_online_cpus();
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
-@@ -22,7 +22,7 @@ notrace static unsigned int check_preemp
+@@ -23,7 +23,7 @@ unsigned int check_preemption_disabled(c
* Kernel threads bound to a single CPU can safely use
* smp_processor_id():
*/
@@ -732,7 +732,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/samples/trace_events/trace-events-sample.c
+++ b/samples/trace_events/trace-events-sample.c
-@@ -33,7 +33,7 @@ static void simple_thread_func(int cnt)
+@@ -34,7 +34,7 @@ static void simple_thread_func(int cnt)
/* Silly tracepoints */
trace_foo_bar("hello", cnt, array, random_strings[len],
diff --git a/debian/patches-rt/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch b/debian/patches-rt/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
index e0b01b2a1..53ab2ed64 100644
--- a/debian/patches-rt/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
+++ b/debian/patches-rt/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 21 Nov 2016 19:31:08 +0100
Subject: [PATCH] kernel/sched: move stack + kprobe clean up to
__put_task_struct()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
There is no need to free the stack before the task struct (except for reasons
mentioned in commit 68f24b08ee89 ("sched/core: Free the stack early if
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -40,6 +40,7 @@
+@@ -43,6 +43,7 @@
#include <linux/hmm.h>
#include <linux/fs.h>
#include <linux/mm.h>
@@ -26,8 +26,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <linux/vmacache.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
-@@ -735,6 +736,15 @@ void __put_task_struct(struct task_struc
- WARN_ON(atomic_read(&tsk->usage));
+@@ -742,6 +743,15 @@ void __put_task_struct(struct task_struc
+ WARN_ON(refcount_read(&tsk->usage));
WARN_ON(tsk == current);
+ /*
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
security_task_free(tsk);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2760,15 +2760,6 @@ static struct rq *finish_task_switch(str
+@@ -2728,15 +2728,6 @@ static struct rq *finish_task_switch(str
if (prev->sched_class->task_dead)
prev->sched_class->task_dead(prev);
diff --git a/debian/patches-rt/kmemleak-Turn-kmemleak_lock-to-raw-spinlock-on-RT.patch b/debian/patches-rt/kmemleak-Turn-kmemleak_lock-to-raw-spinlock-on-RT.patch
index 34730996e..509dfc7ad 100644
--- a/debian/patches-rt/kmemleak-Turn-kmemleak_lock-to-raw-spinlock-on-RT.patch
+++ b/debian/patches-rt/kmemleak-Turn-kmemleak_lock-to-raw-spinlock-on-RT.patch
@@ -1,7 +1,7 @@
From: He Zhe <zhe.he@windriver.com>
Date: Wed, 19 Dec 2018 16:30:57 +0100
Subject: [PATCH] kmemleak: Turn kmemleak_lock to raw spinlock on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
kmemleak_lock, as a rwlock on RT, can be held in atomic context and
causes the following BUG.
@@ -76,7 +76,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
-@@ -26,7 +26,7 @@
+@@ -13,7 +13,7 @@
*
* The following locks and mutexes are used by kmemleak:
*
@@ -85,7 +85,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* accesses to the object_tree_root. The object_list is the main list
* holding the metadata (struct kmemleak_object) for the allocated memory
* blocks. The object_tree_root is a red black tree used to look-up
-@@ -199,7 +199,7 @@ static LIST_HEAD(gray_list);
+@@ -186,7 +186,7 @@ static LIST_HEAD(gray_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* rw_lock protecting the access to object_list and object_tree_root */
@@ -94,7 +94,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
-@@ -515,9 +515,9 @@ static struct kmemleak_object *find_and_
+@@ -497,9 +497,9 @@ static struct kmemleak_object *find_and_
struct kmemleak_object *object;
rcu_read_lock();
@@ -106,7 +106,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* check whether the object is still available */
if (object && !get_object(object))
-@@ -537,13 +537,13 @@ static struct kmemleak_object *find_and_
+@@ -519,13 +519,13 @@ static struct kmemleak_object *find_and_
unsigned long flags;
struct kmemleak_object *object;
@@ -122,7 +122,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return object;
}
-@@ -618,7 +618,7 @@ static struct kmemleak_object *create_ob
+@@ -592,7 +592,7 @@ static struct kmemleak_object *create_ob
/* kernel backtrace */
object->trace_len = __save_stack_trace(object->trace);
@@ -131,7 +131,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
min_addr = min(min_addr, untagged_ptr);
-@@ -650,7 +650,7 @@ static struct kmemleak_object *create_ob
+@@ -624,7 +624,7 @@ static struct kmemleak_object *create_ob
list_add_tail_rcu(&object->object_list, &object_list);
out:
@@ -140,7 +140,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return object;
}
-@@ -1337,7 +1337,7 @@ static void scan_block(void *_start, voi
+@@ -1311,7 +1311,7 @@ static void scan_block(void *_start, voi
unsigned long flags;
unsigned long untagged_ptr;
@@ -149,7 +149,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for (ptr = start; ptr < end; ptr++) {
struct kmemleak_object *object;
unsigned long pointer;
-@@ -1395,7 +1395,7 @@ static void scan_block(void *_start, voi
+@@ -1369,7 +1369,7 @@ static void scan_block(void *_start, voi
spin_unlock(&object->lock);
}
}
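(To make the conversion concrete, a minimal sketch of the locking pattern the patch leaves behind; identifiers as in mm/kmemleak.c, the lookup body elided — this is an illustration, not a hunk from the patch:)

static DEFINE_RAW_SPINLOCK(kmemleak_lock);

static struct kmemleak_object *find_object_example(unsigned long ptr)
{
	struct kmemleak_object *object;
	unsigned long flags;

	/* formerly read_lock_irqsave(); a raw lock keeps spinning on RT */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, 0);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
	return object;
}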
diff --git a/debian/patches-rt/kthread-Do-not-use-TIMER_IRQSAFE.patch b/debian/patches-rt/kthread-Do-not-use-TIMER_IRQSAFE.patch
deleted file mode 100644
index be11fca87..000000000
--- a/debian/patches-rt/kthread-Do-not-use-TIMER_IRQSAFE.patch
+++ /dev/null
@@ -1,68 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 12 Feb 2019 12:57:45 +0100
-Subject: [PATCH] kthread: Do not use TIMER_IRQSAFE
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-The TIMER_IRQSAFE was introduced in commit
-
- 22597dc3d97b1 ("kthread: initial support for delayed kthread work")
-
-which modelled the delayed kthread code after workqueue's code. The
-workqueue code requires the flag TIMER_IRQSAFE for synchronisation
-purposes. This is not true for kthread's delay timer since all
-operations occur under a lock.
-
-Remove TIMER_IRQSAFE from the timer initialisation.
-Use timer_setup() for initialisation, which is the official helper.
-
-Cc: Petr Mladek <pmladek@suse.com>
-Cc: Ingo Molnar <mingo@kernel.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/kthread.h | 5 ++---
- kernel/kthread.c | 5 +++--
- 2 files changed, 5 insertions(+), 5 deletions(-)
-
---- a/include/linux/kthread.h
-+++ b/include/linux/kthread.h
-@@ -164,9 +164,8 @@ extern void __kthread_init_worker(struct
- #define kthread_init_delayed_work(dwork, fn) \
- do { \
- kthread_init_work(&(dwork)->work, (fn)); \
-- __init_timer(&(dwork)->timer, \
-- kthread_delayed_work_timer_fn, \
-- TIMER_IRQSAFE); \
-+ timer_setup(&(dwork)->timer, \
-+ kthread_delayed_work_timer_fn, 0); \
- } while (0)
-
- int kthread_worker_fn(void *worker_ptr);
---- a/kernel/kthread.c
-+++ b/kernel/kthread.c
-@@ -835,6 +835,7 @@ void kthread_delayed_work_timer_fn(struc
- struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
- struct kthread_work *work = &dwork->work;
- struct kthread_worker *worker = work->worker;
-+ unsigned long flags;
-
- /*
- * This might happen when a pending work is reinitialized.
-@@ -843,7 +844,7 @@ void kthread_delayed_work_timer_fn(struc
- if (WARN_ON_ONCE(!worker))
- return;
-
-- raw_spin_lock(&worker->lock);
-+ raw_spin_lock_irqsave(&worker->lock, flags);
- /* Work must not be used with >1 worker, see kthread_queue_work(). */
- WARN_ON_ONCE(work->worker != worker);
-
-@@ -852,7 +853,7 @@ void kthread_delayed_work_timer_fn(struc
- list_del_init(&work->node);
- kthread_insert_work(worker, work, &worker->work_list);
-
-- raw_spin_unlock(&worker->lock);
-+ raw_spin_unlock_irqrestore(&worker->lock, flags);
- }
- EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
-
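(For context, roughly how a delayed kthread work item is initialised and queued after the change this dropped patch described; the worker name and handler below are hypothetical, so treat it as a sketch of the mainline kthread API rather than part of the patch:)

#include <linux/kthread.h>

static struct kthread_delayed_work my_dwork;

static void my_fn(struct kthread_work *work)
{
	pr_info("delayed kthread work ran\n");
}

static int __init my_init(void)
{
	struct kthread_worker *w = kthread_create_worker(0, "my-worker");

	if (IS_ERR(w))
		return PTR_ERR(w);
	/* the timer is now set up via timer_setup(), no TIMER_IRQSAFE */
	kthread_init_delayed_work(&my_dwork, my_fn);
	kthread_queue_delayed_work(w, &my_dwork, HZ);
	return 0;
}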
diff --git a/debian/patches-rt/kthread-add-a-global-worker-thread.patch b/debian/patches-rt/kthread-add-a-global-worker-thread.patch
deleted file mode 100644
index 93a90cc6c..000000000
--- a/debian/patches-rt/kthread-add-a-global-worker-thread.patch
+++ /dev/null
@@ -1,145 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 12 Feb 2019 15:09:38 +0100
-Subject: [PATCH] kthread: add a global worker thread.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-Add kthread_schedule_work() which uses a global kthread for all its
-jobs.
-Split the cgroup include to avoid recursive includes from interrupt.h.
-Fixup everything that fails to build (and did not include all headers).
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/block/loop.c | 2 +-
- drivers/spi/spi-rockchip.c | 1 +
- include/linux/blk-cgroup.h | 2 +-
- include/linux/kthread-cgroup.h | 17 +++++++++++++++++
- include/linux/kthread.h | 15 ++++++---------
- init/main.c | 1 +
- kernel/kthread.c | 14 ++++++++++++++
- 7 files changed, 41 insertions(+), 11 deletions(-)
- create mode 100644 include/linux/kthread-cgroup.h
-
---- a/drivers/block/loop.c
-+++ b/drivers/block/loop.c
-@@ -70,7 +70,7 @@
- #include <linux/writeback.h>
- #include <linux/completion.h>
- #include <linux/highmem.h>
--#include <linux/kthread.h>
-+#include <linux/kthread-cgroup.h>
- #include <linux/splice.h>
- #include <linux/sysfs.h>
- #include <linux/miscdevice.h>
---- a/drivers/spi/spi-rockchip.c
-+++ b/drivers/spi/spi-rockchip.c
-@@ -22,6 +22,7 @@
- #include <linux/spi/spi.h>
- #include <linux/pm_runtime.h>
- #include <linux/scatterlist.h>
-+#include <linux/interrupt.h>
-
- #define DRIVER_NAME "rockchip-spi"
-
---- a/include/linux/blk-cgroup.h
-+++ b/include/linux/blk-cgroup.h
-@@ -20,7 +20,7 @@
- #include <linux/radix-tree.h>
- #include <linux/blkdev.h>
- #include <linux/atomic.h>
--#include <linux/kthread.h>
-+#include <linux/kthread-cgroup.h>
- #include <linux/fs.h>
-
- /* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
---- /dev/null
-+++ b/include/linux/kthread-cgroup.h
-@@ -0,0 +1,17 @@
-+/* SPDX-License-Identifier: GPL-2.0 */
-+#ifndef _LINUX_KTHREAD_CGROUP_H
-+#define _LINUX_KTHREAD_CGROUP_H
-+#include <linux/kthread.h>
-+#include <linux/cgroup.h>
-+
-+#ifdef CONFIG_BLK_CGROUP
-+void kthread_associate_blkcg(struct cgroup_subsys_state *css);
-+struct cgroup_subsys_state *kthread_blkcg(void);
-+#else
-+static inline void kthread_associate_blkcg(struct cgroup_subsys_state *css) { }
-+static inline struct cgroup_subsys_state *kthread_blkcg(void)
-+{
-+ return NULL;
-+}
-+#endif
-+#endif
---- a/include/linux/kthread.h
-+++ b/include/linux/kthread.h
-@@ -4,7 +4,6 @@
- /* Simple interface for creating and stopping kernel threads without mess. */
- #include <linux/err.h>
- #include <linux/sched.h>
--#include <linux/cgroup.h>
-
- __printf(4, 5)
- struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
-@@ -197,14 +196,12 @@ bool kthread_cancel_delayed_work_sync(st
-
- void kthread_destroy_worker(struct kthread_worker *worker);
-
--#ifdef CONFIG_BLK_CGROUP
--void kthread_associate_blkcg(struct cgroup_subsys_state *css);
--struct cgroup_subsys_state *kthread_blkcg(void);
--#else
--static inline void kthread_associate_blkcg(struct cgroup_subsys_state *css) { }
--static inline struct cgroup_subsys_state *kthread_blkcg(void)
-+extern struct kthread_worker kthread_global_worker;
-+void kthread_init_global_worker(void);
-+
-+static inline bool kthread_schedule_work(struct kthread_work *work)
- {
-- return NULL;
-+ return kthread_queue_work(&kthread_global_worker, work);
- }
--#endif
-+
- #endif /* _LINUX_KTHREAD_H */
---- a/init/main.c
-+++ b/init/main.c
-@@ -1119,6 +1119,7 @@ static noinline void __init kernel_init_
- smp_prepare_cpus(setup_max_cpus);
-
- workqueue_init();
-+ kthread_init_global_worker();
-
- init_mm_internals();
-
---- a/kernel/kthread.c
-+++ b/kernel/kthread.c
-@@ -20,6 +20,7 @@
- #include <linux/freezer.h>
- #include <linux/ptrace.h>
- #include <linux/uaccess.h>
-+#include <linux/cgroup.h>
- #include <trace/events/sched.h>
-
- static DEFINE_SPINLOCK(kthread_create_lock);
-@@ -1181,6 +1182,19 @@ void kthread_destroy_worker(struct kthre
- }
- EXPORT_SYMBOL(kthread_destroy_worker);
-
-+DEFINE_KTHREAD_WORKER(kthread_global_worker);
-+EXPORT_SYMBOL(kthread_global_worker);
-+
-+__init void kthread_init_global_worker(void)
-+{
-+ kthread_global_worker.task = kthread_create(kthread_worker_fn,
-+ &kthread_global_worker,
-+ "kswork");
-+ if (WARN_ON(IS_ERR(kthread_global_worker.task)))
-+ return;
-+ wake_up_process(kthread_global_worker.task);
-+}
-+
- #ifdef CONFIG_BLK_CGROUP
- /**
- * kthread_associate_blkcg - associate blkcg to current kthread
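(A sketch of what this dropped API looked like from a caller's side; the handler is made up, and kthread_schedule_work() existed only in the RT tree, wrapping kthread_queue_work() as shown in the removed hunk above:)

#include <linux/kthread.h>

static void my_work_fn(struct kthread_work *work)
{
	pr_info("ran on the global kswork thread\n");
}

static DEFINE_KTHREAD_WORK(my_work, my_work_fn);

static void kick_example(void)
{
	/* queued onto kthread_global_worker, started from kernel_init() */
	kthread_schedule_work(&my_work);
}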
diff --git a/debian/patches-rt/kthread-convert-worker-lock-to-raw-spinlock.patch b/debian/patches-rt/kthread-convert-worker-lock-to-raw-spinlock.patch
deleted file mode 100644
index 85d45f49e..000000000
--- a/debian/patches-rt/kthread-convert-worker-lock-to-raw-spinlock.patch
+++ /dev/null
@@ -1,204 +0,0 @@
-From: Julia Cartwright <julia@ni.com>
-Date: Fri, 28 Sep 2018 21:03:51 +0000
-Subject: [PATCH] kthread: convert worker lock to raw spinlock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-In order to enable the queuing of kthread work items from hardirq
-context even when PREEMPT_RT_FULL is enabled, convert the worker
-spin_lock to a raw_spin_lock.
-
-This is only acceptable to do because the work performed under the lock
-is well-bounded and minimal.
-
-Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Cc: Guenter Roeck <linux@roeck-us.net>
-Reported-and-tested-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
-Reported-by: Tim Sander <tim@krieglstein.org>
-Signed-off-by: Julia Cartwright <julia@ni.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/kthread.h | 4 ++--
- kernel/kthread.c | 42 +++++++++++++++++++++---------------------
- 2 files changed, 23 insertions(+), 23 deletions(-)
-
---- a/include/linux/kthread.h
-+++ b/include/linux/kthread.h
-@@ -85,7 +85,7 @@ enum {
-
- struct kthread_worker {
- unsigned int flags;
-- spinlock_t lock;
-+ raw_spinlock_t lock;
- struct list_head work_list;
- struct list_head delayed_work_list;
- struct task_struct *task;
-@@ -106,7 +106,7 @@ struct kthread_delayed_work {
- };
-
- #define KTHREAD_WORKER_INIT(worker) { \
-- .lock = __SPIN_LOCK_UNLOCKED((worker).lock), \
-+ .lock = __RAW_SPIN_LOCK_UNLOCKED((worker).lock), \
- .work_list = LIST_HEAD_INIT((worker).work_list), \
- .delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\
- }
---- a/kernel/kthread.c
-+++ b/kernel/kthread.c
-@@ -599,7 +599,7 @@ void __kthread_init_worker(struct kthrea
- struct lock_class_key *key)
- {
- memset(worker, 0, sizeof(struct kthread_worker));
-- spin_lock_init(&worker->lock);
-+ raw_spin_lock_init(&worker->lock);
- lockdep_set_class_and_name(&worker->lock, key, name);
- INIT_LIST_HEAD(&worker->work_list);
- INIT_LIST_HEAD(&worker->delayed_work_list);
-@@ -641,21 +641,21 @@ int kthread_worker_fn(void *worker_ptr)
-
- if (kthread_should_stop()) {
- __set_current_state(TASK_RUNNING);
-- spin_lock_irq(&worker->lock);
-+ raw_spin_lock_irq(&worker->lock);
- worker->task = NULL;
-- spin_unlock_irq(&worker->lock);
-+ raw_spin_unlock_irq(&worker->lock);
- return 0;
- }
-
- work = NULL;
-- spin_lock_irq(&worker->lock);
-+ raw_spin_lock_irq(&worker->lock);
- if (!list_empty(&worker->work_list)) {
- work = list_first_entry(&worker->work_list,
- struct kthread_work, node);
- list_del_init(&work->node);
- }
- worker->current_work = work;
-- spin_unlock_irq(&worker->lock);
-+ raw_spin_unlock_irq(&worker->lock);
-
- if (work) {
- __set_current_state(TASK_RUNNING);
-@@ -812,12 +812,12 @@ bool kthread_queue_work(struct kthread_w
- bool ret = false;
- unsigned long flags;
-
-- spin_lock_irqsave(&worker->lock, flags);
-+ raw_spin_lock_irqsave(&worker->lock, flags);
- if (!queuing_blocked(worker, work)) {
- kthread_insert_work(worker, work, &worker->work_list);
- ret = true;
- }
-- spin_unlock_irqrestore(&worker->lock, flags);
-+ raw_spin_unlock_irqrestore(&worker->lock, flags);
- return ret;
- }
- EXPORT_SYMBOL_GPL(kthread_queue_work);
-@@ -843,7 +843,7 @@ void kthread_delayed_work_timer_fn(struc
- if (WARN_ON_ONCE(!worker))
- return;
-
-- spin_lock(&worker->lock);
-+ raw_spin_lock(&worker->lock);
- /* Work must not be used with >1 worker, see kthread_queue_work(). */
- WARN_ON_ONCE(work->worker != worker);
-
-@@ -852,7 +852,7 @@ void kthread_delayed_work_timer_fn(struc
- list_del_init(&work->node);
- kthread_insert_work(worker, work, &worker->work_list);
-
-- spin_unlock(&worker->lock);
-+ raw_spin_unlock(&worker->lock);
- }
- EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
-
-@@ -908,14 +908,14 @@ bool kthread_queue_delayed_work(struct k
- unsigned long flags;
- bool ret = false;
-
-- spin_lock_irqsave(&worker->lock, flags);
-+ raw_spin_lock_irqsave(&worker->lock, flags);
-
- if (!queuing_blocked(worker, work)) {
- __kthread_queue_delayed_work(worker, dwork, delay);
- ret = true;
- }
-
-- spin_unlock_irqrestore(&worker->lock, flags);
-+ raw_spin_unlock_irqrestore(&worker->lock, flags);
- return ret;
- }
- EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
-@@ -951,7 +951,7 @@ void kthread_flush_work(struct kthread_w
- if (!worker)
- return;
-
-- spin_lock_irq(&worker->lock);
-+ raw_spin_lock_irq(&worker->lock);
- /* Work must not be used with >1 worker, see kthread_queue_work(). */
- WARN_ON_ONCE(work->worker != worker);
-
-@@ -963,7 +963,7 @@ void kthread_flush_work(struct kthread_w
- else
- noop = true;
-
-- spin_unlock_irq(&worker->lock);
-+ raw_spin_unlock_irq(&worker->lock);
-
- if (!noop)
- wait_for_completion(&fwork.done);
-@@ -996,9 +996,9 @@ static bool __kthread_cancel_work(struct
- * any queuing is blocked by setting the canceling counter.
- */
- work->canceling++;
-- spin_unlock_irqrestore(&worker->lock, *flags);
-+ raw_spin_unlock_irqrestore(&worker->lock, *flags);
- del_timer_sync(&dwork->timer);
-- spin_lock_irqsave(&worker->lock, *flags);
-+ raw_spin_lock_irqsave(&worker->lock, *flags);
- work->canceling--;
- }
-
-@@ -1045,7 +1045,7 @@ bool kthread_mod_delayed_work(struct kth
- unsigned long flags;
- int ret = false;
-
-- spin_lock_irqsave(&worker->lock, flags);
-+ raw_spin_lock_irqsave(&worker->lock, flags);
-
- /* Do not bother with canceling when never queued. */
- if (!work->worker)
-@@ -1062,7 +1062,7 @@ bool kthread_mod_delayed_work(struct kth
- fast_queue:
- __kthread_queue_delayed_work(worker, dwork, delay);
- out:
-- spin_unlock_irqrestore(&worker->lock, flags);
-+ raw_spin_unlock_irqrestore(&worker->lock, flags);
- return ret;
- }
- EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
-@@ -1076,7 +1076,7 @@ static bool __kthread_cancel_work_sync(s
- if (!worker)
- goto out;
-
-- spin_lock_irqsave(&worker->lock, flags);
-+ raw_spin_lock_irqsave(&worker->lock, flags);
- /* Work must not be used with >1 worker, see kthread_queue_work(). */
- WARN_ON_ONCE(work->worker != worker);
-
-@@ -1090,13 +1090,13 @@ static bool __kthread_cancel_work_sync(s
- * In the meantime, block any queuing by setting the canceling counter.
- */
- work->canceling++;
-- spin_unlock_irqrestore(&worker->lock, flags);
-+ raw_spin_unlock_irqrestore(&worker->lock, flags);
- kthread_flush_work(work);
-- spin_lock_irqsave(&worker->lock, flags);
-+ raw_spin_lock_irqsave(&worker->lock, flags);
- work->canceling--;
-
- out_fast:
-- spin_unlock_irqrestore(&worker->lock, flags);
-+ raw_spin_unlock_irqrestore(&worker->lock, flags);
- out:
- return ret;
- }
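(The constraint behind that conversion, sketched as hypothetical driver code rather than anything from the patch: on PREEMPT_RT_FULL a spinlock_t can sleep, so a lock taken from a hardirq handler must be a raw_spinlock_t:)

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(my_lock);

static irqreturn_t my_irq_handler(int irq, void *data)
{
	/* a raw lock stays a spinning lock on RT, so hardirq is fine */
	raw_spin_lock(&my_lock);
	/* ... e.g. kthread_queue_work() on an associated worker ... */
	raw_spin_unlock(&my_lock);
	return IRQ_HANDLED;
}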
diff --git a/debian/patches-rt/leds-trigger-disable-CPU-trigger-on-RT.patch b/debian/patches-rt/leds-trigger-disable-CPU-trigger-on-RT.patch
index 60dad335c..1dcebb988 100644
--- a/debian/patches-rt/leds-trigger-disable-CPU-trigger-on-RT.patch
+++ b/debian/patches-rt/leds-trigger-disable-CPU-trigger-on-RT.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 23 Jan 2014 14:45:59 +0100
Subject: leds: trigger: disable CPU trigger on -RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
as it triggers:
|CPU: 0 PID: 0 Comm: swapper Not tainted 3.12.8-rt10 #141
@@ -25,7 +25,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/leds/trigger/Kconfig
+++ b/drivers/leds/trigger/Kconfig
-@@ -63,6 +63,7 @@ config LEDS_TRIGGER_BACKLIGHT
+@@ -64,6 +64,7 @@ config LEDS_TRIGGER_BACKLIGHT
config LEDS_TRIGGER_CPU
bool "LED CPU Trigger"
diff --git a/debian/patches-rt/list_bl-fixup-bogus-lockdep-warning.patch b/debian/patches-rt/list_bl-fixup-bogus-lockdep-warning.patch
index de125f3e2..d6e1fb9e5 100644
--- a/debian/patches-rt/list_bl-fixup-bogus-lockdep-warning.patch
+++ b/debian/patches-rt/list_bl-fixup-bogus-lockdep-warning.patch
@@ -1,7 +1,7 @@
From: Josh Cartwright <joshc@ni.com>
Date: Thu, 31 Mar 2016 00:04:25 -0500
Subject: [PATCH] list_bl: fixup bogus lockdep warning
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
At first glance, the use of 'static inline' seems appropriate for
INIT_HLIST_BL_HEAD().
diff --git a/debian/patches-rt/list_bl.h-make-list-head-locking-RT-safe.patch b/debian/patches-rt/list_bl.h-make-list-head-locking-RT-safe.patch
index 37c63205f..cb33707f3 100644
--- a/debian/patches-rt/list_bl.h-make-list-head-locking-RT-safe.patch
+++ b/debian/patches-rt/list_bl.h-make-list-head-locking-RT-safe.patch
@@ -1,7 +1,7 @@
From: Paul Gortmaker <paul.gortmaker@windriver.com>
Date: Fri, 21 Jun 2013 15:07:25 -0400
Subject: list_bl: Make list head locking RT safe
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
As per changes in include/linux/jbd_common.h for avoiding the
bit_spin_locks on RT ("fs: jbd/jbd2: Make state lock and journal
@@ -86,7 +86,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
{
-@@ -119,12 +129,26 @@ static inline void hlist_bl_del_init(str
+@@ -145,12 +155,26 @@ static inline void hlist_bl_del_init(str
static inline void hlist_bl_lock(struct hlist_bl_head *b)
{
diff --git a/debian/patches-rt/locallock-provide-get-put-_locked_ptr-variants.patch b/debian/patches-rt/locallock-provide-get-put-_locked_ptr-variants.patch
index 1b21604fb..a9f191bbd 100644
--- a/debian/patches-rt/locallock-provide-get-put-_locked_ptr-variants.patch
+++ b/debian/patches-rt/locallock-provide-get-put-_locked_ptr-variants.patch
@@ -1,7 +1,7 @@
From: Julia Cartwright <julia@ni.com>
Date: Mon, 7 May 2018 08:58:56 -0500
Subject: [PATCH] locallock: provide {get,put}_locked_ptr() variants
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Provide a set of locallocked accessors for pointers to per-CPU data;
this is useful for dynamically-allocated per-CPU regions, for example.
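(A hypothetical use of the accessors this patch provides, assuming they take a local lock plus a per-CPU pointer like the sibling get_locked_var()/put_locked_var() helpers; locallock.h exists only in the RT tree, and "my_lock"/"struct my_state" are made up for illustration:)

#include <linux/locallock.h>
#include <linux/percpu.h>

struct my_state { int counter; };

static DEFINE_LOCAL_IRQ_LOCK(my_lock);
static struct my_state __percpu *my_state_ptr;	/* from alloc_percpu() */

static void touch_state(void)
{
	struct my_state *s;

	s = get_locked_ptr(my_lock, my_state_ptr);
	s->counter++;	/* this CPU's instance, under the local lock */
	put_locked_ptr(my_lock, my_state_ptr);
}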
diff --git a/debian/patches-rt/localversion.patch b/debian/patches-rt/localversion.patch
index f3235db74..79f1c5f21 100644
--- a/debian/patches-rt/localversion.patch
+++ b/debian/patches-rt/localversion.patch
@@ -1,7 +1,7 @@
Subject: Add localversion for -RT release
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 08 Jul 2011 20:25:16 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
@@ -11,4 +11,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt7
++-rt1
diff --git a/debian/patches-rt/lockdep-disable-self-test.patch b/debian/patches-rt/lockdep-disable-self-test.patch
index 4a515a96f..608ec38a0 100644
--- a/debian/patches-rt/lockdep-disable-self-test.patch
+++ b/debian/patches-rt/lockdep-disable-self-test.patch
@@ -4,7 +4,7 @@ Subject: [PATCH] lockdep: disable self-test
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
The self-test wasn't always 100% accurate for RT. We disabled a few
tests which failed because they had different semantics on RT. Some
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
-@@ -1239,7 +1239,7 @@ config DEBUG_ATOMIC_SLEEP
+@@ -1268,7 +1268,7 @@ config DEBUG_ATOMIC_SLEEP
config DEBUG_LOCKING_API_SELFTESTS
bool "Locking API boot-time self-tests"
diff --git a/debian/patches-rt/lockdep-no-softirq-accounting-on-rt.patch b/debian/patches-rt/lockdep-no-softirq-accounting-on-rt.patch
index ea53770cc..a7e7bb547 100644
--- a/debian/patches-rt/lockdep-no-softirq-accounting-on-rt.patch
+++ b/debian/patches-rt/lockdep-no-softirq-accounting-on-rt.patch
@@ -1,7 +1,7 @@
Subject: lockdep: Make it RT aware
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 17 Jul 2011 18:51:23 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
teach lockdep that we don't really do softirqs on -RT.
@@ -52,7 +52,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
defined(CONFIG_PREEMPT_TRACER)
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
-@@ -3767,6 +3767,7 @@ static void check_flags(unsigned long fl
+@@ -4227,6 +4227,7 @@ static void check_flags(unsigned long fl
}
}
@@ -60,7 +60,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* We dont accurately track softirq state in e.g.
* hardirq contexts (such as on 4KSTACKS), so only
-@@ -3781,6 +3782,7 @@ static void check_flags(unsigned long fl
+@@ -4241,6 +4242,7 @@ static void check_flags(unsigned long fl
DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
}
}
diff --git a/debian/patches-rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch b/debian/patches-rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch
index 6bb113a81..c33d0e30e 100644
--- a/debian/patches-rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch
+++ b/debian/patches-rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch
@@ -1,7 +1,7 @@
From: Josh Cartwright <josh.cartwright@ni.com>
Date: Wed, 28 Jan 2015 13:08:45 -0600
Subject: lockdep: selftest: fix warnings due to missing PREEMPT_RT conditionals
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
"lockdep: Selftest: Only do hardirq context test for raw spinlock"
disabled the execution of certain tests with PREEMPT_RT_FULL, but did
diff --git a/debian/patches-rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch b/debian/patches-rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch
index f89567444..f215fee53 100644
--- a/debian/patches-rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch
+++ b/debian/patches-rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch
@@ -1,7 +1,7 @@
Subject: lockdep: selftest: Only do hardirq context test for raw spinlock
From: Yong Zhang <yong.zhang0@gmail.com>
Date: Mon, 16 Apr 2012 15:01:56 +0800
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
From: Yong Zhang <yong.zhang@windriver.com>
@@ -18,7 +18,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
-@@ -2057,6 +2057,7 @@ void locking_selftest(void)
+@@ -2058,6 +2058,7 @@ void locking_selftest(void)
printk(" --------------------------------------------------------------------------\n");
@@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* irq-context testcases:
*/
-@@ -2069,6 +2070,28 @@ void locking_selftest(void)
+@@ -2070,6 +2071,28 @@ void locking_selftest(void)
DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion);
// DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2);
diff --git a/debian/patches-rt/locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch b/debian/patches-rt/locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch
index 25beaece5..734262630 100644
--- a/debian/patches-rt/locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch
+++ b/debian/patches-rt/locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 4 Aug 2017 17:40:42 +0200
Subject: [PATCH 1/2] locking: don't check for __LINUX_SPINLOCK_TYPES_H on -RT
archs
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Upstream uses arch_spinlock_t within spinlock_t and requests that
the spinlock_types.h header file be included first.
@@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
typedef struct {
--- a/arch/arm64/include/asm/spinlock_types.h
+++ b/arch/arm64/include/asm/spinlock_types.h
-@@ -16,10 +16,6 @@
+@@ -5,10 +5,6 @@
#ifndef __ASM_SPINLOCK_TYPES_H
#define __ASM_SPINLOCK_TYPES_H
@@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/hexagon/include/asm/spinlock_types.h
+++ b/arch/hexagon/include/asm/spinlock_types.h
-@@ -21,10 +21,6 @@
+@@ -8,10 +8,6 @@
#ifndef _ASM_SPINLOCK_TYPES_H
#define _ASM_SPINLOCK_TYPES_H
@@ -136,13 +136,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifndef __ASM_SPINLOCK_TYPES_H
#define __ASM_SPINLOCK_TYPES_H
--#ifndef __LINUX_SPINLOCK_TYPES_H
+-#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__ASM_SPINLOCK_H)
-# error "please don't include this file directly"
-#endif
-
- typedef struct {
- volatile unsigned int slock;
- } arch_spinlock_t;
+ #include <asm-generic/qspinlock_types.h>
+ #include <asm-generic/qrwlock_types.h>
+
--- a/include/linux/spinlock_types_up.h
+++ b/include/linux/spinlock_types_up.h
@@ -1,10 +1,6 @@
diff --git a/debian/patches-rt/locking-lockdep-Don-t-complain-about-incorrect-name-.patch b/debian/patches-rt/locking-lockdep-Don-t-complain-about-incorrect-name-.patch
new file mode 100644
index 000000000..4ee609fa9
--- /dev/null
+++ b/debian/patches-rt/locking-lockdep-Don-t-complain-about-incorrect-name-.patch
@@ -0,0 +1,44 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 17 May 2019 23:22:34 +0200
+Subject: [PATCH] locking/lockdep: Don't complain about incorrect name for no
+ validate class
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
+
+[ Upstream commit 978315462d3ea3cf6cfacd34c563ec1eb02a3aa5 ]
+
+It is possible to ignore the validation for a certain lock by using:
+
+ lockdep_set_novalidate_class()
+
+on it. Each invocation will assign a new name to the class it creates
+for __lockdep_no_validate__. That means that once
+lockdep_set_novalidate_class() has been used on two locks then
+class->name won't match lock->name for the first lock triggering the
+warning.
+
+So ignore a changed, non-matching ->name pointer for the special
+__lockdep_no_validate__ class.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Will Deacon <will.deacon@arm.com>
+Link: http://lkml.kernel.org/r/20190517212234.32611-1-bigeasy@linutronix.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+---
+ kernel/locking/lockdep.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -732,7 +732,8 @@ look_up_lock_class(const struct lockdep_
+ * Huh! same key, different name? Did someone trample
+ * on some memory? We're most confused.
+ */
+- WARN_ON_ONCE(class->name != lock->name);
++ WARN_ON_ONCE(class->name != lock->name &&
++ lock->key != &__lockdep_no_validate__);
+ return class;
+ }
+ }
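(The triggering scenario, sketched with two hypothetical mutexes:)

#include <linux/mutex.h>
#include <linux/lockdep.h>

static struct mutex a, b;

static void novalidate_example(void)
{
	mutex_init(&a);
	lockdep_set_novalidate_class(&a);

	mutex_init(&b);
	/*
	 * Reuses the __lockdep_no_validate__ key but installs a new
	 * class name; before this fix, a later look_up_lock_class()
	 * on "a" warned about the no-longer-matching class->name.
	 */
	lockdep_set_novalidate_class(&b);
}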
diff --git a/debian/patches-rt/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch b/debian/patches-rt/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch
index f3cb62d5a..f1405f220 100644
--- a/debian/patches-rt/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch
+++ b/debian/patches-rt/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch
@@ -1,7 +1,7 @@
From: "Wolfgang M. Reimer" <linuxball@gmail.com>
Date: Tue, 21 Jul 2015 16:20:07 +0200
Subject: locking: locktorture: Do NOT include rwlock.h directly
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Including rwlock.h directly will cause kernel builds to fail
if CONFIG_PREEMPT_RT_FULL is defined. The correct header file
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
-@@ -29,7 +29,6 @@
+@@ -16,7 +16,6 @@
#include <linux/kthread.h>
#include <linux/sched/rt.h>
#include <linux/spinlock.h>
diff --git a/debian/patches-rt/locking-rt-mutex-Flush-block-plug-on-__down_read.patch b/debian/patches-rt/locking-rt-mutex-Flush-block-plug-on-__down_read.patch
index e7388bec1..fee6a50f9 100644
--- a/debian/patches-rt/locking-rt-mutex-Flush-block-plug-on-__down_read.patch
+++ b/debian/patches-rt/locking-rt-mutex-Flush-block-plug-on-__down_read.patch
@@ -1,7 +1,7 @@
From: Scott Wood <swood@redhat.com>
Date: Fri, 4 Jan 2019 15:33:21 -0500
Subject: [PATCH] locking/rt-mutex: Flush block plug on __down_read()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
__down_read() bypasses the rtmutex frontend to call
rt_mutex_slowlock_locked() directly, and thus it needs to call
diff --git a/debian/patches-rt/locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch b/debian/patches-rt/locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch
index c6af5f5ff..4ef80a2f9 100644
--- a/debian/patches-rt/locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch
+++ b/debian/patches-rt/locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch
@@ -1,7 +1,7 @@
From: Mikulas Patocka <mpatocka@redhat.com>
Date: Mon, 13 Nov 2017 12:56:53 -0500
Subject: [PATCH] locking/rt-mutex: fix deadlock in device mapper / block-IO
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
When some block device driver creates a bio and submits it to another
block device driver, the bio is added to current->bio_list (in order to
@@ -38,7 +38,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -24,6 +24,7 @@
+@@ -25,6 +25,7 @@
#include <linux/sched/debug.h>
#include <linux/timer.h>
#include <linux/ww_mutex.h>
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include "rtmutex_common.h"
-@@ -1919,6 +1920,15 @@ rt_mutex_fastlock(struct rt_mutex *lock,
+@@ -1895,6 +1896,15 @@ rt_mutex_fastlock(struct rt_mutex *lock,
if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
return 0;
@@ -62,7 +62,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK, ww_ctx);
}
-@@ -1936,6 +1946,9 @@ rt_mutex_timed_fastlock(struct rt_mutex
+@@ -1912,6 +1922,9 @@ rt_mutex_timed_fastlock(struct rt_mutex
likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
return 0;
diff --git a/debian/patches-rt/locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch b/debian/patches-rt/locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch
index 1b8f6ab4d..4f496311e 100644
--- a/debian/patches-rt/locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch
+++ b/debian/patches-rt/locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 16 Nov 2017 16:48:48 +0100
Subject: [PATCH] locking/rtmutex: re-init the wait_lock in
rt_mutex_init_proxy_locked()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
We could provide a key-class for the lockdep (and fixup all callers) or
move the init to all callers (like it was) in order to avoid lockdep
@@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -2281,6 +2281,14 @@ void rt_mutex_init_proxy_locked(struct r
+@@ -2257,6 +2257,14 @@ void rt_mutex_init_proxy_locked(struct r
struct task_struct *proxy_owner)
{
__rt_mutex_init(lock, NULL, NULL);
diff --git a/debian/patches-rt/md-disable-bcache.patch b/debian/patches-rt/md-disable-bcache.patch
index 6ed3ba6cc..127496bc3 100644
--- a/debian/patches-rt/md-disable-bcache.patch
+++ b/debian/patches-rt/md-disable-bcache.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 29 Aug 2013 11:48:57 +0200
Subject: md: disable bcache
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
It uses anon semaphores
|drivers/md/bcache/request.c: In function ‘cached_dev_write_complete’:
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/md/bcache/Kconfig
+++ b/drivers/md/bcache/Kconfig
-@@ -1,6 +1,7 @@
+@@ -2,6 +2,7 @@
config BCACHE
tristate "Block device as cache"
diff --git a/debian/patches-rt/md-raid5-percpu-handling-rt-aware.patch b/debian/patches-rt/md-raid5-percpu-handling-rt-aware.patch
index 766c365d4..bb5a04164 100644
--- a/debian/patches-rt/md-raid5-percpu-handling-rt-aware.patch
+++ b/debian/patches-rt/md-raid5-percpu-handling-rt-aware.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 6 Apr 2010 16:51:31 +0200
Subject: md: raid5: Make raid5_percpu handling RT aware
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
__raid_run_ops() disables preemption with get_cpu() around the access
to the raid5_percpu variables. That causes scheduling while atomic
@@ -21,7 +21,7 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
-@@ -2069,8 +2069,9 @@ static void raid_run_ops(struct stripe_h
+@@ -2058,8 +2058,9 @@ static void raid_run_ops(struct stripe_h
struct raid5_percpu *percpu;
unsigned long cpu;
@@ -32,7 +32,7 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
ops_run_biofill(sh);
overlap_clear++;
-@@ -2129,7 +2130,8 @@ static void raid_run_ops(struct stripe_h
+@@ -2118,7 +2119,8 @@ static void raid_run_ops(struct stripe_h
if (test_and_clear_bit(R5_Overlap, &dev->flags))
wake_up(&sh->raid_conf->wait_for_overlap);
}
@@ -42,7 +42,7 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
}
static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh)
-@@ -6815,6 +6817,7 @@ static int raid456_cpu_up_prepare(unsign
+@@ -6813,6 +6815,7 @@ static int raid456_cpu_up_prepare(unsign
__func__, cpu);
return -ENOMEM;
}
@@ -58,5 +58,5 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
struct raid5_percpu {
+ spinlock_t lock; /* Protection for -RT */
struct page *spare_page; /* Used when checking P/Q in raid6 */
- struct flex_array *scribble; /* space for constructing buffer
- * lists and performing address
+ void *scribble; /* space for constructing buffer
+ * lists and performing address
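(Reassembled, the pattern this patch leaves in raid_run_ops() looks roughly like the following; get_cpu_light()/put_cpu_light() are RT-tree primitives, and this is a sketch, not a verbatim hunk:)

	struct raid5_percpu *percpu;
	unsigned long cpu;

	cpu = get_cpu_light();			/* stays preemptible on RT */
	percpu = per_cpu_ptr(conf->percpu, cpu);
	spin_lock(&percpu->lock);		/* explicit per-CPU protection */
	/* ... run stripe operations using percpu->scribble ... */
	spin_unlock(&percpu->lock);
	put_cpu_light();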
diff --git a/debian/patches-rt/mips-disable-highmem-on-rt.patch b/debian/patches-rt/mips-disable-highmem-on-rt.patch
index 75b064803..262a36481 100644
--- a/debian/patches-rt/mips-disable-highmem-on-rt.patch
+++ b/debian/patches-rt/mips-disable-highmem-on-rt.patch
@@ -1,7 +1,7 @@
Subject: mips: Disable highmem on RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 18 Jul 2011 17:10:12 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
The current highmem handling on -RT is not compatible and needs fixups.
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
-@@ -2600,7 +2600,7 @@ config MIPS_CRC_SUPPORT
+@@ -2602,7 +2602,7 @@ config MIPS_CRC_SUPPORT
#
config HIGHMEM
bool "High Memory Support"
diff --git a/debian/patches-rt/mm-convert-swap-to-percpu-locked.patch b/debian/patches-rt/mm-convert-swap-to-percpu-locked.patch
index f3ab16e6b..a07f861ab 100644
--- a/debian/patches-rt/mm-convert-swap-to-percpu-locked.patch
+++ b/debian/patches-rt/mm-convert-swap-to-percpu-locked.patch
@@ -1,7 +1,7 @@
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:51 -0500
Subject: mm/swap: Convert to percpu locked
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Replace global locks (get_cpu + local_irq_save) with "local_locks()".
Currently there is one for "rotate" and one for "swap".
@@ -36,24 +36,24 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern void lru_cache_add_file(struct page *page);
--- a/mm/compaction.c
+++ b/mm/compaction.c
-@@ -1658,10 +1658,12 @@ static enum compact_result compact_zone(
+@@ -2226,10 +2226,12 @@ compact_zone(struct compact_control *cc,
block_start_pfn(cc->migrate_pfn, cc->order);
- if (cc->last_migrated_pfn < current_block_start) {
+ if (last_migrated_pfn < current_block_start) {
- cpu = get_cpu();
+ cpu = get_cpu_light();
+ local_lock_irq(swapvec_lock);
lru_add_drain_cpu(cpu);
+ local_unlock_irq(swapvec_lock);
- drain_local_pages(zone);
+ drain_local_pages(cc->zone);
- put_cpu();
+ put_cpu_light();
/* No more flushing until we migrate again */
- cc->last_migrated_pfn = 0;
+ last_migrated_pfn = 0;
}
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -7443,8 +7443,9 @@ void __init free_area_init(unsigned long
+@@ -7583,8 +7583,9 @@ void __init free_area_init(unsigned long
static int page_alloc_cpu_dead(unsigned int cpu)
{
@@ -66,7 +66,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
--- a/mm/swap.c
+++ b/mm/swap.c
-@@ -32,6 +32,7 @@
+@@ -33,6 +33,7 @@
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/hugetlb.h>
#include <linux/page_idle.h>
-@@ -50,6 +51,8 @@ static DEFINE_PER_CPU(struct pagevec, lr
+@@ -51,6 +52,8 @@ static DEFINE_PER_CPU(struct pagevec, lr
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
#endif
@@ -83,7 +83,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* This path almost never happens for VM activity - pages are normally
-@@ -252,11 +255,11 @@ void rotate_reclaimable_page(struct page
+@@ -253,11 +256,11 @@ void rotate_reclaimable_page(struct page
unsigned long flags;
get_page(page);
@@ -97,7 +97,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -306,12 +309,13 @@ void activate_page(struct page *page)
+@@ -307,12 +310,13 @@ void activate_page(struct page *page)
{
page = compound_head(page);
if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
@@ -113,7 +113,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -333,7 +337,7 @@ void activate_page(struct page *page)
+@@ -334,7 +338,7 @@ void activate_page(struct page *page)
static void __lru_cache_activate_page(struct page *page)
{
@@ -122,7 +122,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int i;
/*
-@@ -355,7 +359,7 @@ static void __lru_cache_activate_page(st
+@@ -356,7 +360,7 @@ static void __lru_cache_activate_page(st
}
}
@@ -131,7 +131,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -397,12 +401,12 @@ EXPORT_SYMBOL(mark_page_accessed);
+@@ -398,12 +402,12 @@ EXPORT_SYMBOL(mark_page_accessed);
static void __lru_cache_add(struct page *page)
{
@@ -146,7 +146,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -580,9 +584,9 @@ void lru_add_drain_cpu(int cpu)
+@@ -581,9 +585,9 @@ void lru_add_drain_cpu(int cpu)
unsigned long flags;
/* No harm done if a racing interrupt already did this */
@@ -158,7 +158,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
-@@ -614,11 +618,12 @@ void deactivate_file_page(struct page *p
+@@ -615,11 +619,12 @@ void deactivate_file_page(struct page *p
return;
if (likely(get_page_unless_zero(page))) {
@@ -173,7 +173,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -633,19 +638,20 @@ void mark_page_lazyfree(struct page *pag
+@@ -634,19 +639,20 @@ void mark_page_lazyfree(struct page *pag
{
if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
!PageSwapCache(page) && !PageUnevictable(page)) {
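(The conversion follows one template throughout mm/swap.c; roughly as below, with locallock.h being RT-tree only and the pagevec names taken from the file — a sketch rather than a hunk:)

#include <linux/locallock.h>
#include <linux/pagevec.h>

static DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);

static void lru_cache_add_example(struct page *page)
{
	struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);

	get_page(page);
	if (!pagevec_add(pvec, page) || PageCompound(page))
		__pagevec_lru_add(pvec);
	put_locked_var(swapvec_lock, lru_add_pvec);
}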
diff --git a/debian/patches-rt/mm-disable-sloub-rt.patch b/debian/patches-rt/mm-disable-sloub-rt.patch
index 751c9def4..3efcd2ccb 100644
--- a/debian/patches-rt/mm-disable-sloub-rt.patch
+++ b/debian/patches-rt/mm-disable-sloub-rt.patch
@@ -1,7 +1,7 @@
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:44:03 -0500
Subject: mm: Allow only slub on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Disable SLAB and SLOB on -RT. Only SLUB is adapted to -RT needs.
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -1667,6 +1667,7 @@ choice
+@@ -1693,6 +1693,7 @@ choice
config SLAB
bool "SLAB"
@@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
select HAVE_HARDENED_USERCOPY_ALLOCATOR
help
The regular slab allocator that is established and known to work
-@@ -1687,6 +1688,7 @@ config SLUB
+@@ -1713,6 +1714,7 @@ config SLUB
config SLOB
depends on EXPERT
bool "SLOB (Simple Allocator)"
diff --git a/debian/patches-rt/mm-enable-slub.patch b/debian/patches-rt/mm-enable-slub.patch
index ca8c555cf..5f3133f74 100644
--- a/debian/patches-rt/mm-enable-slub.patch
+++ b/debian/patches-rt/mm-enable-slub.patch
@@ -1,7 +1,7 @@
Subject: mm: Enable SLUB for RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 25 Oct 2012 10:32:35 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Avoid the memory allocation in IRQ section
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -3732,6 +3732,11 @@ static void list_slab_objects(struct kme
+@@ -3721,6 +3721,11 @@ static void list_slab_objects(struct kme
const char *text)
{
#ifdef CONFIG_SLUB_DEBUG
@@ -26,7 +26,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void *addr = page_address(page);
void *p;
unsigned long *map = bitmap_zalloc(page->objects, GFP_ATOMIC);
-@@ -3751,8 +3756,10 @@ static void list_slab_objects(struct kme
+@@ -3740,8 +3745,10 @@ static void list_slab_objects(struct kme
slab_unlock(page);
bitmap_free(map);
#endif
diff --git a/debian/patches-rt/mm-make-vmstat-rt-aware.patch b/debian/patches-rt/mm-make-vmstat-rt-aware.patch
index 57a2c6247..51b1998fb 100644
--- a/debian/patches-rt/mm-make-vmstat-rt-aware.patch
+++ b/debian/patches-rt/mm-make-vmstat-rt-aware.patch
@@ -1,7 +1,7 @@
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:30:13 -0500
Subject: mm/vmstat: Protect per cpu variables with preempt disable on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Disable preemption on -RT for the vmstat code. On vanilla the code runs in
IRQ-off regions while on -RT it does not. "preempt_disable" ensures that the
@@ -39,7 +39,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static inline void count_vm_events(enum vm_event_item item, long delta)
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
-@@ -320,6 +320,7 @@ void __mod_zone_page_state(struct zone *
+@@ -321,6 +321,7 @@ void __mod_zone_page_state(struct zone *
long x;
long t;
@@ -47,7 +47,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
x = delta + __this_cpu_read(*p);
t = __this_cpu_read(pcp->stat_threshold);
-@@ -329,6 +330,7 @@ void __mod_zone_page_state(struct zone *
+@@ -330,6 +331,7 @@ void __mod_zone_page_state(struct zone *
x = 0;
}
__this_cpu_write(*p, x);
@@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__mod_zone_page_state);
-@@ -340,6 +342,7 @@ void __mod_node_page_state(struct pglist
+@@ -341,6 +343,7 @@ void __mod_node_page_state(struct pglist
long x;
long t;
@@ -63,7 +63,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
x = delta + __this_cpu_read(*p);
t = __this_cpu_read(pcp->stat_threshold);
-@@ -349,6 +352,7 @@ void __mod_node_page_state(struct pglist
+@@ -350,6 +353,7 @@ void __mod_node_page_state(struct pglist
x = 0;
}
__this_cpu_write(*p, x);
@@ -71,7 +71,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__mod_node_page_state);
-@@ -381,6 +385,7 @@ void __inc_zone_state(struct zone *zone,
+@@ -382,6 +386,7 @@ void __inc_zone_state(struct zone *zone,
s8 __percpu *p = pcp->vm_stat_diff + item;
s8 v, t;
@@ -79,7 +79,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
v = __this_cpu_inc_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v > t)) {
-@@ -389,6 +394,7 @@ void __inc_zone_state(struct zone *zone,
+@@ -390,6 +395,7 @@ void __inc_zone_state(struct zone *zone,
zone_page_state_add(v + overstep, zone, item);
__this_cpu_write(*p, -overstep);
}
@@ -87,7 +87,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
-@@ -397,6 +403,7 @@ void __inc_node_state(struct pglist_data
+@@ -398,6 +404,7 @@ void __inc_node_state(struct pglist_data
s8 __percpu *p = pcp->vm_node_stat_diff + item;
s8 v, t;
@@ -95,7 +95,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
v = __this_cpu_inc_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v > t)) {
-@@ -405,6 +412,7 @@ void __inc_node_state(struct pglist_data
+@@ -406,6 +413,7 @@ void __inc_node_state(struct pglist_data
node_page_state_add(v + overstep, pgdat, item);
__this_cpu_write(*p, -overstep);
}
@@ -103,7 +103,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
-@@ -425,6 +433,7 @@ void __dec_zone_state(struct zone *zone,
+@@ -426,6 +434,7 @@ void __dec_zone_state(struct zone *zone,
s8 __percpu *p = pcp->vm_stat_diff + item;
s8 v, t;
@@ -111,7 +111,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
v = __this_cpu_dec_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v < - t)) {
-@@ -433,6 +442,7 @@ void __dec_zone_state(struct zone *zone,
+@@ -434,6 +443,7 @@ void __dec_zone_state(struct zone *zone,
zone_page_state_add(v - overstep, zone, item);
__this_cpu_write(*p, overstep);
}
@@ -119,7 +119,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
-@@ -441,6 +451,7 @@ void __dec_node_state(struct pglist_data
+@@ -442,6 +452,7 @@ void __dec_node_state(struct pglist_data
s8 __percpu *p = pcp->vm_node_stat_diff + item;
s8 v, t;
@@ -127,7 +127,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
v = __this_cpu_dec_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v < - t)) {
-@@ -449,6 +460,7 @@ void __dec_node_state(struct pglist_data
+@@ -450,6 +461,7 @@ void __dec_node_state(struct pglist_data
node_page_state_add(v - overstep, pgdat, item);
__this_cpu_write(*p, overstep);
}
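As an aside, the pattern these vmstat hunks implement reduces to the sketch
below. It is illustrative only: the counter name is invented, and the patch
itself wraps the critical section in RT-aware variants of
preempt_disable()/preempt_enable() that compile away on non-RT kernels.

    /* Sketch: protecting a per-CPU statistics counter. On vanilla
     * kernels the callers run with IRQs off; on -RT they may be
     * preemptible, so the read-modify-write below must not be
     * interleaved with another update on the same CPU. */
    #include <linux/percpu.h>
    #include <linux/preempt.h>

    static DEFINE_PER_CPU(long, example_stat_diff);

    static void example_mod_state(long delta)
    {
            preempt_disable();
            __this_cpu_add(example_stat_diff, delta);
            preempt_enable();
    }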
diff --git a/debian/patches-rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch b/debian/patches-rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
index 327edaefb..919e6b7c5 100644
--- a/debian/patches-rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
+++ b/debian/patches-rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
@@ -1,7 +1,7 @@
From: Yang Shi <yang.shi@windriver.com>
Subject: mm/memcontrol: Don't call schedule_work_on in preemption disabled context
Date: Wed, 30 Oct 2013 11:48:33 -0700
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
The following trace is triggered when running ltp oom test cases:
@@ -49,7 +49,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -2072,7 +2072,7 @@ static void drain_all_stock(struct mem_c
+@@ -2159,7 +2159,7 @@ static void drain_all_stock(struct mem_c
* as well as workers from this path always operate on the local
* per-cpu data. CPU up doesn't touch memcg_stock at all.
*/
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for_each_online_cpu(cpu) {
struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
struct mem_cgroup *memcg;
-@@ -2092,7 +2092,7 @@ static void drain_all_stock(struct mem_c
+@@ -2179,7 +2179,7 @@ static void drain_all_stock(struct mem_c
}
css_put(&memcg->css);
}
diff --git a/debian/patches-rt/mm-memcontrol-do_not_disable_irq.patch b/debian/patches-rt/mm-memcontrol-do_not_disable_irq.patch
index bd979c641..d9dabbcca 100644
--- a/debian/patches-rt/mm-memcontrol-do_not_disable_irq.patch
+++ b/debian/patches-rt/mm-memcontrol-do_not_disable_irq.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Subject: mm/memcontrol: Replace local_irq_disable with local locks
Date: Wed, 28 Jan 2015 17:14:16 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
There are a few local_irq_disable() sections which then take sleeping locks.
This patch converts them to local locks.
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -69,6 +69,7 @@
+@@ -61,6 +61,7 @@
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"
@@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <linux/uaccess.h>
-@@ -94,6 +95,8 @@ int do_swap_account __read_mostly;
+@@ -86,6 +87,8 @@ int do_swap_account __read_mostly;
#define do_swap_account 0
#endif
@@ -30,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
-@@ -4869,12 +4872,12 @@ static int mem_cgroup_move_account(struc
+@@ -4967,12 +4970,12 @@ static int mem_cgroup_move_account(struc
ret = 0;
@@ -45,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
out_unlock:
unlock_page(page);
out:
-@@ -5993,10 +5996,10 @@ void mem_cgroup_commit_charge(struct pag
+@@ -6086,10 +6089,10 @@ void mem_cgroup_commit_charge(struct pag
commit_charge(page, memcg, lrucare);
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (do_memsw_account() && PageSwapCache(page)) {
swp_entry_t entry = { .val = page_private(page) };
-@@ -6065,7 +6068,7 @@ static void uncharge_batch(const struct
+@@ -6158,7 +6161,7 @@ static void uncharge_batch(const struct
memcg_oom_recover(ug->memcg);
}
@@ -67,16 +67,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
__mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon);
__mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file);
__mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
-@@ -6073,7 +6076,7 @@ static void uncharge_batch(const struct
+@@ -6166,7 +6169,7 @@ static void uncharge_batch(const struct
__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
- __this_cpu_add(ug->memcg->stat_cpu->nr_page_events, nr_pages);
+ __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, nr_pages);
memcg_check_events(ug->memcg, ug->dummy_page);
- local_irq_restore(flags);
+ local_unlock_irqrestore(event_lock, flags);
if (!mem_cgroup_is_root(ug->memcg))
css_put_many(&ug->memcg->css, nr_pages);
-@@ -6236,10 +6239,10 @@ void mem_cgroup_migrate(struct page *old
+@@ -6329,10 +6332,10 @@ void mem_cgroup_migrate(struct page *old
commit_charge(newpage, memcg, false);
@@ -89,7 +89,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
-@@ -6431,6 +6434,7 @@ void mem_cgroup_swapout(struct page *pag
+@@ -6524,6 +6527,7 @@ void mem_cgroup_swapout(struct page *pag
struct mem_cgroup *memcg, *swap_memcg;
unsigned int nr_entries;
unsigned short oldid;
@@ -97,7 +97,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
VM_BUG_ON_PAGE(PageLRU(page), page);
VM_BUG_ON_PAGE(page_count(page), page);
-@@ -6476,13 +6480,17 @@ void mem_cgroup_swapout(struct page *pag
+@@ -6569,13 +6573,17 @@ void mem_cgroup_swapout(struct page *pag
* important here to have the interrupts disabled because it is the
* only synchronisation we have for updating the per-CPU variables.
*/
diff --git a/debian/patches-rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch b/debian/patches-rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch
index 5e7b494a1..b10cc35d6 100644
--- a/debian/patches-rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch
+++ b/debian/patches-rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch
@@ -1,7 +1,7 @@
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:37 -0500
Subject: mm: page_alloc: rt-friendly per-cpu pages
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
rt-friendly per-cpu pages: convert the irqs-off per-cpu locking
method into a preemptible, explicit-per-cpu-locks method.
@@ -18,7 +18,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -60,6 +60,7 @@
+@@ -62,6 +62,7 @@
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/sched/mm.h>
@@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/page_owner.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
-@@ -295,6 +296,18 @@ EXPORT_SYMBOL(nr_node_ids);
+@@ -311,6 +312,18 @@ EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int page_group_by_mobility_disabled __read_mostly;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
-@@ -1319,10 +1332,10 @@ static void __free_pages_ok(struct page
+@@ -1389,10 +1402,10 @@ static void __free_pages_ok(struct page
return;
migratetype = get_pfnblock_migratetype(page, pfn);
@@ -57,8 +57,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ local_unlock_irqrestore(pa_lock, flags);
}
- static void __init __free_pages_boot_core(struct page *page, unsigned int order)
-@@ -2632,13 +2645,13 @@ void drain_zone_pages(struct zone *zone,
+ void __free_pages_core(struct page *page, unsigned int order)
+@@ -2748,13 +2761,13 @@ void drain_zone_pages(struct zone *zone,
int to_drain, batch;
LIST_HEAD(dst);
@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (to_drain > 0)
free_pcppages_bulk(zone, &dst, false);
-@@ -2660,7 +2673,7 @@ static void drain_pages_zone(unsigned in
+@@ -2776,7 +2789,7 @@ static void drain_pages_zone(unsigned in
LIST_HEAD(dst);
int count;
@@ -83,7 +83,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
-@@ -2668,7 +2681,7 @@ static void drain_pages_zone(unsigned in
+@@ -2784,7 +2797,7 @@ static void drain_pages_zone(unsigned in
if (count)
isolate_pcp_pages(count, pcp, &dst);
@@ -92,7 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (count)
free_pcppages_bulk(zone, &dst, false);
-@@ -2706,6 +2719,7 @@ void drain_local_pages(struct zone *zone
+@@ -2822,6 +2835,7 @@ void drain_local_pages(struct zone *zone
drain_pages(cpu);
}
@@ -100,7 +100,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void drain_local_pages_wq(struct work_struct *work)
{
struct pcpu_drain *drain;
-@@ -2723,6 +2737,7 @@ static void drain_local_pages_wq(struct
+@@ -2839,6 +2853,7 @@ static void drain_local_pages_wq(struct
drain_local_pages(drain->zone);
preempt_enable();
}
@@ -108,7 +108,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Spill all the per-cpu pages from all CPUs back into the buddy allocator.
-@@ -2790,6 +2805,14 @@ void drain_all_pages(struct zone *zone)
+@@ -2906,6 +2921,14 @@ void drain_all_pages(struct zone *zone)
cpumask_clear_cpu(cpu, &cpus_with_pcps);
}
@@ -123,7 +123,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for_each_cpu(cpu, &cpus_with_pcps) {
struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu);
-@@ -2799,6 +2822,7 @@ void drain_all_pages(struct zone *zone)
+@@ -2915,6 +2938,7 @@ void drain_all_pages(struct zone *zone)
}
for_each_cpu(cpu, &cpus_with_pcps)
flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work);
@@ -131,7 +131,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
mutex_unlock(&pcpu_drain_mutex);
}
-@@ -2918,9 +2942,9 @@ void free_unref_page(struct page *page)
+@@ -3034,9 +3058,9 @@ void free_unref_page(struct page *page)
if (!free_unref_page_prepare(page, pfn))
return;
@@ -143,7 +143,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (!list_empty(&dst))
free_pcppages_bulk(zone, &dst, false);
}
-@@ -2947,7 +2971,7 @@ void free_unref_page_list(struct list_he
+@@ -3063,7 +3087,7 @@ void free_unref_page_list(struct list_he
set_page_private(page, pfn);
}
@@ -152,7 +152,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_safe(page, next, list, lru) {
unsigned long pfn = page_private(page);
enum zone_type type;
-@@ -2962,12 +2986,12 @@ void free_unref_page_list(struct list_he
+@@ -3078,12 +3102,12 @@ void free_unref_page_list(struct list_he
* a large list of pages to free.
*/
if (++batch_count == SWAP_CLUSTER_MAX) {
@@ -168,7 +168,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for (i = 0; i < __MAX_NR_ZONES; ) {
struct page *page;
-@@ -3118,7 +3142,7 @@ static struct page *rmqueue_pcplist(stru
+@@ -3233,7 +3257,7 @@ static struct page *rmqueue_pcplist(stru
struct page *page;
unsigned long flags;
@@ -177,8 +177,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pcp = &this_cpu_ptr(zone->pageset)->pcp;
list = &pcp->lists[migratetype];
page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list);
-@@ -3126,7 +3150,7 @@ static struct page *rmqueue_pcplist(stru
- __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
+@@ -3241,7 +3265,7 @@ static struct page *rmqueue_pcplist(stru
+ __count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
zone_statistics(preferred_zone, zone);
}
- local_irq_restore(flags);
@@ -186,7 +186,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return page;
}
-@@ -3153,7 +3177,7 @@ struct page *rmqueue(struct zone *prefer
+@@ -3268,7 +3292,7 @@ struct page *rmqueue(struct zone *prefer
* allocate greater than order-1 page units with __GFP_NOFAIL.
*/
WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
@@ -195,7 +195,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
page = NULL;
-@@ -3173,7 +3197,7 @@ struct page *rmqueue(struct zone *prefer
+@@ -3288,7 +3312,7 @@ struct page *rmqueue(struct zone *prefer
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
zone_statistics(preferred_zone, zone);
@@ -204,7 +204,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
out:
/* Separate test+clear to avoid unnecessary atomics */
-@@ -3186,7 +3210,7 @@ struct page *rmqueue(struct zone *prefer
+@@ -3301,7 +3325,7 @@ struct page *rmqueue(struct zone *prefer
return page;
failed:
@@ -213,7 +213,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return NULL;
}
-@@ -8347,7 +8371,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -8490,7 +8514,7 @@ void zone_pcp_reset(struct zone *zone)
struct per_cpu_pageset *pset;
/* avoid races with drain_pages() */
@@ -222,7 +222,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (zone->pageset != &boot_pageset) {
for_each_online_cpu(cpu) {
pset = per_cpu_ptr(zone->pageset, cpu);
-@@ -8356,7 +8380,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -8499,7 +8523,7 @@ void zone_pcp_reset(struct zone *zone)
free_percpu(zone->pageset);
zone->pageset = &boot_pageset;
}
diff --git a/debian/patches-rt/mm-perform-lru_add_drain_all-remotely.patch b/debian/patches-rt/mm-perform-lru_add_drain_all-remotely.patch
index 29d986b2d..6940fc7e5 100644
--- a/debian/patches-rt/mm-perform-lru_add_drain_all-remotely.patch
+++ b/debian/patches-rt/mm-perform-lru_add_drain_all-remotely.patch
@@ -1,7 +1,7 @@
From: Luiz Capitulino <lcapitulino@redhat.com>
Date: Fri, 27 May 2016 15:03:28 +0200
Subject: [PATCH] mm: perform lru_add_drain_all() remotely
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
lru_add_drain_all() works by scheduling lru_add_drain_cpu() to run
on all CPUs that have non-empty LRU pagevecs and then waiting for
@@ -25,7 +25,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/swap.c
+++ b/mm/swap.c
-@@ -584,9 +584,15 @@ void lru_add_drain_cpu(int cpu)
+@@ -585,9 +585,15 @@ void lru_add_drain_cpu(int cpu)
unsigned long flags;
/* No harm done if a racing interrupt already did this */
@@ -41,7 +41,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
-@@ -656,6 +662,16 @@ void lru_add_drain(void)
+@@ -657,6 +663,16 @@ void lru_add_drain(void)
#ifdef CONFIG_SMP
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
static void lru_add_drain_per_cpu(struct work_struct *dummy)
-@@ -663,6 +679,16 @@ static void lru_add_drain_per_cpu(struct
+@@ -664,6 +680,16 @@ static void lru_add_drain_per_cpu(struct
lru_add_drain();
}
@@ -75,7 +75,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Doesn't need any cpu hotplug locking because we do rely on per-cpu
* kworkers being shut down before our page_alloc_cpu_dead callback is
-@@ -687,21 +713,19 @@ void lru_add_drain_all(void)
+@@ -688,21 +714,19 @@ void lru_add_drain_all(void)
cpumask_clear(&has_work);
for_each_online_cpu(cpu) {
diff --git a/debian/patches-rt/mm-protect-activate-switch-mm.patch b/debian/patches-rt/mm-protect-activate-switch-mm.patch
index 0e65bf316..80266170d 100644
--- a/debian/patches-rt/mm-protect-activate-switch-mm.patch
+++ b/debian/patches-rt/mm-protect-activate-switch-mm.patch
@@ -1,7 +1,7 @@
From: Yong Zhang <yong.zhang0@gmail.com>
Date: Tue, 15 May 2012 13:53:56 +0800
Subject: mm: Protect activate_mm() by preempt_[disable&enable]_rt()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Use preempt_*_rt instead of local_irq_*_rt, otherwise there will be a
warning on ARM like below:
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/fs/exec.c
+++ b/fs/exec.c
-@@ -1031,12 +1031,14 @@ static int exec_mmap(struct mm_struct *m
+@@ -1032,12 +1032,14 @@ static int exec_mmap(struct mm_struct *m
}
}
task_lock(tsk);
diff --git a/debian/patches-rt/mm-rt-kmap-atomic-scheduling.patch b/debian/patches-rt/mm-rt-kmap-atomic-scheduling.patch
index 71f484563..f882f9d16 100644
--- a/debian/patches-rt/mm-rt-kmap-atomic-scheduling.patch
+++ b/debian/patches-rt/mm-rt-kmap-atomic-scheduling.patch
@@ -1,7 +1,7 @@
Subject: mm, rt: kmap_atomic scheduling
From: Peter Zijlstra <peterz@infradead.org>
Date: Thu, 28 Jul 2011 10:43:51 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
In fact, now that migrate_disable() exists, one could play games with
kmap_atomic. You could save/restore the kmap_atomic slots on context
@@ -39,7 +39,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
#include <asm/pgtable.h>
#include <asm/ldt.h>
-@@ -195,6 +196,35 @@ start_thread(struct pt_regs *regs, unsig
+@@ -202,6 +203,35 @@ start_thread(struct pt_regs *regs, unsig
}
EXPORT_SYMBOL_GPL(start_thread);
@@ -75,7 +75,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
/*
* switch_to(x,y) should switch tasks from x to y.
-@@ -265,6 +295,8 @@ EXPORT_SYMBOL_GPL(start_thread);
+@@ -272,6 +302,8 @@ EXPORT_SYMBOL_GPL(start_thread);
switch_to_extra(prev_p, next_p);
@@ -86,7 +86,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
* This must be done before restoring TLS segments so
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
-@@ -32,10 +32,11 @@ EXPORT_SYMBOL(kunmap);
+@@ -33,10 +33,11 @@ EXPORT_SYMBOL(kunmap);
*/
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
@@ -99,7 +99,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
pagefault_disable();
if (!PageHighMem(page))
-@@ -45,7 +46,10 @@ void *kmap_atomic_prot(struct page *page
+@@ -46,7 +47,10 @@ void *kmap_atomic_prot(struct page *page
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
BUG_ON(!pte_none(*(kmap_pte-idx)));
@@ -111,7 +111,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
arch_flush_lazy_mmu_mode();
return (void *)vaddr;
-@@ -88,6 +92,9 @@ void __kunmap_atomic(void *kvaddr)
+@@ -89,6 +93,9 @@ void __kunmap_atomic(void *kvaddr)
* is a bad idea also, in case the page changes cacheability
* attributes or becomes a protected page in a hypervisor.
*/
@@ -121,7 +121,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
kpte_clear_flush(kmap_pte-idx, vaddr);
kmap_atomic_idx_pop();
arch_flush_lazy_mmu_mode();
-@@ -100,7 +107,7 @@ void __kunmap_atomic(void *kvaddr)
+@@ -101,7 +108,7 @@ void __kunmap_atomic(void *kvaddr)
#endif
pagefault_enable();
@@ -132,7 +132,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
-@@ -59,6 +59,7 @@ EXPORT_SYMBOL_GPL(iomap_free);
+@@ -46,6 +46,7 @@ EXPORT_SYMBOL_GPL(iomap_free);
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
@@ -140,7 +140,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
unsigned long vaddr;
int idx, type;
-@@ -68,7 +69,10 @@ void *kmap_atomic_prot_pfn(unsigned long
+@@ -55,7 +56,10 @@ void *kmap_atomic_prot_pfn(unsigned long
type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
@@ -152,7 +152,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
arch_flush_lazy_mmu_mode();
return (void *)vaddr;
-@@ -119,6 +123,9 @@ iounmap_atomic(void __iomem *kvaddr)
+@@ -106,6 +110,9 @@ iounmap_atomic(void __iomem *kvaddr)
* is a bad idea also, in case the page changes cacheability
* attributes or becomes a protected page in a hypervisor.
*/
@@ -248,9 +248,9 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
-@@ -1210,6 +1211,12 @@ struct task_struct {
- int softirq_nestcnt;
- unsigned int softirqs_raised;
+@@ -1209,6 +1210,12 @@ struct task_struct {
+ #ifdef CONFIG_PREEMPT_RT_BASE
+ struct rcu_head put_rcu;
#endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32
diff --git a/debian/patches-rt/mm-scatterlist-dont-disable-irqs-on-RT.patch b/debian/patches-rt/mm-scatterlist-dont-disable-irqs-on-RT.patch
index 23a8a0640..c1c023b63 100644
--- a/debian/patches-rt/mm-scatterlist-dont-disable-irqs-on-RT.patch
+++ b/debian/patches-rt/mm-scatterlist-dont-disable-irqs-on-RT.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 3 Jul 2009 08:44:34 -0500
Subject: mm/scatterlist: Do not disable irqs on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
For -RT it is enough to keep pagefault disabled (which is currently handled by
kmap_atomic()).
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
-@@ -776,7 +776,7 @@ void sg_miter_stop(struct sg_mapping_ite
+@@ -800,7 +800,7 @@ void sg_miter_stop(struct sg_mapping_ite
flush_kernel_dcache_page(miter->page);
if (miter->__flags & SG_MITER_ATOMIC) {
diff --git a/debian/patches-rt/mm-vmalloc-use-get-cpu-light.patch b/debian/patches-rt/mm-vmalloc-use-get-cpu-light.patch
index 5be89b0cf..d37a6eee6 100644
--- a/debian/patches-rt/mm-vmalloc-use-get-cpu-light.patch
+++ b/debian/patches-rt/mm-vmalloc-use-get-cpu-light.patch
@@ -1,7 +1,7 @@
Subject: mm/vmalloc: Another preempt disable region which sucks
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 12 Jul 2011 11:39:36 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Avoid the preempt-disabling version of get_cpu_var(). The inner lock should
provide enough serialisation.
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
-@@ -852,7 +852,7 @@ static void *new_vmap_block(unsigned int
+@@ -1400,7 +1400,7 @@ static void *new_vmap_block(unsigned int
struct vmap_block *vb;
struct vmap_area *va;
unsigned long vb_idx;
@@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void *vaddr;
node = numa_node_id();
-@@ -895,11 +895,12 @@ static void *new_vmap_block(unsigned int
+@@ -1443,11 +1443,12 @@ static void *new_vmap_block(unsigned int
BUG_ON(err);
radix_tree_preload_end();
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return vaddr;
}
-@@ -968,6 +969,7 @@ static void *vb_alloc(unsigned long size
+@@ -1516,6 +1517,7 @@ static void *vb_alloc(unsigned long size
struct vmap_block *vb;
void *vaddr = NULL;
unsigned int order;
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
BUG_ON(offset_in_page(size));
BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
-@@ -982,7 +984,8 @@ static void *vb_alloc(unsigned long size
+@@ -1530,7 +1532,8 @@ static void *vb_alloc(unsigned long size
order = get_order(size);
rcu_read_lock();
@@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_rcu(vb, &vbq->free, free_list) {
unsigned long pages_off;
-@@ -1005,7 +1008,7 @@ static void *vb_alloc(unsigned long size
+@@ -1553,7 +1556,7 @@ static void *vb_alloc(unsigned long size
break;
}
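The get_cpu_light()/put_cpu_light() pair introduced above is the RT tree's
preemptible stand-in for get_cpu()/put_cpu(): it pins the task to the current
CPU via migrate_disable() without disabling preemption, so the per-CPU data
still needs its own lock. A hypothetical usage sketch (the vmap-block details
are omitted):

    #include <linux/smp.h>
    #include <linux/printk.h>

    static void example_percpu_section(void)
    {
            int cpu;

            /* Unlike get_cpu(), the section stays preemptible; only
             * migration to another CPU is prevented. */
            cpu = get_cpu_light();
            pr_debug("pinned to CPU %d\n", cpu);
            /* ... access this CPU's data under its own spinlock ... */
            put_cpu_light();
    }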
diff --git a/debian/patches-rt/mm-workingset-replace-IRQ-off-check-with-a-lockdep-a.patch b/debian/patches-rt/mm-workingset-replace-IRQ-off-check-with-a-lockdep-a.patch
index 0b96d32fa..b64e29581 100644
--- a/debian/patches-rt/mm-workingset-replace-IRQ-off-check-with-a-lockdep-a.patch
+++ b/debian/patches-rt/mm-workingset-replace-IRQ-off-check-with-a-lockdep-a.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 11 Feb 2019 10:40:46 +0100
Subject: [PATCH] mm: workingset: replace IRQ-off check with a lockdep assert.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Commit
@@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/workingset.c
+++ b/mm/workingset.c
-@@ -368,6 +368,8 @@ static struct list_lru shadow_nodes;
+@@ -367,6 +367,8 @@ static struct list_lru shadow_nodes;
void workingset_update_node(struct xa_node *node)
{
@@ -30,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Track non-empty nodes that contain only shadow entries;
* unlink those that contain pages or are being freed.
-@@ -376,7 +378,8 @@ void workingset_update_node(struct xa_no
+@@ -375,7 +377,8 @@ void workingset_update_node(struct xa_no
* already where they should be. The list_empty() test is safe
* as node->private_list is protected by the i_pages lock.
*/
diff --git a/debian/patches-rt/mm-zswap-Do-not-disable-preemption-in-zswap_frontswa.patch b/debian/patches-rt/mm-zswap-Do-not-disable-preemption-in-zswap_frontswa.patch
new file mode 100644
index 000000000..a7811bfd2
--- /dev/null
+++ b/debian/patches-rt/mm-zswap-Do-not-disable-preemption-in-zswap_frontswa.patch
@@ -0,0 +1,118 @@
+From: "Luis Claudio R. Goncalves" <lclaudio@uudg.org>
+Date: Tue, 25 Jun 2019 11:28:04 -0300
+Subject: [PATCH] mm/zswap: Do not disable preemption in
+ zswap_frontswap_store()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
+
+Zswap causes "BUG: scheduling while atomic" by blocking on a rt_spin_lock() with
+preemption disabled. The preemption is disabled by get_cpu_var() in
+zswap_frontswap_store() to protect access to the zswap_dstmem percpu variable.
+
+Use get_locked_var() to protect the percpu zswap_dstmem variable, making the
+code preemptible.
+
+As get_cpu_ptr() also disables preemption, replace it by this_cpu_ptr() and
+remove the counterpart put_cpu_ptr().
+
+Steps to Reproduce:
+
+ 1. # grubby --args "zswap.enabled=1" --update-kernel DEFAULT
+ 2. # reboot
+ 3. Calculate the amount of memory to be used by the test:
+ ---> grep MemAvailable /proc/meminfo
+ ---> Add 25% ~ 50% to that value
+ 4. # stress --vm 1 --vm-bytes ${MemAvailable+25%} --timeout 240s
+
+Usually, in less than 5 minutes the backtrace listed below appears, followed
+by a kernel panic:
+
+| BUG: scheduling while atomic: kswapd1/181/0x00000002
+|
+| Preemption disabled at:
+| [<ffffffff8b2a6cda>] zswap_frontswap_store+0x21a/0x6e1
+|
+| Kernel panic - not syncing: scheduling while atomic
+| CPU: 14 PID: 181 Comm: kswapd1 Kdump: loaded Not tainted 5.0.14-rt9 #1
+| Hardware name: AMD Pence/Pence, BIOS WPN2321X_Weekly_12_03_21 03/19/2012
+| Call Trace:
+| panic+0x106/0x2a7
+| __schedule_bug.cold+0x3f/0x51
+| __schedule+0x5cb/0x6f0
+| schedule+0x43/0xd0
+| rt_spin_lock_slowlock_locked+0x114/0x2b0
+| rt_spin_lock_slowlock+0x51/0x80
+| zbud_alloc+0x1da/0x2d0
+| zswap_frontswap_store+0x31a/0x6e1
+| __frontswap_store+0xab/0x130
+| swap_writepage+0x39/0x70
+| pageout.isra.0+0xe3/0x320
+| shrink_page_list+0xa8e/0xd10
+| shrink_inactive_list+0x251/0x840
+| shrink_node_memcg+0x213/0x770
+| shrink_node+0xd9/0x450
+| balance_pgdat+0x2d5/0x510
+| kswapd+0x218/0x470
+| kthread+0xfb/0x130
+| ret_from_fork+0x27/0x50
+
+Cc: stable-rt@vger.kernel.org
+Reported-by: Ping Fang <pifang@redhat.com>
+Signed-off-by: Luis Claudio R. Goncalves <lgoncalv@redhat.com>
+Reviewed-by: Daniel Bristot de Oliveira <bristot@redhat.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/zswap.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+--- a/mm/zswap.c
++++ b/mm/zswap.c
+@@ -18,6 +18,7 @@
+ #include <linux/highmem.h>
+ #include <linux/slab.h>
+ #include <linux/spinlock.h>
++#include <linux/locallock.h>
+ #include <linux/types.h>
+ #include <linux/atomic.h>
+ #include <linux/frontswap.h>
+@@ -981,6 +982,8 @@ static void zswap_fill_page(void *ptr, u
+ memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
+ }
+
++/* protect zswap_dstmem from concurrency */
++static DEFINE_LOCAL_IRQ_LOCK(zswap_dstmem_lock);
+ /*********************************
+ * frontswap hooks
+ **********************************/
+@@ -1057,12 +1060,11 @@ static int zswap_frontswap_store(unsigne
+ }
+
+ /* compress */
+- dst = get_cpu_var(zswap_dstmem);
+- tfm = *get_cpu_ptr(entry->pool->tfm);
++ dst = get_locked_var(zswap_dstmem_lock, zswap_dstmem);
++ tfm = *this_cpu_ptr(entry->pool->tfm);
+ src = kmap_atomic(page);
+ ret = crypto_comp_compress(tfm, src, PAGE_SIZE, dst, &dlen);
+ kunmap_atomic(src);
+- put_cpu_ptr(entry->pool->tfm);
+ if (ret) {
+ ret = -EINVAL;
+ goto put_dstmem;
+@@ -1085,7 +1087,7 @@ static int zswap_frontswap_store(unsigne
+ memcpy(buf, &zhdr, hlen);
+ memcpy(buf + hlen, dst, dlen);
+ zpool_unmap_handle(entry->pool->zpool, handle);
+- put_cpu_var(zswap_dstmem);
++ put_locked_var(zswap_dstmem_lock, zswap_dstmem);
+
+ /* populate entry */
+ entry->offset = offset;
+@@ -1113,7 +1115,7 @@ static int zswap_frontswap_store(unsigne
+ return 0;
+
+ put_dstmem:
+- put_cpu_var(zswap_dstmem);
++ put_locked_var(zswap_dstmem_lock, zswap_dstmem);
+ zswap_pool_put(entry->pool);
+ freepage:
+ zswap_entry_cache_free(entry);
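The conversion above follows the locallock pattern that recurs throughout this
series. A minimal sketch with invented names: on non-RT kernels the local lock
degrades to IRQ/preemption disabling, while on -RT it is a per-CPU sleeping
lock, so the section stays preemptible.

    #include <linux/locallock.h>
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(u8 *, example_dstmem);
    static DEFINE_LOCAL_IRQ_LOCK(example_dstmem_lock);

    static void example_store(void)
    {
            u8 *dst;

            /* Lock this CPU's lock and fetch this CPU's variable,
             * replacing get_cpu_var()/put_cpu_var(). */
            dst = get_locked_var(example_dstmem_lock, example_dstmem);
            /* ... compress into the per-CPU scratch buffer ... */
            put_locked_var(example_dstmem_lock, example_dstmem);
    }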
diff --git a/debian/patches-rt/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch b/debian/patches-rt/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch
index 80c6676c4..44f0447bf 100644
--- a/debian/patches-rt/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch
+++ b/debian/patches-rt/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch
@@ -1,7 +1,7 @@
From: Mike Galbraith <umgwanakikbuti@gmail.com>
Date: Tue, 22 Mar 2016 11:16:09 +0100
Subject: [PATCH] mm/zsmalloc: copy with get_cpu_var() and locking
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
get_cpu_var() disables preemption and triggers a might_sleep() splat later.
This is replaced with get_locked_var().
diff --git a/debian/patches-rt/mutex-no-spin-on-rt.patch b/debian/patches-rt/mutex-no-spin-on-rt.patch
index ade291ba3..fd50f8ad2 100644
--- a/debian/patches-rt/mutex-no-spin-on-rt.patch
+++ b/debian/patches-rt/mutex-no-spin-on-rt.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 17 Jul 2011 21:51:45 +0200
Subject: locking: Disable spin on owner for RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Drop spin on owner for mutex / rwsem. We are most likely not using it
but…
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
-@@ -225,11 +225,11 @@ config ARCH_SUPPORTS_ATOMIC_RMW
+@@ -226,11 +226,11 @@ config ARCH_SUPPORTS_ATOMIC_RMW
config MUTEX_SPIN_ON_OWNER
def_bool y
@@ -22,8 +22,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
config RWSEM_SPIN_ON_OWNER
def_bool y
-- depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW
-+ depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL
+- depends on SMP && ARCH_SUPPORTS_ATOMIC_RMW
++ depends on SMP && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL
config LOCK_SPIN_ON_OWNER
def_bool y
diff --git a/debian/patches-rt/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch b/debian/patches-rt/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
index ce85aa7e8..b1aa46eb1 100644
--- a/debian/patches-rt/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
+++ b/debian/patches-rt/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
@@ -2,7 +2,7 @@ From: Steven Rostedt <rostedt@goodmis.org>
Date: Tue, 6 Dec 2016 17:50:30 -0500
Subject: [PATCH] net: Have __napi_schedule_irqoff() disable interrupts on
RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
A customer hit a crash where the napi sd->poll_list became corrupted.
The customer had the bnx2x driver, which does a
@@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
-@@ -422,7 +422,19 @@ typedef enum rx_handler_result rx_handle
+@@ -419,7 +419,19 @@ typedef enum rx_handler_result rx_handle
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
void __napi_schedule(struct napi_struct *n);
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -6036,6 +6036,7 @@ bool napi_schedule_prep(struct napi_stru
+@@ -6026,6 +6026,7 @@ bool napi_schedule_prep(struct napi_stru
}
EXPORT_SYMBOL(napi_schedule_prep);
@@ -59,7 +59,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* __napi_schedule_irqoff - schedule for receive
* @n: entry to schedule
-@@ -6047,6 +6048,7 @@ void __napi_schedule_irqoff(struct napi_
+@@ -6037,6 +6038,7 @@ void __napi_schedule_irqoff(struct napi_
____napi_schedule(this_cpu_ptr(&softnet_data), n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);
diff --git a/debian/patches-rt/net-Qdisc-use-a-seqlock-instead-seqcount.patch b/debian/patches-rt/net-Qdisc-use-a-seqlock-instead-seqcount.patch
index 0d1049d93..93d98da5d 100644
--- a/debian/patches-rt/net-Qdisc-use-a-seqlock-instead-seqcount.patch
+++ b/debian/patches-rt/net-Qdisc-use-a-seqlock-instead-seqcount.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 14 Sep 2016 17:36:35 +0200
Subject: [PATCH] net/Qdisc: use a seqlock instead seqcount
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
The seqcount disables preemption on -RT while it is held, which we can't
remove. Also, we don't want the reader to spin for ages if the writer is
@@ -111,8 +111,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#include <net/net_seq_lock.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
- #include <net/gen_stats.h>
-@@ -104,7 +105,7 @@ struct Qdisc {
+ #include <linux/mutex.h>
+@@ -102,7 +103,7 @@ struct Qdisc {
struct sk_buff_head gso_skb ____cacheline_aligned_in_smp;
struct qdisc_skb_head q;
struct gnet_stats_basic_packed bstats;
@@ -121,7 +121,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct gnet_stats_queue qstats;
unsigned long state;
struct Qdisc *next_sched;
-@@ -139,7 +140,11 @@ static inline bool qdisc_is_running(stru
+@@ -140,7 +141,11 @@ static inline bool qdisc_is_running(stru
{
if (qdisc->flags & TCQ_F_NOLOCK)
return spin_is_locked(&qdisc->seqlock);
@@ -132,8 +132,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#endif
}
- static inline bool qdisc_run_begin(struct Qdisc *qdisc)
-@@ -150,17 +155,27 @@ static inline bool qdisc_run_begin(struc
+ static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
+@@ -164,17 +169,27 @@ static inline bool qdisc_run_begin(struc
} else if (qdisc_is_running(qdisc)) {
return false;
}
@@ -161,7 +161,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (qdisc->flags & TCQ_F_NOLOCK)
spin_unlock(&qdisc->seqlock);
}
-@@ -477,7 +492,7 @@ static inline spinlock_t *qdisc_root_sle
+@@ -554,7 +569,7 @@ static inline spinlock_t *qdisc_root_sle
return qdisc_lock(root);
}
@@ -172,7 +172,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
-@@ -46,7 +46,7 @@
+@@ -42,7 +42,7 @@
struct net_rate_estimator {
struct gnet_stats_basic_packed *bstats;
spinlock_t *stats_lock;
@@ -181,7 +181,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct gnet_stats_basic_cpu __percpu *cpu_bstats;
u8 ewma_log;
u8 intvl_log; /* period : (250ms << intvl_log) */
-@@ -129,7 +129,7 @@ int gen_new_estimator(struct gnet_stats_
+@@ -125,7 +125,7 @@ int gen_new_estimator(struct gnet_stats_
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct net_rate_estimator __rcu **rate_est,
spinlock_t *lock,
@@ -190,7 +190,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct nlattr *opt)
{
struct gnet_estimator *parm = nla_data(opt);
-@@ -227,7 +227,7 @@ int gen_replace_estimator(struct gnet_st
+@@ -223,7 +223,7 @@ int gen_replace_estimator(struct gnet_st
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct net_rate_estimator __rcu **rate_est,
spinlock_t *lock,
@@ -201,7 +201,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
lock, running, opt);
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
-@@ -142,7 +142,7 @@ static void
+@@ -138,7 +138,7 @@ static void
}
void
@@ -210,7 +210,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu,
struct gnet_stats_basic_packed *b)
-@@ -155,15 +155,15 @@ void
+@@ -151,15 +151,15 @@ void
}
do {
if (running)
@@ -229,7 +229,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct gnet_dump *d,
struct gnet_stats_basic_cpu __percpu *cpu,
struct gnet_stats_basic_packed *b,
-@@ -204,7 +204,7 @@ static int
+@@ -200,7 +200,7 @@ static int
* if the room in the socket buffer was not sufficient.
*/
int
@@ -238,7 +238,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct gnet_dump *d,
struct gnet_stats_basic_cpu __percpu *cpu,
struct gnet_stats_basic_packed *b)
-@@ -228,7 +228,7 @@ EXPORT_SYMBOL(gnet_stats_copy_basic);
+@@ -224,7 +224,7 @@ EXPORT_SYMBOL(gnet_stats_copy_basic);
* if the room in the socket buffer was not sufficient.
*/
int
@@ -249,7 +249,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct gnet_stats_basic_packed *b)
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
-@@ -1242,7 +1242,7 @@ static struct Qdisc *qdisc_create(struct
+@@ -1248,7 +1248,7 @@ static struct Qdisc *qdisc_create(struct
rcu_assign_pointer(sch->stab, stab);
}
if (tca[TCA_RATE]) {
@@ -260,7 +260,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (sch->flags & TCQ_F_MQROOT) {
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
-@@ -570,7 +570,11 @@ struct Qdisc noop_qdisc = {
+@@ -552,7 +552,11 @@ struct Qdisc noop_qdisc = {
.ops = &noop_qdisc_ops,
.q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
.dev_queue = &noop_netdev_queue,
@@ -272,7 +272,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
.busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
.gso_skb = {
.next = (struct sk_buff *)&noop_qdisc.gso_skb,
-@@ -871,9 +875,17 @@ struct Qdisc *qdisc_alloc(struct netdev_
+@@ -850,9 +854,17 @@ struct Qdisc *qdisc_alloc(struct netdev_
lockdep_set_class(&sch->busylock,
dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
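What the seqlock buys here, compared to a bare seqcount: writers hold a real
lock, so an -RT reader stuck behind a preempted writer can block on it and
priority-boost the writer instead of spinning. Both sides use the stock kernel
API; a generic sketch, independent of the Qdisc code:

    #include <linux/seqlock.h>

    static seqlock_t example_lock = __SEQLOCK_UNLOCKED(example_lock);
    static u64 example_stat;

    static void example_writer(u64 v)
    {
            write_seqlock(&example_lock);   /* takes the embedded lock */
            example_stat = v;
            write_sequnlock(&example_lock);
    }

    static u64 example_reader(void)
    {
            unsigned int seq;
            u64 v;

            do {
                    seq = read_seqbegin(&example_lock);
                    v = example_stat;
            } while (read_seqretry(&example_lock, seq));
            return v;
    }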
diff --git a/debian/patches-rt/net-add-a-lock-around-icmp_sk.patch b/debian/patches-rt/net-add-a-lock-around-icmp_sk.patch
deleted file mode 100644
index ebf7314bd..000000000
--- a/debian/patches-rt/net-add-a-lock-around-icmp_sk.patch
+++ /dev/null
@@ -1,59 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 31 Aug 2016 17:54:09 +0200
-Subject: [PATCH] net: add a lock around icmp_sk()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-It looks like the this_cpu_ptr() access in icmp_sk() is protected with
-local_bh_disable(). To avoid missing serialization on -RT I am adding
-a local lock here. No crash has been observed; this is just a precaution.
-
-Cc: stable-rt@vger.kernel.org
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- net/ipv4/icmp.c | 8 ++++++++
- 1 file changed, 8 insertions(+)
-
---- a/net/ipv4/icmp.c
-+++ b/net/ipv4/icmp.c
-@@ -77,6 +77,7 @@
- #include <linux/string.h>
- #include <linux/netfilter_ipv4.h>
- #include <linux/slab.h>
-+#include <linux/locallock.h>
- #include <net/snmp.h>
- #include <net/ip.h>
- #include <net/route.h>
-@@ -204,6 +205,8 @@ static const struct icmp_control icmp_po
- *
- * On SMP we have one ICMP socket per-cpu.
- */
-+static DEFINE_LOCAL_IRQ_LOCK(icmp_sk_lock);
-+
- static struct sock *icmp_sk(struct net *net)
- {
- return *this_cpu_ptr(net->ipv4.icmp_sk);
-@@ -214,12 +217,16 @@ static inline struct sock *icmp_xmit_loc
- {
- struct sock *sk;
-
-+ if (!local_trylock(icmp_sk_lock))
-+ return NULL;
-+
- sk = icmp_sk(net);
-
- if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
- /* This can happen if the output path signals a
- * dst_link_failure() for an outgoing ICMP packet.
- */
-+ local_unlock(icmp_sk_lock);
- return NULL;
- }
- return sk;
-@@ -228,6 +235,7 @@ static inline struct sock *icmp_xmit_loc
- static inline void icmp_xmit_unlock(struct sock *sk)
- {
- spin_unlock(&sk->sk_lock.slock);
-+ local_unlock(icmp_sk_lock);
- }
-
- int sysctl_icmp_msgs_per_sec __read_mostly = 1000;
diff --git a/debian/patches-rt/net-add-back-the-missing-serialization-in-ip_send_un.patch b/debian/patches-rt/net-add-back-the-missing-serialization-in-ip_send_un.patch
deleted file mode 100644
index fce60c887..000000000
--- a/debian/patches-rt/net-add-back-the-missing-serialization-in-ip_send_un.patch
+++ /dev/null
@@ -1,93 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 31 Aug 2016 17:21:56 +0200
-Subject: [PATCH] net: add back the missing serialization in
- ip_send_unicast_reply()
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-Some time ago Sami Pietikäinen reported a crash on -RT in
-ip_send_unicast_reply() which was later fixed by Nicholas Mc Guire
-(v3.12.8-rt11). Later (v3.18.8) the code was reworked and I dropped the
-patch. As it turns out, it was a mistake.
-I have reports that the same crash is possible with a similar backtrace.
-It seems that vanilla protects access to this_cpu_ptr() via
-local_bh_disable(). This does not work on -RT since we can have
-NET_RX and NET_TX running in parallel on the same CPU.
-This brings back the old locks.
-
-|Unable to handle kernel NULL pointer dereference at virtual address 00000010
-|PC is at __ip_make_skb+0x198/0x3e8
-|[<c04e39d8>] (__ip_make_skb) from [<c04e3ca8>] (ip_push_pending_frames+0x20/0x40)
-|[<c04e3ca8>] (ip_push_pending_frames) from [<c04e3ff0>] (ip_send_unicast_reply+0x210/0x22c)
-|[<c04e3ff0>] (ip_send_unicast_reply) from [<c04fbb54>] (tcp_v4_send_reset+0x190/0x1c0)
-|[<c04fbb54>] (tcp_v4_send_reset) from [<c04fcc1c>] (tcp_v4_do_rcv+0x22c/0x288)
-|[<c04fcc1c>] (tcp_v4_do_rcv) from [<c0474364>] (release_sock+0xb4/0x150)
-|[<c0474364>] (release_sock) from [<c04ed904>] (tcp_close+0x240/0x454)
-|[<c04ed904>] (tcp_close) from [<c0511408>] (inet_release+0x74/0x7c)
-|[<c0511408>] (inet_release) from [<c0470728>] (sock_release+0x30/0xb0)
-|[<c0470728>] (sock_release) from [<c0470abc>] (sock_close+0x1c/0x24)
-|[<c0470abc>] (sock_close) from [<c0115ec4>] (__fput+0xe8/0x20c)
-|[<c0115ec4>] (__fput) from [<c0116050>] (____fput+0x18/0x1c)
-|[<c0116050>] (____fput) from [<c0058138>] (task_work_run+0xa4/0xb8)
-|[<c0058138>] (task_work_run) from [<c0011478>] (do_work_pending+0xd0/0xe4)
-|[<c0011478>] (do_work_pending) from [<c000e740>] (work_pending+0xc/0x20)
-|Code: e3530001 8a000001 e3a00040 ea000011 (e5973010)
-
-Cc: stable-rt@vger.kernel.org
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- net/ipv4/tcp_ipv4.c | 6 ++++++
- 1 file changed, 6 insertions(+)
-
---- a/net/ipv4/tcp_ipv4.c
-+++ b/net/ipv4/tcp_ipv4.c
-@@ -62,6 +62,7 @@
- #include <linux/init.h>
- #include <linux/times.h>
- #include <linux/slab.h>
-+#include <linux/locallock.h>
-
- #include <net/net_namespace.h>
- #include <net/icmp.h>
-@@ -637,6 +638,7 @@ void tcp_v4_send_check(struct sock *sk,
- }
- EXPORT_SYMBOL(tcp_v4_send_check);
-
-+static DEFINE_LOCAL_IRQ_LOCK(tcp_sk_lock);
- /*
- * This routine will send an RST to the other tcp.
- *
-@@ -771,6 +773,7 @@ static void tcp_v4_send_reset(const stru
- arg.tos = ip_hdr(skb)->tos;
- arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
- local_bh_disable();
-+ local_lock(tcp_sk_lock);
- ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk);
- if (sk)
- ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
-@@ -783,6 +786,7 @@ static void tcp_v4_send_reset(const stru
- ctl_sk->sk_mark = 0;
- __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
- __TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
-+ local_unlock(tcp_sk_lock);
- local_bh_enable();
-
- #ifdef CONFIG_TCP_MD5SIG
-@@ -863,6 +867,7 @@ static void tcp_v4_send_ack(const struct
- arg.tos = tos;
- arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
- local_bh_disable();
-+ local_lock(tcp_sk_lock);
- ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk);
- if (sk)
- ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
-@@ -874,6 +879,7 @@ static void tcp_v4_send_ack(const struct
-
- ctl_sk->sk_mark = 0;
- __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
-+ local_unlock(tcp_sk_lock);
- local_bh_enable();
- }
-
diff --git a/debian/patches-rt/net-another-local-irq-disable-alloc-atomic-headache.patch b/debian/patches-rt/net-another-local-irq-disable-alloc-atomic-headache.patch
index b39f1589f..6a5501521 100644
--- a/debian/patches-rt/net-another-local-irq-disable-alloc-atomic-headache.patch
+++ b/debian/patches-rt/net-another-local-irq-disable-alloc-atomic-headache.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 26 Sep 2012 16:21:08 +0200
Subject: net: Another local_irq_disable/kmalloc headache
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Replace it by a local lock. Though that's pretty inefficient :(
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
-@@ -63,6 +63,7 @@
+@@ -59,6 +59,7 @@
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <net/protocol.h>
#include <net/dst.h>
-@@ -333,6 +334,7 @@ struct napi_alloc_cache {
+@@ -364,6 +365,7 @@ struct napi_alloc_cache {
static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
-@@ -340,10 +342,10 @@ static void *__netdev_alloc_frag(unsigne
+@@ -371,10 +373,10 @@ static void *__netdev_alloc_frag(unsigne
unsigned long flags;
void *data;
@@ -41,7 +41,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return data;
}
-@@ -415,13 +417,13 @@ struct sk_buff *__netdev_alloc_skb(struc
+@@ -446,13 +448,13 @@ struct sk_buff *__netdev_alloc_skb(struc
if (sk_memalloc_socks())
gfp_mask |= __GFP_MEMALLOC;
diff --git a/debian/patches-rt/net-core-protect-users-of-napi_alloc_cache-against-r.patch b/debian/patches-rt/net-core-protect-users-of-napi_alloc_cache-against-r.patch
index 816d8ff6f..a221150a9 100644
--- a/debian/patches-rt/net-core-protect-users-of-napi_alloc_cache-against-r.patch
+++ b/debian/patches-rt/net-core-protect-users-of-napi_alloc_cache-against-r.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 15 Jan 2016 16:33:34 +0100
Subject: net/core: protect users of napi_alloc_cache against
reentrance
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
On -RT the code running in BH cannot be moved to another CPU, so CPU-local
variables remain local. However, the code can be preempted
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
-@@ -335,6 +335,7 @@ struct napi_alloc_cache {
+@@ -366,6 +366,7 @@ struct napi_alloc_cache {
static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock);
@@ -26,7 +26,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
-@@ -366,9 +367,13 @@ EXPORT_SYMBOL(netdev_alloc_frag);
+@@ -397,9 +398,13 @@ EXPORT_SYMBOL(netdev_alloc_frag);
static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
@@ -42,7 +42,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
void *napi_alloc_frag(unsigned int fragsz)
-@@ -464,9 +469,10 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
+@@ -495,9 +500,10 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
gfp_t gfp_mask)
{
@@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
len += NET_SKB_PAD + NET_IP_ALIGN;
-@@ -484,7 +490,10 @@ struct sk_buff *__napi_alloc_skb(struct
+@@ -515,7 +521,10 @@ struct sk_buff *__napi_alloc_skb(struct
if (sk_memalloc_socks())
gfp_mask |= __GFP_MEMALLOC;
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (unlikely(!data))
return NULL;
-@@ -495,7 +504,7 @@ struct sk_buff *__napi_alloc_skb(struct
+@@ -526,7 +535,7 @@ struct sk_buff *__napi_alloc_skb(struct
}
/* use OR instead of assignment to avoid clearing of bits in mask */
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
skb->pfmemalloc = 1;
skb->head_frag = 1;
-@@ -724,23 +733,26 @@ void __consume_stateless_skb(struct sk_b
+@@ -755,23 +764,26 @@ void __consume_stateless_skb(struct sk_b
void __kfree_skb_flush(void)
{
@@ -103,7 +103,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* record skb to CPU local list */
nc->skb_cache[nc->skb_count++] = skb;
-@@ -755,6 +767,7 @@ static inline void _kfree_skb_defer(stru
+@@ -786,6 +798,7 @@ static inline void _kfree_skb_defer(stru
nc->skb_cache);
nc->skb_count = 0;
}
diff --git a/debian/patches-rt/net-core-use-local_bh_disable-in-netif_rx_ni.patch b/debian/patches-rt/net-core-use-local_bh_disable-in-netif_rx_ni.patch
index 80d174c17..dd5c8676c 100644
--- a/debian/patches-rt/net-core-use-local_bh_disable-in-netif_rx_ni.patch
+++ b/debian/patches-rt/net-core-use-local_bh_disable-in-netif_rx_ni.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 16 Jun 2017 19:03:16 +0200
Subject: [PATCH] net/core: use local_bh_disable() in netif_rx_ni()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
In 2004 netif_rx_ni() gained a preempt_disable() section around
netif_rx() and its do_softirq() + testing for it. The do_softirq() part
@@ -19,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -4580,11 +4580,9 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -4562,11 +4562,9 @@ int netif_rx_ni(struct sk_buff *skb)
trace_netif_rx_ni_entry(skb);
diff --git a/debian/patches-rt/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch b/debian/patches-rt/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
index 24de155fd..7c4ed6eb5 100644
--- a/debian/patches-rt/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
+++ b/debian/patches-rt/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 30 Mar 2016 13:36:29 +0200
Subject: [PATCH] net: dev: always take qdisc's busylock in __dev_xmit_skb()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
The root-lock is dropped before dev_hard_start_xmit() is invoked and after
setting the __QDISC___STATE_RUNNING bit. If this task is now pushed away
@@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3500,7 +3500,11 @@ static inline int __dev_xmit_skb(struct
+@@ -3505,7 +3505,11 @@ static inline int __dev_xmit_skb(struct
* This permits qdisc->running owner to get the lock more
* often and dequeue packets faster.
*/
diff --git a/debian/patches-rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch b/debian/patches-rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
index a5ee94394..184755253 100644
--- a/debian/patches-rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
+++ b/debian/patches-rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
@@ -1,7 +1,7 @@
Subject: net: netfilter: Serialize xt_write_recseq sections on RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 28 Oct 2012 11:18:08 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
The netfilter code relies only on the implicit semantics of
local_bh_disable() for serializing xt_write_recseq sections. RT breaks
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <uapi/linux/netfilter/x_tables.h>
/* Test a struct->invflags and a boolean for inequality */
-@@ -345,6 +346,8 @@ void xt_free_table_info(struct xt_table_
+@@ -344,6 +345,8 @@ void xt_free_table_info(struct xt_table_
*/
DECLARE_PER_CPU(seqcount_t, xt_recseq);
@@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* xt_tee_enabled - true if x_tables needs to handle reentrancy
*
* Enabled if current ip(6)tables ruleset has at least one -j TEE rule.
-@@ -365,6 +368,9 @@ static inline unsigned int xt_write_recs
+@@ -364,6 +367,9 @@ static inline unsigned int xt_write_recs
{
unsigned int addend;
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Low order bit of sequence is set if we already
* called xt_write_recseq_begin().
-@@ -395,6 +401,7 @@ static inline void xt_write_recseq_end(u
+@@ -394,6 +400,7 @@ static inline void xt_write_recseq_end(u
/* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
smp_wmb();
__this_cpu_add(xt_recseq.sequence, addend);
@@ -62,7 +62,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/mm.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
-@@ -27,6 +28,11 @@
+@@ -28,6 +29,11 @@
#include "nf_internals.h"
diff --git a/debian/patches-rt/net-make-devnet_rename_seq-a-mutex.patch b/debian/patches-rt/net-make-devnet_rename_seq-a-mutex.patch
index 9758e7c6a..0f3efdf0b 100644
--- a/debian/patches-rt/net-make-devnet_rename_seq-a-mutex.patch
+++ b/debian/patches-rt/net-make-devnet_rename_seq-a-mutex.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 20 Mar 2013 18:06:20 +0100
Subject: net: Add a mutex around devnet_rename_seq
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
On RT write_seqcount_begin() disables preemption and device_rename()
allocates memory with GFP_KERNEL and later grabs the sysfs_mutex
@@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -199,6 +199,7 @@ static unsigned int napi_gen_id = NR_CPU
+@@ -195,6 +195,7 @@ static unsigned int napi_gen_id = NR_CPU
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
static seqcount_t devnet_rename_seq;
@@ -30,7 +30,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static inline void dev_base_seq_inc(struct net *net)
{
-@@ -924,7 +925,8 @@ int netdev_get_name(struct net *net, cha
+@@ -920,7 +921,8 @@ int netdev_get_name(struct net *net, cha
strcpy(name, dev->name);
rcu_read_unlock();
if (read_seqcount_retry(&devnet_rename_seq, seq)) {
@@ -40,7 +40,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
goto retry;
}
-@@ -1201,20 +1203,17 @@ int dev_change_name(struct net_device *d
+@@ -1197,20 +1199,17 @@ int dev_change_name(struct net_device *d
likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
return -EBUSY;
@@ -67,7 +67,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (oldname[0] && !strchr(oldname, '%'))
netdev_info(dev, "renamed from %s\n", oldname);
-@@ -1227,11 +1226,12 @@ int dev_change_name(struct net_device *d
+@@ -1223,11 +1222,12 @@ int dev_change_name(struct net_device *d
if (ret) {
memcpy(dev->name, oldname, IFNAMSIZ);
dev->name_assign_type = old_assign_type;
@@ -83,7 +83,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
netdev_adjacent_rename_links(dev, oldname);
-@@ -1252,7 +1252,8 @@ int dev_change_name(struct net_device *d
+@@ -1248,7 +1248,8 @@ int dev_change_name(struct net_device *d
/* err >= 0 after dev_alloc_name() or stores the first errno */
if (err >= 0) {
err = ret;
@@ -93,7 +93,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
memcpy(dev->name, oldname, IFNAMSIZ);
memcpy(oldname, newname, IFNAMSIZ);
dev->name_assign_type = old_assign_type;
-@@ -1265,6 +1266,11 @@ int dev_change_name(struct net_device *d
+@@ -1261,6 +1262,11 @@ int dev_change_name(struct net_device *d
}
return err;
diff --git a/debian/patches-rt/net-move-xmit_recursion-to-per-task-variable-on-RT.patch b/debian/patches-rt/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
deleted file mode 100644
index 1805e89ff..000000000
--- a/debian/patches-rt/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
+++ /dev/null
@@ -1,266 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 13 Jan 2016 15:55:02 +0100
-Subject: net: move xmit_recursion to per-task variable on -RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-A softirq on -RT can be preempted. That means one task is in
-__dev_queue_xmit(), gets preempted and another task may enter
-__dev_queue_xmit() as well. netperf together with a bridge device
-will then trigger the `recursion alert` because each task increments
-the xmit_recursion variable which is per-CPU.
-A virtual device like br0 is required to trigger this warning.
-
-This patch moves the lock owner and counter to be per task instead of per-CPU so
-it counts the recursion properly on -RT. The owner is also a task now and not a
-CPU number.
-
-Cc: stable-rt@vger.kernel.org
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/netdevice.h | 95 ++++++++++++++++++++++++++++++++++++++++++----
- include/linux/sched.h | 3 +
- net/core/dev.c | 15 ++++---
- net/core/filter.c | 6 +-
- 4 files changed, 104 insertions(+), 15 deletions(-)
-
---- a/include/linux/netdevice.h
-+++ b/include/linux/netdevice.h
-@@ -616,7 +616,11 @@ struct netdev_queue {
- * write-mostly part
- */
- spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ struct task_struct *xmit_lock_owner;
-+#else
- int xmit_lock_owner;
-+#endif
- /*
- * Time (in jiffies) of last Tx
- */
-@@ -2651,14 +2655,53 @@ void netdev_freemem(struct net_device *d
- void synchronize_net(void);
- int init_dummy_netdev(struct net_device *dev);
-
--DECLARE_PER_CPU(int, xmit_recursion);
- #define XMIT_RECURSION_LIMIT 10
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static inline int dev_recursion_level(void)
-+{
-+ return current->xmit_recursion;
-+}
-+
-+static inline int xmit_rec_read(void)
-+{
-+ return current->xmit_recursion;
-+}
-+
-+static inline void xmit_rec_inc(void)
-+{
-+ current->xmit_recursion++;
-+}
-+
-+static inline void xmit_rec_dec(void)
-+{
-+ current->xmit_recursion--;
-+}
-+
-+#else
-+
-+DECLARE_PER_CPU(int, xmit_recursion);
-
- static inline int dev_recursion_level(void)
- {
- return this_cpu_read(xmit_recursion);
- }
-
-+static inline int xmit_rec_read(void)
-+{
-+ return __this_cpu_read(xmit_recursion);
-+}
-+
-+static inline void xmit_rec_inc(void)
-+{
-+ __this_cpu_inc(xmit_recursion);
-+}
-+
-+static inline void xmit_rec_dec(void)
-+{
-+ __this_cpu_dec(xmit_recursion);
-+}
-+#endif
-+
- struct net_device *dev_get_by_index(struct net *net, int ifindex);
- struct net_device *__dev_get_by_index(struct net *net, int ifindex);
- struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
-@@ -3868,10 +3911,48 @@ static inline u32 netif_msg_init(int deb
- return (1U << debug_value) - 1;
- }
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
-+{
-+ txq->xmit_lock_owner = current;
-+}
-+
-+static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
-+{
-+ txq->xmit_lock_owner = NULL;
-+}
-+
-+static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
-+{
-+ if (txq->xmit_lock_owner != NULL)
-+ return true;
-+ return false;
-+}
-+
-+#else
-+
-+static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
-+{
-+ txq->xmit_lock_owner = cpu;
-+}
-+
-+static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
-+{
-+ txq->xmit_lock_owner = -1;
-+}
-+
-+static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
-+{
-+ if (txq->xmit_lock_owner != -1)
-+ return true;
-+ return false;
-+}
-+#endif
-+
- static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
- {
- spin_lock(&txq->_xmit_lock);
-- txq->xmit_lock_owner = cpu;
-+ netdev_queue_set_owner(txq, cpu);
- }
-
- static inline bool __netif_tx_acquire(struct netdev_queue *txq)
-@@ -3888,32 +3969,32 @@ static inline void __netif_tx_release(st
- static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
- {
- spin_lock_bh(&txq->_xmit_lock);
-- txq->xmit_lock_owner = smp_processor_id();
-+ netdev_queue_set_owner(txq, smp_processor_id());
- }
-
- static inline bool __netif_tx_trylock(struct netdev_queue *txq)
- {
- bool ok = spin_trylock(&txq->_xmit_lock);
- if (likely(ok))
-- txq->xmit_lock_owner = smp_processor_id();
-+ netdev_queue_set_owner(txq, smp_processor_id());
- return ok;
- }
-
- static inline void __netif_tx_unlock(struct netdev_queue *txq)
- {
-- txq->xmit_lock_owner = -1;
-+ netdev_queue_clear_owner(txq);
- spin_unlock(&txq->_xmit_lock);
- }
-
- static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
- {
-- txq->xmit_lock_owner = -1;
-+ netdev_queue_clear_owner(txq);
- spin_unlock_bh(&txq->_xmit_lock);
- }
-
- static inline void txq_trans_update(struct netdev_queue *txq)
- {
-- if (txq->xmit_lock_owner != -1)
-+ if (netdev_queue_has_owner(txq))
- txq->trans_start = jiffies;
- }
-
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -1213,6 +1213,9 @@ struct task_struct {
- #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
- unsigned long task_state_change;
- #endif
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ int xmit_recursion;
-+#endif
- int pagefault_disabled;
- #ifdef CONFIG_MMU
- struct task_struct *oom_reaper_list;
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -3572,8 +3572,10 @@ static void skb_update_prio(struct sk_bu
- #define skb_update_prio(skb)
- #endif
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- DEFINE_PER_CPU(int, xmit_recursion);
- EXPORT_SYMBOL(xmit_recursion);
-+#endif
-
- /**
- * dev_loopback_xmit - loop back @skb
-@@ -3864,9 +3866,12 @@ static int __dev_queue_xmit(struct sk_bu
- if (dev->flags & IFF_UP) {
- int cpu = smp_processor_id(); /* ok because BHs are off */
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ if (txq->xmit_lock_owner != current) {
-+#else
- if (txq->xmit_lock_owner != cpu) {
-- if (unlikely(__this_cpu_read(xmit_recursion) >
-- XMIT_RECURSION_LIMIT))
-+#endif
-+ if (unlikely(xmit_rec_read() > XMIT_RECURSION_LIMIT))
- goto recursion_alert;
-
- skb = validate_xmit_skb(skb, dev, &again);
-@@ -3876,9 +3881,9 @@ static int __dev_queue_xmit(struct sk_bu
- HARD_TX_LOCK(dev, txq, cpu);
-
- if (!netif_xmit_stopped(txq)) {
-- __this_cpu_inc(xmit_recursion);
-+ xmit_rec_inc();
- skb = dev_hard_start_xmit(skb, dev, txq, &rc);
-- __this_cpu_dec(xmit_recursion);
-+ xmit_rec_dec();
- if (dev_xmit_complete(rc)) {
- HARD_TX_UNLOCK(dev, txq);
- goto out;
-@@ -8501,7 +8506,7 @@ static void netdev_init_one_queue(struct
- /* Initialize queue lock */
- spin_lock_init(&queue->_xmit_lock);
- netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
-- queue->xmit_lock_owner = -1;
-+ netdev_queue_clear_owner(queue);
- netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
- queue->dev = dev;
- #ifdef CONFIG_BQL
---- a/net/core/filter.c
-+++ b/net/core/filter.c
-@@ -2002,7 +2002,7 @@ static inline int __bpf_tx_skb(struct ne
- {
- int ret;
-
-- if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
-+ if (unlikely(xmit_rec_read() > XMIT_RECURSION_LIMIT)) {
- net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
- kfree_skb(skb);
- return -ENETDOWN;
-@@ -2010,9 +2010,9 @@ static inline int __bpf_tx_skb(struct ne
-
- skb->dev = dev;
-
-- __this_cpu_inc(xmit_recursion);
-+ xmit_rec_inc();
- ret = dev_queue_xmit(skb);
-- __this_cpu_dec(xmit_recursion);
-+ xmit_rec_dec();
-
- return ret;
- }
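
To make the per-task counter idea concrete, here is a minimal user-space sketch (illustrative names only, not the kernel code): with a thread-local counter each task tracks its own transmit nesting depth, so a preempted sender cannot inflate the count another task observes the way a shared per-CPU counter can.

    /* Minimal sketch; C11 for _Thread_local */
    #include <stdio.h>

    #define XMIT_RECURSION_LIMIT 10

    /* analogue of current->xmit_recursion on PREEMPT_RT_FULL */
    static _Thread_local int xmit_recursion;

    static int  xmit_rec_read(void) { return xmit_recursion; }
    static void xmit_rec_inc(void)  { xmit_recursion++; }
    static void xmit_rec_dec(void)  { xmit_recursion--; }

    static int dev_queue_xmit_sketch(int depth)
    {
        if (xmit_rec_read() > XMIT_RECURSION_LIMIT) {
            fprintf(stderr, "recursion alert at depth %d\n", depth);
            return -1;
        }
        xmit_rec_inc();
        /* a virtual device (e.g. a bridge) may re-enter the xmit path */
        if (depth < 3)
            dev_queue_xmit_sketch(depth + 1);
        xmit_rec_dec();
        return 0;
    }

    int main(void)
    {
        return dev_queue_xmit_sketch(0) ? 1 : 0;
    }
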
diff --git a/debian/patches-rt/net-provide-a-way-to-delegate-processing-a-softirq-t.patch b/debian/patches-rt/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
deleted file mode 100644
index 0b04d3a4c..000000000
--- a/debian/patches-rt/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
+++ /dev/null
@@ -1,79 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 20 Jan 2016 15:39:05 +0100
-Subject: net: provide a way to delegate processing a softirq to
- ksoftirqd
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-If the NET_RX softirq uses up all of its budget it moves the following NAPI
-invocations into the `ksoftirqd`. On -RT it does not do so. Instead it
-raises the NET_RX softirq in its current context again.
-
-In order to get closer to mainline's behaviour this patch provides
-__raise_softirq_irqoff_ksoft() which raises the softirq in ksoftirqd.
-
-Cc: stable-rt@vger.kernel.org
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/interrupt.h | 8 ++++++++
- kernel/softirq.c | 21 +++++++++++++++++++++
- net/core/dev.c | 2 +-
- 3 files changed, 30 insertions(+), 1 deletion(-)
-
---- a/include/linux/interrupt.h
-+++ b/include/linux/interrupt.h
-@@ -539,6 +539,14 @@ extern void thread_do_softirq(void);
- extern void open_softirq(int nr, void (*action)(struct softirq_action *));
- extern void softirq_init(void);
- extern void __raise_softirq_irqoff(unsigned int nr);
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+extern void __raise_softirq_irqoff_ksoft(unsigned int nr);
-+#else
-+static inline void __raise_softirq_irqoff_ksoft(unsigned int nr)
-+{
-+ __raise_softirq_irqoff(nr);
-+}
-+#endif
-
- extern void raise_softirq_irqoff(unsigned int nr);
- extern void raise_softirq(unsigned int nr);
---- a/kernel/softirq.c
-+++ b/kernel/softirq.c
-@@ -724,6 +724,27 @@ void __raise_softirq_irqoff(unsigned int
- }
-
- /*
-+ * Same as __raise_softirq_irqoff() but will process them in ksoftirqd
-+ */
-+void __raise_softirq_irqoff_ksoft(unsigned int nr)
-+{
-+ unsigned int mask;
-+
-+ if (WARN_ON_ONCE(!__this_cpu_read(ksoftirqd) ||
-+ !__this_cpu_read(ktimer_softirqd)))
-+ return;
-+ mask = 1UL << nr;
-+
-+ trace_softirq_raise(nr);
-+ or_softirq_pending(mask);
-+ if (mask & TIMER_SOFTIRQS)
-+ __this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask;
-+ else
-+ __this_cpu_read(ksoftirqd)->softirqs_raised |= mask;
-+ wakeup_proper_softirq(nr);
-+}
-+
-+/*
- * This function must run with irqs disabled!
- */
- void raise_softirq_irqoff(unsigned int nr)
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -6466,7 +6466,7 @@ static __latent_entropy void net_rx_acti
- list_splice_tail(&repoll, &list);
- list_splice(&list, &sd->poll_list);
- if (!list_empty(&sd->poll_list))
-- __raise_softirq_irqoff(NET_RX_SOFTIRQ);
-+ __raise_softirq_irqoff_ksoft(NET_RX_SOFTIRQ);
-
- net_rps_action_and_irq_enable(sd);
- out:
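
As a user-space picture of the delegation (hypothetical names throughout, not the kernel implementation): when the bounded poll loop runs out of budget, the remaining work is signalled over to a dedicated worker thread — the analogue of raising the softirq for ksoftirqd rather than re-running it inline.

    /* Minimal sketch; compile with: cc -pthread delegate.c */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define BUDGET 4

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  kick = PTHREAD_COND_INITIALIZER;
    static int pending = 10;        /* work units still queued */
    static bool handed_off;

    /* stand-in for ksoftirqd: finishes what the poll loop left over */
    static void *ksoftirqd_sketch(void *arg)
    {
        pthread_mutex_lock(&lock);
        while (!handed_off)
            pthread_cond_wait(&kick, &lock);
        while (pending > 0)
            printf("worker: %d units left\n", pending--);
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t worker;
        int budget = BUDGET;

        pthread_create(&worker, NULL, ksoftirqd_sketch, NULL);

        pthread_mutex_lock(&lock);
        while (pending > 0 && budget-- > 0)
            pending--;              /* bounded inline processing */
        handed_off = true;          /* budget gone: delegate the rest */
        pthread_cond_signal(&kick);
        pthread_mutex_unlock(&lock);

        pthread_join(worker, NULL);
        return 0;
    }
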
diff --git a/debian/patches-rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch b/debian/patches-rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
index f718d0a96..6da0c6f47 100644
--- a/debian/patches-rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
+++ b/debian/patches-rt/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
@@ -1,7 +1,7 @@
From: Marc Kleine-Budde <mkl@pengutronix.de>
Date: Wed, 5 Mar 2014 00:49:47 +0100
Subject: net: sched: Use msleep() instead of yield()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
On PREEMPT_RT enabled systems the interrupt handlers run as threads at prio 50
(by default). If a high priority userspace process tries to shut down a busy
@@ -47,7 +47,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
-@@ -1223,7 +1223,7 @@ void dev_deactivate_many(struct list_hea
+@@ -1203,7 +1203,7 @@ void dev_deactivate_many(struct list_hea
/* Wait for outstanding qdisc_run calls. */
list_for_each_entry(dev, head, close_list) {
while (some_qdisc_is_busy(dev))
diff --git a/debian/patches-rt/net-use-cpu-chill.patch b/debian/patches-rt/net-use-cpu-chill.patch
index 11237a164..a10d946fe 100644
--- a/debian/patches-rt/net-use-cpu-chill.patch
+++ b/debian/patches-rt/net-use-cpu-chill.patch
@@ -1,7 +1,7 @@
Subject: net: Use cpu_chill() instead of cpu_relax()
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 07 Mar 2012 21:10:04 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Retry loops on RT might loop forever when the modifying side was
preempted. Use cpu_chill() instead of cpu_relax() to let the system
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
-@@ -63,6 +63,7 @@
+@@ -57,6 +57,7 @@
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-@@ -667,7 +668,7 @@ static void prb_retire_rx_blk_timer_expi
+@@ -659,7 +660,7 @@ static void prb_retire_rx_blk_timer_expi
if (BLOCK_NUM_PKTS(pbd)) {
while (atomic_read(&pkc->blk_fill_in_prog)) {
/* Waiting for skb_copy_bits to finish... */
@@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -929,7 +930,7 @@ static void prb_retire_current_block(str
+@@ -921,7 +922,7 @@ static void prb_retire_current_block(str
if (!(status & TP_STATUS_BLK_TMO)) {
while (atomic_read(&pkc->blk_fill_in_prog)) {
/* Waiting for skb_copy_bits to finish... */
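
A small user-space illustration of the pattern (illustrative code, not the kernel implementation): the retrying side sleeps briefly per iteration, cpu_chill()-style, instead of busy-spinning cpu_relax()-style, so a preempted writer sharing the CPU can run and make progress.

    /* Minimal sketch; compile with: cc -pthread chill.c */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <time.h>

    static atomic_int done;

    static void cpu_chill_sketch(void)
    {
        /* roughly what cpu_chill() does on RT: a short sleep */
        struct timespec ts = { .tv_sec = 0, .tv_nsec = 1000000 }; /* 1 ms */
        nanosleep(&ts, NULL);
    }

    static void *writer(void *arg)
    {
        atomic_store(&done, 1);     /* the "modifying side" finishing */
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, writer, NULL);
        while (!atomic_load(&done))
            cpu_chill_sketch();     /* yield CPU time to the writer */
        pthread_join(t, NULL);
        puts("writer finished; reader proceeds");
        return 0;
    }
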
diff --git a/debian/patches-rt/net_disable_NET_RX_BUSY_POLL.patch b/debian/patches-rt/net_disable_NET_RX_BUSY_POLL.patch
index df32d63b9..89b0c59a2 100644
--- a/debian/patches-rt/net_disable_NET_RX_BUSY_POLL.patch
+++ b/debian/patches-rt/net_disable_NET_RX_BUSY_POLL.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Sat, 27 May 2017 19:02:06 +0200
Subject: net/core: disable NET_RX_BUSY_POLL
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
sk_busy_loop() does preempt_disable() followed by a few operations which can
take sleeping locks and may take a long time.
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/Kconfig
+++ b/net/Kconfig
-@@ -279,7 +279,7 @@ config CGROUP_NET_CLASSID
+@@ -280,7 +280,7 @@ config CGROUP_NET_CLASSID
config NET_RX_BUSY_POLL
bool
diff --git a/debian/patches-rt/of-allocate-free-phandle-cache-outside-of-the-devtre.patch b/debian/patches-rt/of-allocate-free-phandle-cache-outside-of-the-devtre.patch
index 6aade76fb..aa30e86c3 100644
--- a/debian/patches-rt/of-allocate-free-phandle-cache-outside-of-the-devtre.patch
+++ b/debian/patches-rt/of-allocate-free-phandle-cache-outside-of-the-devtre.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 31 Aug 2018 14:16:30 +0200
Subject: [PATCH] of: allocate / free phandle cache outside of the devtree_lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
The phandle cache code allocates memory while holding devtree_lock which
is a raw_spinlock_t. Memory allocation (and free()) is not possible on
diff --git a/debian/patches-rt/oleg-signal-rt-fix.patch b/debian/patches-rt/oleg-signal-rt-fix.patch
index 52c0a2d42..d70e34f26 100644
--- a/debian/patches-rt/oleg-signal-rt-fix.patch
+++ b/debian/patches-rt/oleg-signal-rt-fix.patch
@@ -1,7 +1,7 @@
From: Oleg Nesterov <oleg@redhat.com>
Date: Tue, 14 Jul 2015 14:26:34 +0200
Subject: signal/x86: Delay calling signals in atomic
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
On x86_64 we must disable preemption before we enable interrupts
for stack faults, int3 and debugging, because the current task is using
@@ -39,7 +39,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
-@@ -152,6 +152,13 @@ static void exit_to_usermode_loop(struct
+@@ -153,6 +153,13 @@ static void exit_to_usermode_loop(struct
if (cached_flags & _TIF_NEED_RESCHED)
schedule();
@@ -77,7 +77,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -881,6 +881,10 @@ struct task_struct {
+@@ -875,6 +875,10 @@ struct task_struct {
/* Restored if set_restore_sigmask() was used: */
sigset_t saved_sigmask;
struct sigpending pending;
@@ -90,7 +90,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
unsigned int sas_ss_flags;
--- a/kernel/signal.c
+++ b/kernel/signal.c
-@@ -1268,8 +1268,8 @@ int do_send_sig_info(int sig, struct ker
+@@ -1274,8 +1274,8 @@ int do_send_sig_info(int sig, struct ker
* We don't want to have recursive SIGSEGV's etc, for example,
* that is why we also clear SIGNAL_UNKILLABLE.
*/
@@ -101,7 +101,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
unsigned long int flags;
int ret, blocked, ignored;
-@@ -1298,6 +1298,39 @@ force_sig_info(int sig, struct kernel_si
+@@ -1304,6 +1304,39 @@ force_sig_info(int sig, struct kernel_si
return ret;
}
diff --git a/debian/patches-rt/panic-disable-random-on-rt.patch b/debian/patches-rt/panic-disable-random-on-rt.patch
index 21717bac5..49096c530 100644
--- a/debian/patches-rt/panic-disable-random-on-rt.patch
+++ b/debian/patches-rt/panic-disable-random-on-rt.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 14 Jul 2015 14:26:34 +0200
Subject: panic: skip get_random_bytes for RT_FULL in init_oops_id
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Disable on -RT. If this is invoked from irq-context we will have problems
acquiring the sleeping lock.
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/panic.c
+++ b/kernel/panic.c
-@@ -507,9 +507,11 @@ static u64 oops_id;
+@@ -512,9 +512,11 @@ static u64 oops_id;
static int init_oops_id(void)
{
diff --git a/debian/patches-rt/pci-switchtec-Don-t-use-completion-s-wait-queue.patch b/debian/patches-rt/pci-switchtec-Don-t-use-completion-s-wait-queue.patch
index fbe545ba4..40481fc36 100644
--- a/debian/patches-rt/pci-switchtec-Don-t-use-completion-s-wait-queue.patch
+++ b/debian/patches-rt/pci-switchtec-Don-t-use-completion-s-wait-queue.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 4 Oct 2017 10:24:23 +0200
Subject: [PATCH] pci/switchtec: Don't use completion's wait queue
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
The poll callback is using completion's wait_queue_head_t member and
puts it in poll_wait() so the poll() caller gets a wakeup after command
@@ -97,7 +97,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
ret |= EPOLLIN | EPOLLRDNORM;
if (stuser->event_cnt != atomic_read(&stdev->event_cnt))
-@@ -1087,7 +1090,8 @@ static void stdev_kill(struct switchtec_
+@@ -1102,7 +1105,8 @@ static void stdev_kill(struct switchtec_
/* Wake up and kill any users waiting on an MRPC request */
list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
diff --git a/debian/patches-rt/percpu-include-irqflags.h-for-raw_local_irq_save.patch b/debian/patches-rt/percpu-include-irqflags.h-for-raw_local_irq_save.patch
index 4bb7feb47..bc88df67e 100644
--- a/debian/patches-rt/percpu-include-irqflags.h-for-raw_local_irq_save.patch
+++ b/debian/patches-rt/percpu-include-irqflags.h-for-raw_local_irq_save.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 11 Oct 2018 16:39:59 +0200
Subject: [PATCH] percpu: include irqflags.h for raw_local_irq_save()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
The percpu.h header file is using raw_local_irq_save() but does
not include irqflags.h for its definition. It compiles because the
diff --git a/debian/patches-rt/peterz-percpu-rwsem-rt.patch b/debian/patches-rt/peterz-percpu-rwsem-rt.patch
deleted file mode 100644
index cdd0d0f6e..000000000
--- a/debian/patches-rt/peterz-percpu-rwsem-rt.patch
+++ /dev/null
@@ -1,219 +0,0 @@
-Subject: locking/percpu-rwsem: Remove preempt_disable variants
-From: Peter Zijlstra <peterz@infradead.org>
-Date: Wed Nov 23 16:29:32 CET 2016
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-Effective revert commit:
-
- 87709e28dc7c ("fs/locks: Use percpu_down_read_preempt_disable()")
-
-This is causing major pain for PREEMPT_RT and is only a very small
-performance issue for PREEMPT=y.
-
-Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
----
----
- fs/locks.c | 32 ++++++++++++++++----------------
- include/linux/percpu-rwsem.h | 24 ++++--------------------
- 2 files changed, 20 insertions(+), 36 deletions(-)
-
---- a/fs/locks.c
-+++ b/fs/locks.c
-@@ -1058,7 +1058,7 @@ static int flock_lock_inode(struct inode
- return -ENOMEM;
- }
-
-- percpu_down_read_preempt_disable(&file_rwsem);
-+ percpu_down_read(&file_rwsem);
- spin_lock(&ctx->flc_lock);
- if (request->fl_flags & FL_ACCESS)
- goto find_conflict;
-@@ -1100,7 +1100,7 @@ static int flock_lock_inode(struct inode
-
- out:
- spin_unlock(&ctx->flc_lock);
-- percpu_up_read_preempt_enable(&file_rwsem);
-+ percpu_up_read(&file_rwsem);
- if (new_fl)
- locks_free_lock(new_fl);
- locks_dispose_list(&dispose);
-@@ -1138,7 +1138,7 @@ static int posix_lock_inode(struct inode
- new_fl2 = locks_alloc_lock();
- }
-
-- percpu_down_read_preempt_disable(&file_rwsem);
-+ percpu_down_read(&file_rwsem);
- spin_lock(&ctx->flc_lock);
- /*
- * New lock request. Walk all POSIX locks and look for conflicts. If
-@@ -1317,7 +1317,7 @@ static int posix_lock_inode(struct inode
- }
- out:
- spin_unlock(&ctx->flc_lock);
-- percpu_up_read_preempt_enable(&file_rwsem);
-+ percpu_up_read(&file_rwsem);
- /*
- * Free any unused locks.
- */
-@@ -1589,7 +1589,7 @@ int __break_lease(struct inode *inode, u
- return error;
- }
-
-- percpu_down_read_preempt_disable(&file_rwsem);
-+ percpu_down_read(&file_rwsem);
- spin_lock(&ctx->flc_lock);
-
- time_out_leases(inode, &dispose);
-@@ -1641,13 +1641,13 @@ int __break_lease(struct inode *inode, u
- locks_insert_block(fl, new_fl, leases_conflict);
- trace_break_lease_block(inode, new_fl);
- spin_unlock(&ctx->flc_lock);
-- percpu_up_read_preempt_enable(&file_rwsem);
-+ percpu_up_read(&file_rwsem);
-
- locks_dispose_list(&dispose);
- error = wait_event_interruptible_timeout(new_fl->fl_wait,
- !new_fl->fl_blocker, break_time);
-
-- percpu_down_read_preempt_disable(&file_rwsem);
-+ percpu_down_read(&file_rwsem);
- spin_lock(&ctx->flc_lock);
- trace_break_lease_unblock(inode, new_fl);
- locks_delete_block(new_fl);
-@@ -1664,7 +1664,7 @@ int __break_lease(struct inode *inode, u
- }
- out:
- spin_unlock(&ctx->flc_lock);
-- percpu_up_read_preempt_enable(&file_rwsem);
-+ percpu_up_read(&file_rwsem);
- locks_dispose_list(&dispose);
- locks_free_lock(new_fl);
- return error;
-@@ -1734,7 +1734,7 @@ int fcntl_getlease(struct file *filp)
-
- ctx = smp_load_acquire(&inode->i_flctx);
- if (ctx && !list_empty_careful(&ctx->flc_lease)) {
-- percpu_down_read_preempt_disable(&file_rwsem);
-+ percpu_down_read(&file_rwsem);
- spin_lock(&ctx->flc_lock);
- time_out_leases(inode, &dispose);
- list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
-@@ -1744,7 +1744,7 @@ int fcntl_getlease(struct file *filp)
- break;
- }
- spin_unlock(&ctx->flc_lock);
-- percpu_up_read_preempt_enable(&file_rwsem);
-+ percpu_up_read(&file_rwsem);
-
- locks_dispose_list(&dispose);
- }
-@@ -1818,7 +1818,7 @@ generic_add_lease(struct file *filp, lon
- return -EINVAL;
- }
-
-- percpu_down_read_preempt_disable(&file_rwsem);
-+ percpu_down_read(&file_rwsem);
- spin_lock(&ctx->flc_lock);
- time_out_leases(inode, &dispose);
- error = check_conflicting_open(dentry, arg, lease->fl_flags);
-@@ -1889,7 +1889,7 @@ generic_add_lease(struct file *filp, lon
- lease->fl_lmops->lm_setup(lease, priv);
- out:
- spin_unlock(&ctx->flc_lock);
-- percpu_up_read_preempt_enable(&file_rwsem);
-+ percpu_up_read(&file_rwsem);
- locks_dispose_list(&dispose);
- if (is_deleg)
- inode_unlock(inode);
-@@ -1912,7 +1912,7 @@ static int generic_delete_lease(struct f
- return error;
- }
-
-- percpu_down_read_preempt_disable(&file_rwsem);
-+ percpu_down_read(&file_rwsem);
- spin_lock(&ctx->flc_lock);
- list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
- if (fl->fl_file == filp &&
-@@ -1925,7 +1925,7 @@ static int generic_delete_lease(struct f
- if (victim)
- error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
- spin_unlock(&ctx->flc_lock);
-- percpu_up_read_preempt_enable(&file_rwsem);
-+ percpu_up_read(&file_rwsem);
- locks_dispose_list(&dispose);
- return error;
- }
-@@ -2648,13 +2648,13 @@ locks_remove_lease(struct file *filp, st
- if (list_empty(&ctx->flc_lease))
- return;
-
-- percpu_down_read_preempt_disable(&file_rwsem);
-+ percpu_down_read(&file_rwsem);
- spin_lock(&ctx->flc_lock);
- list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
- if (filp == fl->fl_file)
- lease_modify(fl, F_UNLCK, &dispose);
- spin_unlock(&ctx->flc_lock);
-- percpu_up_read_preempt_enable(&file_rwsem);
-+ percpu_up_read(&file_rwsem);
-
- locks_dispose_list(&dispose);
- }
---- a/include/linux/percpu-rwsem.h
-+++ b/include/linux/percpu-rwsem.h
-@@ -29,7 +29,7 @@ static struct percpu_rw_semaphore name =
- extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
- extern void __percpu_up_read(struct percpu_rw_semaphore *);
-
--static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *sem)
-+static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
- {
- might_sleep();
-
-@@ -47,16 +47,10 @@ static inline void percpu_down_read_pree
- __this_cpu_inc(*sem->read_count);
- if (unlikely(!rcu_sync_is_idle(&sem->rss)))
- __percpu_down_read(sem, false); /* Unconditional memory barrier */
-- barrier();
- /*
-- * The barrier() prevents the compiler from
-+ * The preempt_enable() prevents the compiler from
- * bleeding the critical section out.
- */
--}
--
--static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
--{
-- percpu_down_read_preempt_disable(sem);
- preempt_enable();
- }
-
-@@ -83,13 +77,9 @@ static inline int percpu_down_read_trylo
- return ret;
- }
-
--static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem)
-+static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
- {
-- /*
-- * The barrier() prevents the compiler from
-- * bleeding the critical section out.
-- */
-- barrier();
-+ preempt_disable();
- /*
- * Same as in percpu_down_read().
- */
-@@ -102,12 +92,6 @@ static inline void percpu_up_read_preemp
- rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
- }
-
--static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
--{
-- preempt_disable();
-- percpu_up_read_preempt_enable(sem);
--}
--
- extern void percpu_down_write(struct percpu_rw_semaphore *);
- extern void percpu_up_write(struct percpu_rw_semaphore *);
-
diff --git a/debian/patches-rt/pid.h-include-atomic.h.patch b/debian/patches-rt/pid.h-include-atomic.h.patch
index 8f23ca552..3d34cf337 100644
--- a/debian/patches-rt/pid.h-include-atomic.h.patch
+++ b/debian/patches-rt/pid.h-include-atomic.h.patch
@@ -1,7 +1,7 @@
From: Grygorii Strashko <Grygorii.Strashko@linaro.org>
Date: Tue, 21 Jul 2015 19:43:56 +0300
Subject: pid.h: include atomic.h
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
This patch fixes a build error:
CC kernel/pid_namespace.o
diff --git a/debian/patches-rt/posix-timers-expiry-lock.patch b/debian/patches-rt/posix-timers-expiry-lock.patch
new file mode 100644
index 000000000..9a37dc334
--- /dev/null
+++ b/debian/patches-rt/posix-timers-expiry-lock.patch
@@ -0,0 +1,270 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Mon, 27 May 2019 16:54:06 +0200
+Subject: [PATCH] posix-timers: Add expiry lock
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
+
+If an about-to-be-removed posix timer is active then the code will retry the
+delete operation until it succeeds or the timer callback completes.
+
+Use hrtimer_grab_expiry_lock() for posix timers which use an hrtimer underneath
+to spin on a lock until the callback has finished.
+
+Introduce cpu_timers_grab_expiry_lock() for the posix-cpu-timer. This will
+acquire the per-CPU spin_lock which is held by the CPU that is
+expiring the timer.
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ fs/timerfd.c | 6 +++++-
+ include/linux/hrtimer.h | 1 +
+ include/linux/posix-timers.h | 1 +
+ kernel/time/alarmtimer.c | 2 +-
+ kernel/time/hrtimer.c | 2 +-
+ kernel/time/itimer.c | 1 +
+ kernel/time/posix-cpu-timers.c | 23 +++++++++++++++++++++++
+ kernel/time/posix-timers.c | 38 +++++++++++++++++++++++++++++---------
+ kernel/time/posix-timers.h | 2 ++
+ 9 files changed, 64 insertions(+), 12 deletions(-)
+
+--- a/fs/timerfd.c
++++ b/fs/timerfd.c
+@@ -471,7 +471,11 @@ static int do_timerfd_settime(int ufd, i
+ break;
+ }
+ spin_unlock_irq(&ctx->wqh.lock);
+- cpu_relax();
++
++ if (isalarm(ctx))
++ hrtimer_grab_expiry_lock(&ctx->t.alarm.timer);
++ else
++ hrtimer_grab_expiry_lock(&ctx->t.tmr);
+ }
+
+ /*
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -400,6 +400,7 @@ static inline void hrtimer_start(struct
+
+ extern int hrtimer_cancel(struct hrtimer *timer);
+ extern int hrtimer_try_to_cancel(struct hrtimer *timer);
++extern void hrtimer_grab_expiry_lock(const struct hrtimer *timer);
+
+ static inline void hrtimer_start_expires(struct hrtimer *timer,
+ enum hrtimer_mode mode)
+--- a/include/linux/posix-timers.h
++++ b/include/linux/posix-timers.h
+@@ -15,6 +15,7 @@ struct cpu_timer_list {
+ u64 expires;
+ struct task_struct *task;
+ int firing;
++ int firing_cpu;
+ };
+
+ /*
+--- a/kernel/time/alarmtimer.c
++++ b/kernel/time/alarmtimer.c
+@@ -433,7 +433,7 @@ int alarm_cancel(struct alarm *alarm)
+ int ret = alarm_try_to_cancel(alarm);
+ if (ret >= 0)
+ return ret;
+- cpu_relax();
++ hrtimer_grab_expiry_lock(&alarm->timer);
+ }
+ }
+ EXPORT_SYMBOL_GPL(alarm_cancel);
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -930,7 +930,7 @@ u64 hrtimer_forward(struct hrtimer *time
+ }
+ EXPORT_SYMBOL_GPL(hrtimer_forward);
+
+-static void hrtimer_grab_expiry_lock(const struct hrtimer *timer)
++void hrtimer_grab_expiry_lock(const struct hrtimer *timer)
+ {
+ struct hrtimer_clock_base *base = timer->base;
+
+--- a/kernel/time/itimer.c
++++ b/kernel/time/itimer.c
+@@ -213,6 +213,7 @@ int do_setitimer(int which, struct itime
+ /* We are sharing ->siglock with it_real_fn() */
+ if (hrtimer_try_to_cancel(timer) < 0) {
+ spin_unlock_irq(&tsk->sighand->siglock);
++ hrtimer_grab_expiry_lock(timer);
+ goto again;
+ }
+ expires = timeval_to_ktime(value->it_value);
+--- a/kernel/time/posix-cpu-timers.c
++++ b/kernel/time/posix-cpu-timers.c
+@@ -785,6 +785,7 @@ check_timers_list(struct list_head *time
+ return t->expires;
+
+ t->firing = 1;
++ t->firing_cpu = smp_processor_id();
+ list_move_tail(&t->entry, firing);
+ }
+
+@@ -1127,6 +1128,20 @@ static inline int fastpath_timer_check(s
+ return 0;
+ }
+
++static DEFINE_PER_CPU(spinlock_t, cpu_timer_expiry_lock) = __SPIN_LOCK_UNLOCKED(cpu_timer_expiry_lock);
++
++void cpu_timers_grab_expiry_lock(struct k_itimer *timer)
++{
++ int cpu = timer->it.cpu.firing_cpu;
++
++ if (cpu >= 0) {
++ spinlock_t *expiry_lock = per_cpu_ptr(&cpu_timer_expiry_lock, cpu);
++
++ spin_lock_irq(expiry_lock);
++ spin_unlock_irq(expiry_lock);
++ }
++}
++
+ /*
+ * This is called from the timer interrupt handler. The irq handler has
+ * already updated our counts. We need to check if any timers fire now.
+@@ -1137,6 +1152,7 @@ void run_posix_cpu_timers(struct task_st
+ LIST_HEAD(firing);
+ struct k_itimer *timer, *next;
+ unsigned long flags;
++ spinlock_t *expiry_lock;
+
+ lockdep_assert_irqs_disabled();
+
+@@ -1147,6 +1163,9 @@ void run_posix_cpu_timers(struct task_st
+ if (!fastpath_timer_check(tsk))
+ return;
+
++ expiry_lock = this_cpu_ptr(&cpu_timer_expiry_lock);
++ spin_lock(expiry_lock);
++
+ if (!lock_task_sighand(tsk, &flags))
+ return;
+ /*
+@@ -1181,6 +1200,7 @@ void run_posix_cpu_timers(struct task_st
+ list_del_init(&timer->it.cpu.entry);
+ cpu_firing = timer->it.cpu.firing;
+ timer->it.cpu.firing = 0;
++ timer->it.cpu.firing_cpu = -1;
+ /*
+ * The firing flag is -1 if we collided with a reset
+ * of the timer, which already reported this
+@@ -1190,6 +1210,7 @@ void run_posix_cpu_timers(struct task_st
+ cpu_timer_fire(timer);
+ spin_unlock(&timer->it_lock);
+ }
++ spin_unlock(expiry_lock);
+ }
+
+ /*
+@@ -1308,6 +1329,8 @@ static int do_cpu_nanosleep(const clocki
+ spin_unlock_irq(&timer.it_lock);
+
+ while (error == TIMER_RETRY) {
++
++ cpu_timers_grab_expiry_lock(&timer);
+ /*
+ * We need to handle case when timer was or is in the
+ * middle of firing. In other cases we already freed
+--- a/kernel/time/posix-timers.c
++++ b/kernel/time/posix-timers.c
+@@ -805,6 +805,17 @@ static int common_hrtimer_try_to_cancel(
+ return hrtimer_try_to_cancel(&timr->it.real.timer);
+ }
+
++static void timer_wait_for_callback(const struct k_clock *kc, struct k_itimer *timer)
++{
++ if (kc->timer_arm == common_hrtimer_arm)
++ hrtimer_grab_expiry_lock(&timer->it.real.timer);
++ else if (kc == &alarm_clock)
++ hrtimer_grab_expiry_lock(&timer->it.alarm.alarmtimer.timer);
++ else
++ /* posix-cpu-timers */
++ cpu_timers_grab_expiry_lock(timer);
++}
++
+ /* Set a POSIX.1b interval timer. */
+ int common_timer_set(struct k_itimer *timr, int flags,
+ struct itimerspec64 *new_setting,
+@@ -870,11 +881,15 @@ static int do_timer_settime(timer_t time
+ else
+ error = kc->timer_set(timr, flags, new_spec64, old_spec64);
+
+- unlock_timer(timr, flag);
+ if (error == TIMER_RETRY) {
++ rcu_read_lock();
++ unlock_timer(timr, flag);
++ timer_wait_for_callback(kc, timr);
++ rcu_read_unlock();
+ old_spec64 = NULL; // We already got the old time...
+ goto retry;
+ }
++ unlock_timer(timr, flag);
+
+ return error;
+ }
+@@ -936,13 +951,21 @@ int common_timer_del(struct k_itimer *ti
+ return 0;
+ }
+
+-static inline int timer_delete_hook(struct k_itimer *timer)
++static int timer_delete_hook(struct k_itimer *timer)
+ {
+ const struct k_clock *kc = timer->kclock;
++ int ret;
+
+ if (WARN_ON_ONCE(!kc || !kc->timer_del))
+ return -EINVAL;
+- return kc->timer_del(timer);
++ ret = kc->timer_del(timer);
++ if (ret == TIMER_RETRY) {
++ rcu_read_lock();
++ spin_unlock_irq(&timer->it_lock);
++ timer_wait_for_callback(kc, timer);
++ rcu_read_unlock();
++ }
++ return ret;
+ }
+
+ /* Delete a POSIX.1b interval timer. */
+@@ -956,10 +979,8 @@ SYSCALL_DEFINE1(timer_delete, timer_t, t
+ if (!timer)
+ return -EINVAL;
+
+- if (timer_delete_hook(timer) == TIMER_RETRY) {
+- unlock_timer(timer, flags);
++ if (timer_delete_hook(timer) == TIMER_RETRY)
+ goto retry_delete;
+- }
+
+ spin_lock(&current->sighand->siglock);
+ list_del(&timer->list);
+@@ -985,10 +1006,9 @@ static void itimer_delete(struct k_itime
+ retry_delete:
+ spin_lock_irqsave(&timer->it_lock, flags);
+
+- if (timer_delete_hook(timer) == TIMER_RETRY) {
+- unlock_timer(timer, flags);
++ if (timer_delete_hook(timer) == TIMER_RETRY)
+ goto retry_delete;
+- }
++
+ list_del(&timer->list);
+ /*
+ * This keeps any tasks waiting on the spin lock from thinking
+--- a/kernel/time/posix-timers.h
++++ b/kernel/time/posix-timers.h
+@@ -32,6 +32,8 @@ extern const struct k_clock clock_proces
+ extern const struct k_clock clock_thread;
+ extern const struct k_clock alarm_clock;
+
++extern void cpu_timers_grab_expiry_lock(struct k_itimer *timer);
++
+ int posix_timer_event(struct k_itimer *timr, int si_private);
+
+ void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting);
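
The waiting scheme this patch introduces can be pictured with an ordinary mutex in user space (hypothetical names; the kernel uses a per-CPU or per-base lock): the firing side holds the lock across the callback, and a deleter that raced with it simply locks and unlocks the same lock, blocking exactly until the callback is done.

    /* Minimal sketch; compile with: cc -pthread expiry.c */
    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t expiry_lock = PTHREAD_MUTEX_INITIALIZER;

    static void *timer_expiry(void *arg)
    {
        pthread_mutex_lock(&expiry_lock);
        usleep(100000);             /* the timer callback running */
        puts("callback done");
        pthread_mutex_unlock(&expiry_lock);
        return NULL;
    }

    static void grab_expiry_lock(void)
    {
        /* lock/unlock: returns only once the callback has finished */
        pthread_mutex_lock(&expiry_lock);
        pthread_mutex_unlock(&expiry_lock);
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, timer_expiry, NULL);
        usleep(10000);              /* let the callback start first */
        grab_expiry_lock();         /* delete path waiting it out */
        puts("safe to free the timer");
        pthread_join(t, NULL);
        return 0;
    }
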
diff --git a/debian/patches-rt/posix-timers-move-rcu-out-of-union.patch b/debian/patches-rt/posix-timers-move-rcu-out-of-union.patch
new file mode 100644
index 000000000..80bdc22d7
--- /dev/null
+++ b/debian/patches-rt/posix-timers-move-rcu-out-of-union.patch
@@ -0,0 +1,52 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 27 May 2019 16:54:05 +0200
+Subject: [PATCH] posix-timers: move rcu out of union
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
+
+On RT the timer can be preempted while running and therefore we wait
+with timer_wait_for_callback() for the timer to complete (instead of
+busy looping). The RCU read lock is held to ensure that this posix timer
+is not removed while we wait on it.
+If the timer is removed then it invokes call_rcu() with a pointer that
+is shared with the hrtimer because it is part of the same union.
+In order to avoid any possible side effects I am moving the rcu pointer
+out of the union.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/posix-timers.h | 2 +-
+ kernel/time/posix-timers.c | 4 ++--
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/include/linux/posix-timers.h
++++ b/include/linux/posix-timers.h
+@@ -114,8 +114,8 @@ struct k_itimer {
+ struct {
+ struct alarm alarmtimer;
+ } alarm;
+- struct rcu_head rcu;
+ } it;
++ struct rcu_head rcu;
+ };
+
+ void run_posix_cpu_timers(struct task_struct *task);
+--- a/kernel/time/posix-timers.c
++++ b/kernel/time/posix-timers.c
+@@ -442,7 +442,7 @@ static struct k_itimer * alloc_posix_tim
+
+ static void k_itimer_rcu_free(struct rcu_head *head)
+ {
+- struct k_itimer *tmr = container_of(head, struct k_itimer, it.rcu);
++ struct k_itimer *tmr = container_of(head, struct k_itimer, rcu);
+
+ kmem_cache_free(posix_timers_cache, tmr);
+ }
+@@ -459,7 +459,7 @@ static void release_posix_timer(struct k
+ }
+ put_pid(tmr->it_pid);
+ sigqueue_free(tmr->sigq);
+- call_rcu(&tmr->it.rcu, k_itimer_rcu_free);
++ call_rcu(&tmr->rcu, k_itimer_rcu_free);
+ }
+
+ static int common_timer_create(struct k_itimer *new_timer)
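
A compile-and-run sketch of the layout change (stand-in struct definitions, not the real kernel types): before the patch the rcu head shares bytes with the timer union member, so queuing the RCU callback reuses storage a waiter may still inspect; afterwards it occupies its own field.

    #include <stddef.h>
    #include <stdio.h>

    struct rcu_head_sketch { void *next; void (*func)(void *); };
    struct hrtimer_sketch  { long expires; void *base; };

    struct k_itimer_before {
        union {
            struct hrtimer_sketch  timer;
            struct rcu_head_sketch rcu;   /* overlays the timer */
        } it;
    };

    struct k_itimer_after {
        union {
            struct hrtimer_sketch timer;
        } it;
        struct rcu_head_sketch rcu;       /* independent storage */
    };

    int main(void)
    {
        printf("before: rcu at %zu, timer at %zu (same bytes)\n",
               offsetof(struct k_itimer_before, it.rcu),
               offsetof(struct k_itimer_before, it.timer));
        printf("after:  rcu at %zu, timer at %zu (disjoint)\n",
               offsetof(struct k_itimer_after, rcu),
               offsetof(struct k_itimer_after, it.timer));
        return 0;
    }
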
diff --git a/debian/patches-rt/posix-timers-thread-posix-cpu-timers-on-rt.patch b/debian/patches-rt/posix-timers-thread-posix-cpu-timers-on-rt.patch
index bd95e3fdb..28171cf98 100644
--- a/debian/patches-rt/posix-timers-thread-posix-cpu-timers-on-rt.patch
+++ b/debian/patches-rt/posix-timers-thread-posix-cpu-timers-on-rt.patch
@@ -1,7 +1,7 @@
From: John Stultz <johnstul@us.ibm.com>
Date: Fri, 3 Jul 2009 08:29:58 -0500
Subject: posix-timers: Thread posix-cpu-timers on -rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
posix-cpu-timer code takes non-rt-safe locks in hard irq
context. Move it to a thread.
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -832,6 +832,9 @@ struct task_struct {
+@@ -826,6 +826,9 @@ struct task_struct {
#ifdef CONFIG_POSIX_TIMERS
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Process credentials: */
--- a/init/init_task.c
+++ b/init/init_task.c
-@@ -50,6 +50,12 @@ static struct sighand_struct init_sighan
+@@ -51,6 +51,12 @@ static struct sighand_struct init_sighan
.signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(init_sighand.signalfd_wqh),
};
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Set up the first task table, touch at your own risk!. Base=0,
* limit=0x1fffff (=2MB)
-@@ -119,6 +125,7 @@ struct task_struct init_task
+@@ -120,6 +126,7 @@ struct task_struct init_task
INIT_CPU_TIMERS(init_task)
.pi_lock = __RAW_SPIN_LOCK_UNLOCKED(init_task.pi_lock),
.timer_slack_ns = 50000, /* 50 usec default slack */
@@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
.thread_node = LIST_HEAD_INIT(init_signals.thread_head),
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -1621,6 +1621,9 @@ static void rt_mutex_init_task(struct ta
+@@ -1647,6 +1647,9 @@ static void rt_mutex_init_task(struct ta
*/
static void posix_cpu_timers_init(struct task_struct *tsk)
{
@@ -86,7 +86,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include "posix-timers.h"
-@@ -1133,14 +1136,12 @@ static inline int fastpath_timer_check(s
+@@ -1147,15 +1150,13 @@ void cpu_timers_grab_expiry_lock(struct
* already updated our counts. We need to check if any timers fire now.
* Interrupts are disabled.
*/
@@ -96,14 +96,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
LIST_HEAD(firing);
struct k_itimer *timer, *next;
unsigned long flags;
+ spinlock_t *expiry_lock;
- lockdep_assert_irqs_disabled();
-
/*
* The fast path checks that there are no expired thread or thread
* group timers. If that's so, just return.
-@@ -1193,6 +1194,153 @@ void run_posix_cpu_timers(struct task_st
- }
+@@ -1213,6 +1214,153 @@ void run_posix_cpu_timers(struct task_st
+ spin_unlock(expiry_lock);
}
+#ifdef CONFIG_PREEMPT_RT_BASE
diff --git a/debian/patches-rt/power-disable-highmem-on-rt.patch b/debian/patches-rt/power-disable-highmem-on-rt.patch
index 4d26856f4..6d02e4933 100644
--- a/debian/patches-rt/power-disable-highmem-on-rt.patch
+++ b/debian/patches-rt/power-disable-highmem-on-rt.patch
@@ -1,7 +1,7 @@
Subject: powerpc: Disable highmem on RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 18 Jul 2011 17:08:34 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
The current highmem handling on -RT is not compatible and needs fixups.
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
-@@ -395,7 +395,7 @@ menu "Kernel options"
+@@ -386,7 +386,7 @@ menu "Kernel options"
config HIGHMEM
bool "High memory support"
diff --git a/debian/patches-rt/power-use-generic-rwsem-on-rt.patch b/debian/patches-rt/power-use-generic-rwsem-on-rt.patch
deleted file mode 100644
index a5ade7962..000000000
--- a/debian/patches-rt/power-use-generic-rwsem-on-rt.patch
+++ /dev/null
@@ -1,27 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Tue, 14 Jul 2015 14:26:34 +0200
-Subject: powerpc: Use generic rwsem on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-Use generic code which uses rtmutex
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- arch/powerpc/Kconfig | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
---- a/arch/powerpc/Kconfig
-+++ b/arch/powerpc/Kconfig
-@@ -105,10 +105,11 @@ config LOCKDEP_SUPPORT
-
- config RWSEM_GENERIC_SPINLOCK
- bool
-+ default y if PREEMPT_RT_FULL
-
- config RWSEM_XCHGADD_ALGORITHM
- bool
-- default y
-+ default y if !PREEMPT_RT_FULL
-
- config GENERIC_LOCKBREAK
- bool
diff --git a/debian/patches-rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch b/debian/patches-rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch
index 43d65e0c5..e9deff223 100644
--- a/debian/patches-rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch
+++ b/debian/patches-rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch
@@ -1,7 +1,7 @@
From: Bogdan Purcareata <bogdan.purcareata@freescale.com>
Date: Fri, 24 Apr 2015 15:53:13 +0000
Subject: powerpc/kvm: Disable in-kernel MPIC emulation for PREEMPT_RT_FULL
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
While converting the openpic emulation code to use a raw_spinlock_t enables
guests to run on RT, there's still a performance issue. For interrupts sent in
@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
-@@ -178,6 +178,7 @@ config KVM_E500MC
+@@ -177,6 +177,7 @@ config KVM_E500MC
config KVM_MPIC
bool "KVM in-kernel MPIC emulation"
depends on KVM && E500
diff --git a/debian/patches-rt/powerpc-preempt-lazy-support.patch b/debian/patches-rt/powerpc-preempt-lazy-support.patch
index 9c92643a3..0890cb3dd 100644
--- a/debian/patches-rt/powerpc-preempt-lazy-support.patch
+++ b/debian/patches-rt/powerpc-preempt-lazy-support.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 1 Nov 2012 10:14:11 +0100
Subject: powerpc: Add support for lazy preemption
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Implement the powerpc pieces for lazy preempt.
@@ -10,32 +10,32 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arch/powerpc/Kconfig | 1 +
arch/powerpc/include/asm/thread_info.h | 16 ++++++++++++----
arch/powerpc/kernel/asm-offsets.c | 1 +
- arch/powerpc/kernel/entry_32.S | 29 +++++++++++++++++++----------
- arch/powerpc/kernel/entry_64.S | 26 ++++++++++++++++++--------
- 5 files changed, 51 insertions(+), 22 deletions(-)
+ arch/powerpc/kernel/entry_32.S | 23 ++++++++++++++++-------
+ arch/powerpc/kernel/entry_64.S | 24 +++++++++++++++++-------
+ 5 files changed, 47 insertions(+), 18 deletions(-)
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
-@@ -219,6 +219,7 @@ config PPC
+@@ -213,6 +213,7 @@ config PPC
select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_PREEMPT_LAZY
select HAVE_RCU_TABLE_FREE if SMP
- select HAVE_REGS_AND_STACK_ACCESS_API
- select HAVE_RELIABLE_STACKTRACE if PPC64 && CPU_LITTLE_ENDIAN
+ select HAVE_RCU_TABLE_NO_INVALIDATE if HAVE_RCU_TABLE_FREE
+ select HAVE_MMU_GATHER_PAGE_SIZE
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
-@@ -38,6 +38,8 @@ struct thread_info {
- int cpu; /* cpu we're on */
+@@ -30,6 +30,8 @@
+ struct thread_info {
int preempt_count; /* 0 => preemptable,
<0 => BUG */
-+ int preempt_lazy_count; /* 0 => preemptable,
++ int preempt_lazy_count; /* 0 => preemptable,
+ <0 => BUG */
unsigned long local_flags; /* private flags for thread */
#ifdef CONFIG_LIVEPATCH
unsigned long *livepatch_sp;
-@@ -99,11 +101,12 @@ void arch_setup_new_exec(void);
+@@ -80,11 +82,12 @@ void arch_setup_new_exec(void);
#define TIF_SINGLESTEP 8 /* singlestepping active */
#define TIF_NOHZ 9 /* in adaptive nohz mode */
#define TIF_SECCOMP 10 /* secure computing */
@@ -51,7 +51,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
for stack store? */
#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
-@@ -112,6 +115,9 @@ void arch_setup_new_exec(void);
+@@ -93,6 +96,9 @@ void arch_setup_new_exec(void);
#endif
#define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_32BIT 20 /* 32 bit binary */
@@ -61,7 +61,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* as above, but as bit values */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
-@@ -131,6 +137,7 @@ void arch_setup_new_exec(void);
+@@ -112,6 +118,7 @@ void arch_setup_new_exec(void);
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
#define _TIF_NOHZ (1<<TIF_NOHZ)
@@ -69,7 +69,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define _TIF_FSCHECK (1<<TIF_FSCHECK)
#define _TIF_SYSCALL_EMU (1<<TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
-@@ -140,8 +147,9 @@ void arch_setup_new_exec(void);
+@@ -121,8 +128,9 @@ void arch_setup_new_exec(void);
#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
_TIF_NOTIFY_RESUME | _TIF_UPROBE | \
_TIF_RESTORE_TM | _TIF_PATCH_PENDING | \
@@ -82,19 +82,19 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
-@@ -161,6 +161,7 @@ int main(void)
+@@ -167,6 +167,7 @@ int main(void)
OFFSET(TI_FLAGS, thread_info, flags);
OFFSET(TI_LOCAL_FLAGS, thread_info, local_flags);
OFFSET(TI_PREEMPT, thread_info, preempt_count);
+ OFFSET(TI_PREEMPT_LAZY, thread_info, preempt_lazy_count);
- OFFSET(TI_TASK, thread_info, task);
- OFFSET(TI_CPU, thread_info, cpu);
+ #ifdef CONFIG_PPC64
+ OFFSET(DCACHEL1BLOCKSIZE, ppc64_caches, l1d.block_size);
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
-@@ -393,7 +393,9 @@ reenable_mmu: /* re-enable mmu so we
+@@ -400,7 +400,9 @@
MTMSRD(r10)
- lwz r9,TI_FLAGS(r12)
+ lwz r9,TI_FLAGS(r2)
li r8,-MAX_ERRNO
- andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
+ lis r0,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)@h
@@ -103,7 +103,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
bne- syscall_exit_work
cmplw 0,r3,r8
blt+ syscall_exit_cont
-@@ -511,13 +513,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE
+@@ -515,13 +517,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE
b syscall_dotrace_cont
syscall_exit_work:
@@ -119,52 +119,37 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
bne- 1f
lwz r11,_CCR(r1) /* Load CR */
neg r3,r3
-@@ -526,12 +528,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE
+@@ -530,12 +532,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE
1: stw r6,RESULT(r1) /* Save result */
stw r3,GPR3(r1) /* Update return value */
-2: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
-+2: andis. r0,r9,(_TIF_PERSYSCALL_MASK)@h
++2: andi. r0,r9,(_TIF_PERSYSCALL_MASK)@h
beq 4f
/* Clear per-syscall TIF flags if any are set. */
- li r11,_TIF_PERSYSCALL_MASK
-+ lis r11,_TIF_PERSYSCALL_MASK@h
- addi r12,r12,TI_FLAGS
++ li r11,_TIF_PERSYSCALL_MASK@h
+ addi r12,r2,TI_FLAGS
3: lwarx r8,0,r12
andc r8,r8,r11
-@@ -888,7 +890,14 @@ user_exc_return: /* r10 contains MSR_KE
+@@ -890,7 +892,14 @@ user_exc_return: /* r10 contains MSR_KE
cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
- bne restore
+ bne restore_kuap
andi. r8,r8,_TIF_NEED_RESCHED
+ bne+ 1f
+ lwz r0,TI_PREEMPT_LAZY(r9)
-+ cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
-+ bne restore
++ cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
++ bne restore_kuap
+ lwz r0,TI_FLAGS(r9)
+ andi. r0,r0,_TIF_NEED_RESCHED_LAZY
- beq+ restore
+ beq+ restore_kuap
+1:
lwz r3,_MSR(r1)
andi. r0,r3,MSR_EE /* interrupts off? */
- beq restore /* don't schedule if so */
-@@ -899,11 +908,11 @@ user_exc_return: /* r10 contains MSR_KE
- */
- bl trace_hardirqs_off
- #endif
--1: bl preempt_schedule_irq
-+2: bl preempt_schedule_irq
- CURRENT_THREAD_INFO(r9, r1)
- lwz r3,TI_FLAGS(r9)
-- andi. r0,r3,_TIF_NEED_RESCHED
-- bne- 1b
-+ andi. r0,r3,_TIF_NEED_RESCHED_MASK
-+ bne- 2b
- #ifdef CONFIG_TRACE_IRQFLAGS
- /* And now, to properly rebalance the above, we tell lockdep they
- * are being turned back on, which will happen when we return
-@@ -1232,7 +1241,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE
+ beq restore_kuap /* don't schedule if so */
+@@ -1211,7 +1220,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
do_work: /* r10 contains MSR_KERNEL here */
@@ -173,10 +158,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
beq do_user_signal
do_resched: /* r10 contains MSR_KERNEL here */
-@@ -1253,7 +1262,7 @@ do_resched: /* r10 contains MSR_KERNEL
+@@ -1232,7 +1241,7 @@ do_resched: /* r10 contains MSR_KERNEL
+ SYNC
MTMSRD(r10) /* disable interrupts */
- CURRENT_THREAD_INFO(r9, r1)
- lwz r9,TI_FLAGS(r9)
+ lwz r9,TI_FLAGS(r2)
- andi. r0,r9,_TIF_NEED_RESCHED
+ andi. r0,r9,_TIF_NEED_RESCHED_MASK
bne- do_resched
@@ -184,7 +169,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
beq restore_user
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
-@@ -253,7 +253,9 @@ system_call: /* label this so stack tr
+@@ -249,7 +249,9 @@ system_call: /* label this so stack tr
ld r9,TI_FLAGS(r12)
li r11,-MAX_ERRNO
@@ -195,7 +180,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
bne- .Lsyscall_exit_work
andi. r0,r8,MSR_FP
-@@ -370,25 +372,25 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+@@ -372,25 +374,25 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
If TIF_NOERROR is set, just save r3 as it is. */
@@ -225,7 +210,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
addi r12,r12,TI_FLAGS
3: ldarx r10,0,r12
andc r10,r10,r11
-@@ -780,7 +782,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+@@ -784,7 +786,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
bl restore_math
b restore
#endif
@@ -234,7 +219,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
beq 2f
bl restore_interrupts
SCHEDULE_USER
-@@ -842,10 +844,18 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+@@ -846,10 +848,18 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
#ifdef CONFIG_PREEMPT
/* Check if we need to preempt */
@@ -254,12 +239,3 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
cmpwi cr0,r8,0
bne restore
ld r0,SOFTE(r1)
-@@ -862,7 +872,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
- /* Re-test flags and eventually loop */
- CURRENT_THREAD_INFO(r9, r1)
- ld r4,TI_FLAGS(r9)
-- andi. r0,r4,_TIF_NEED_RESCHED
-+ andi. r0,r4,_TIF_NEED_RESCHED_MASK
- bne 1b
-
- /*
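The hunks above replace every _TIF_NEED_RESCHED test in the powerpc entry code with _TIF_NEED_RESCHED_MASK. As a minimal sketch of how that mask is typically built in the lazy-preemption patches — the concrete bit number for TIF_NEED_RESCHED_LAZY is an assumption here, only the OR of the two flags comes from the patch:

```c
/* Sketch only: bit numbers are assumed; the mask shape mirrors the
 * _TIF_NEED_RESCHED_MASK tests in the asm hunks above. */
#define TIF_NEED_RESCHED	2	/* as in mainline powerpc */
#define TIF_NEED_RESCHED_LAZY	9	/* assumed free bit */

#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
#define _TIF_NEED_RESCHED_LAZY	(1 << TIF_NEED_RESCHED_LAZY)

/* The asm tests this with andi./andis. instead of _TIF_NEED_RESCHED: */
#define _TIF_NEED_RESCHED_MASK	(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
```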
diff --git a/debian/patches-rt/powerpc-pseries-iommu-Use-a-locallock-instead-local_ir.patch b/debian/patches-rt/powerpc-pseries-iommu-Use-a-locallock-instead-local_ir.patch
index aa70e602b..7d99de126 100644
--- a/debian/patches-rt/powerpc-pseries-iommu-Use-a-locallock-instead-local_ir.patch
+++ b/debian/patches-rt/powerpc-pseries-iommu-Use-a-locallock-instead-local_ir.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 26 Mar 2019 18:31:54 +0100
Subject: [PATCH] powerpc/pseries/iommu: Use a locallock instead
local_irq_save()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
The locallock protects the per-CPU variable tce_page. The function
attempts to allocate memory while tce_page is protected (by disabling
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
-@@ -38,6 +38,7 @@
+@@ -24,6 +24,7 @@
#include <linux/of.h>
#include <linux/iommu.h>
#include <linux/rculist.h>
@@ -26,7 +26,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/rtas.h>
-@@ -191,6 +192,7 @@ static int tce_build_pSeriesLP(struct io
+@@ -177,6 +178,7 @@ static int tce_build_pSeriesLP(struct io
}
static DEFINE_PER_CPU(__be64 *, tce_page);
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
long npages, unsigned long uaddr,
-@@ -211,7 +213,8 @@ static int tce_buildmulti_pSeriesLP(stru
+@@ -197,7 +199,8 @@ static int tce_buildmulti_pSeriesLP(stru
direction, attrs);
}
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
tcep = __this_cpu_read(tce_page);
-@@ -222,7 +225,7 @@ static int tce_buildmulti_pSeriesLP(stru
+@@ -208,7 +211,7 @@ static int tce_buildmulti_pSeriesLP(stru
tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
/* If allocation fails, fall back to the loop implementation */
if (!tcep) {
@@ -53,7 +53,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
direction, attrs);
}
-@@ -256,7 +259,7 @@ static int tce_buildmulti_pSeriesLP(stru
+@@ -242,7 +245,7 @@ static int tce_buildmulti_pSeriesLP(stru
tcenum += limit;
} while (npages > 0 && !rc);
@@ -62,7 +62,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
ret = (int)rc;
-@@ -414,13 +417,14 @@ static int tce_setrange_multi_pSeriesLP(
+@@ -400,13 +403,14 @@ static int tce_setrange_multi_pSeriesLP(
u64 rc = 0;
long l, limit;
@@ -79,7 +79,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return -ENOMEM;
}
__this_cpu_write(tce_page, tcep);
-@@ -466,7 +470,7 @@ static int tce_setrange_multi_pSeriesLP(
+@@ -452,7 +456,7 @@ static int tce_setrange_multi_pSeriesLP(
/* error cleanup: caller will clear whole range */
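A minimal sketch of the pattern this patch applies; the lock variable name is an assumption, since the added DEFINE_LOCAL_IRQ_LOCK() line is elided in the hunks above:

```c
#include <linux/locallock.h>

/* On RT the local lock is a per-CPU sleeping lock; on !RT it falls
 * back to local_irq_save()/restore(), keeping the old semantics. */
static DEFINE_PER_CPU(__be64 *, tce_page);
static DEFINE_LOCAL_IRQ_LOCK(tce_page_lock);	/* name assumed */

static void tce_page_use_sketch(void)
{
	unsigned long flags;
	__be64 *tcep;

	local_lock_irqsave(tce_page_lock, flags);	/* was local_irq_save() */
	tcep = __this_cpu_read(tce_page);
	/* ... build TCE entries, with a GFP_ATOMIC fallback on failure ... */
	local_unlock_irqrestore(tce_page_lock, flags);	/* was local_irq_restore() */
}
```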
diff --git a/debian/patches-rt/powerpc-stackprotector-work-around-stack-guard-init-.patch b/debian/patches-rt/powerpc-stackprotector-work-around-stack-guard-init-.patch
index 9444fffc7..fe9c479d6 100644
--- a/debian/patches-rt/powerpc-stackprotector-work-around-stack-guard-init-.patch
+++ b/debian/patches-rt/powerpc-stackprotector-work-around-stack-guard-init-.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 26 Mar 2019 18:31:29 +0100
Subject: [PATCH ] powerpc/stackprotector: work around stack-guard init from
atomic
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
This is invoked from the secondary CPU in atomic context. On x86 we use
tsc instead. On Power we XOR it against mftb(), so let's use the stack address
diff --git a/debian/patches-rt/preempt-lazy-support.patch b/debian/patches-rt/preempt-lazy-support.patch
index e2f10b6e4..ac8a9e0b8 100644
--- a/debian/patches-rt/preempt-lazy-support.patch
+++ b/debian/patches-rt/preempt-lazy-support.patch
@@ -1,7 +1,7 @@
Subject: sched: Add support for lazy preemption
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 26 Oct 2012 18:50:54 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
It has become an obsession to mitigate the determinism vs. throughput
loss of RT. Looking at the mainline semantics of preemption points
@@ -63,10 +63,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
kernel/sched/fair.c | 16 ++++----
kernel/sched/features.h | 3 +
kernel/sched/sched.h | 9 ++++
- kernel/trace/trace.c | 36 ++++++++++--------
+ kernel/trace/trace.c | 35 ++++++++++--------
kernel/trace/trace.h | 2 +
kernel/trace/trace_output.c | 14 ++++++-
- 13 files changed, 228 insertions(+), 29 deletions(-)
+ 13 files changed, 227 insertions(+), 29 deletions(-)
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -220,7 +220,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define TRACE_EVENT_TYPE_MAX \
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
-@@ -6,6 +6,12 @@ config PREEMPT_RT_BASE
+@@ -7,6 +7,12 @@ config PREEMPT_RT_BASE
bool
select PREEMPT
@@ -235,7 +235,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
default PREEMPT_NONE
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -304,11 +304,13 @@ void pin_current_cpu(void)
+@@ -305,11 +305,13 @@ void pin_current_cpu(void)
return;
}
cpu = smp_processor_id();
@@ -251,7 +251,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
goto again;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -505,6 +505,48 @@ void resched_curr(struct rq *rq)
+@@ -544,6 +544,48 @@ void resched_curr(struct rq *rq)
trace_sched_wake_idle_without_ipi(cpu);
}
@@ -300,7 +300,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void resched_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
-@@ -2424,6 +2466,9 @@ int sched_fork(unsigned long clone_flags
+@@ -2456,6 +2498,9 @@ int sched_fork(unsigned long clone_flags
p->on_cpu = 0;
#endif
init_task_preempt_count(p);
@@ -310,7 +310,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -3484,6 +3529,7 @@ static void __sched notrace __schedule(b
+@@ -3514,6 +3559,7 @@ static void __sched notrace __schedule(b
next = pick_next_task(rq, prev, &rf);
clear_tsk_need_resched(prev);
@@ -318,7 +318,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_preempt_need_resched();
if (likely(prev != next)) {
-@@ -3664,6 +3710,30 @@ static void __sched notrace preempt_sche
+@@ -3697,6 +3743,30 @@ static void __sched notrace preempt_sche
} while (need_resched());
}
@@ -349,7 +349,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PREEMPT
/*
* this is the entry point to schedule() from in-kernel preemption
-@@ -3678,7 +3748,8 @@ asmlinkage __visible void __sched notrac
+@@ -3711,7 +3781,8 @@ asmlinkage __visible void __sched notrac
*/
if (likely(!preemptible()))
return;
@@ -359,7 +359,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_schedule_common();
}
NOKPROBE_SYMBOL(preempt_schedule);
-@@ -3705,6 +3776,9 @@ asmlinkage __visible void __sched notrac
+@@ -3738,6 +3809,9 @@ asmlinkage __visible void __sched notrac
if (likely(!preemptible()))
return;
@@ -369,7 +369,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
/*
* Because the function tracer can trace preempt_count_sub()
-@@ -5471,7 +5545,9 @@ void init_idle(struct task_struct *idle,
+@@ -5503,7 +5577,9 @@ void init_idle(struct task_struct *idle,
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
@@ -380,7 +380,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The idle tasks have their own, simple scheduling class:
*/
-@@ -7195,6 +7271,7 @@ void migrate_disable(void)
+@@ -7259,6 +7335,7 @@ void migrate_disable(void)
}
preempt_disable();
@@ -388,15 +388,14 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pin_current_cpu();
migrate_disable_update_cpus_allowed(p);
-@@ -7262,6 +7339,7 @@ void migrate_enable(void)
+@@ -7326,12 +7403,14 @@ void migrate_enable(void)
arg.dest_cpu = dest_cpu;
unpin_current_cpu();
+ preempt_lazy_enable();
preempt_enable();
stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
- tlb_migrate_finish(p->mm);
-@@ -7270,6 +7348,7 @@ void migrate_enable(void)
+ return;
}
}
unpin_current_cpu();
@@ -406,7 +405,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
EXPORT_SYMBOL(migrate_enable);
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
-@@ -4029,7 +4029,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -4050,7 +4050,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
ideal_runtime = sched_slice(cfs_rq, curr);
delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
if (delta_exec > ideal_runtime) {
@@ -415,7 +414,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The current task ran long enough, ensure it doesn't get
* re-elected due to buddy favours.
-@@ -4053,7 +4053,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -4074,7 +4074,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
return;
if (delta > ideal_runtime)
@@ -424,7 +423,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void
-@@ -4195,7 +4195,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
+@@ -4216,7 +4216,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
* validating it and just reschedule.
*/
if (queued) {
@@ -433,7 +432,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
}
/*
-@@ -4379,7 +4379,7 @@ static void __account_cfs_rq_runtime(str
+@@ -4400,7 +4400,7 @@ static void __account_cfs_rq_runtime(str
* hierarchy can be throttled
*/
if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
@@ -442,7 +441,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static __always_inline
-@@ -5075,7 +5075,7 @@ static void hrtick_start_fair(struct rq
+@@ -5110,7 +5110,7 @@ static void hrtick_start_fair(struct rq
if (delta < 0) {
if (rq->curr == p)
@@ -451,7 +450,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
}
hrtick_start(rq, delta);
-@@ -6904,7 +6904,7 @@ static void check_preempt_wakeup(struct
+@@ -6950,7 +6950,7 @@ static void check_preempt_wakeup(struct
return;
preempt:
@@ -460,7 +459,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
-@@ -10100,7 +10100,7 @@ static void task_fork_fair(struct task_s
+@@ -10222,7 +10222,7 @@ static void task_fork_fair(struct task_s
* 'current' within the tree based on its new key value.
*/
swap(curr->vruntime, se->vruntime);
@@ -469,7 +468,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
se->vruntime -= cfs_rq->min_vruntime;
-@@ -10124,7 +10124,7 @@ prio_changed_fair(struct rq *rq, struct
+@@ -10246,7 +10246,7 @@ prio_changed_fair(struct rq *rq, struct
*/
if (rq->curr == p) {
if (p->prio > oldprio)
@@ -492,7 +491,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -1767,6 +1767,15 @@ extern void reweight_task(struct task_st
+@@ -1786,6 +1786,15 @@ extern void reweight_task(struct task_st
extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);
@@ -510,7 +509,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -2134,6 +2134,7 @@ tracing_generic_entry_update(struct trac
+@@ -2318,6 +2318,7 @@ tracing_generic_entry_update(struct trac
struct task_struct *tsk = current;
entry->preempt_count = pc & 0xff;
@@ -518,7 +517,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
entry->pid = (tsk) ? tsk->pid : 0;
entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
-@@ -2144,7 +2145,8 @@ tracing_generic_entry_update(struct trac
+@@ -2328,7 +2329,8 @@ tracing_generic_entry_update(struct trac
((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
@@ -528,7 +527,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
-@@ -3347,15 +3349,17 @@ get_total_entries(struct trace_buffer *b
+@@ -3555,15 +3557,17 @@ unsigned long trace_total_entries(struct
static void print_lat_help_header(struct seq_file *m)
{
@@ -555,32 +554,27 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
-@@ -3393,15 +3397,17 @@ static void print_func_help_header_irq(s
- tgid ? tgid_space : space);
- seq_printf(m, "# %s / _----=> need-resched\n",
- tgid ? tgid_space : space);
-- seq_printf(m, "# %s| / _---=> hardirq/softirq\n",
-+ seq_printf(m, "# %s| / _---=> need-resched_lazy\n",
- tgid ? tgid_space : space);
-- seq_printf(m, "# %s|| / _--=> preempt-depth\n",
-+ seq_printf(m, "# %s|| / _--=> hardirq/softirq\n",
- tgid ? tgid_space : space);
-- seq_printf(m, "# %s||| / delay\n",
-+ seq_printf(m, "# %s||| / preempt-depth\n",
- tgid ? tgid_space : space);
-- seq_printf(m, "# TASK-PID %sCPU# |||| TIMESTAMP FUNCTION\n",
-+ seq_printf(m, "# %s|||| / delay\n",
-+ tgid ? tgid_space : space);
-+ seq_printf(m, "# TASK-PID %sCPU# ||||| TIMESTAMP FUNCTION\n",
- tgid ? " TGID " : space);
-- seq_printf(m, "# | | %s | |||| | |\n",
-+ seq_printf(m, "# | | %s | ||||| | |\n",
- tgid ? " | " : space);
+@@ -3599,11 +3603,12 @@ static void print_func_help_header_irq(s
+
+ seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
+ seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
+- seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
+- seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
+- seq_printf(m, "# %.*s||| / delay\n", prec, space);
+- seq_printf(m, "# TASK-PID %.*sCPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
+- seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
++ seq_printf(m, "# %.*s| / _----=> need-resched\n", prec, space);
++ seq_printf(m, "# %.*s|| / _---=> hardirq/softirq\n", prec, space);
++ seq_printf(m, "# %.*s||| / _--=> preempt-depth\n", prec, space);
++ seq_printf(m, "# %.*s||||/ delay\n", prec, space);
++ seq_printf(m, "# TASK-PID %.*sCPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
++ seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | ");
}
+ void
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
-@@ -127,6 +127,7 @@ struct kretprobe_trace_entry_head {
+@@ -126,6 +126,7 @@ struct kretprobe_trace_entry_head {
* NEED_RESCHED - reschedule is requested
* HARDIRQ - inside an interrupt handler
* SOFTIRQ - inside a softirq handler
@@ -588,7 +582,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*/
enum trace_flag_type {
TRACE_FLAG_IRQS_OFF = 0x01,
-@@ -136,6 +137,7 @@ enum trace_flag_type {
+@@ -135,6 +136,7 @@ enum trace_flag_type {
TRACE_FLAG_SOFTIRQ = 0x10,
TRACE_FLAG_PREEMPT_RESCHED = 0x20,
TRACE_FLAG_NMI = 0x40,
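A simplified sketch of the core helper the hunks above rely on; this is a reconstruction, and the full version in the patch also handles remote CPUs and the polling-idle case:

```c
/* SCHED_OTHER tasks only get the lazy flag, deferring the context
 * switch to the next preemption point; with the PREEMPT_LAZY
 * scheduler feature off, this degrades to plain resched_curr(). */
static void resched_curr_lazy(struct rq *rq)
{
	if (!sched_feat(PREEMPT_LAZY)) {
		resched_curr(rq);
		return;
	}
	if (test_tsk_need_resched(rq->curr))
		return;		/* a full reschedule is already pending */
	set_tsk_thread_flag(rq->curr, TIF_NEED_RESCHED_LAZY);
}
```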
diff --git a/debian/patches-rt/preempt-nort-rt-variants.patch b/debian/patches-rt/preempt-nort-rt-variants.patch
index d859f8bbe..a168f5e7c 100644
--- a/debian/patches-rt/preempt-nort-rt-variants.patch
+++ b/debian/patches-rt/preempt-nort-rt-variants.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 24 Jul 2009 12:38:56 +0200
Subject: preempt: Provide preempt_*_(no)rt variants
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
RT needs a few preempt_disable/enable points which are not necessary
otherwise. Implement variants to avoid #ifdeffery.
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
-@@ -178,7 +178,11 @@ do { \
+@@ -187,7 +187,11 @@ do { \
preempt_count_dec(); \
} while (0)
@@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define preemptible() (preempt_count() == 0 && !irqs_disabled())
-@@ -295,6 +299,18 @@ do { \
+@@ -304,6 +308,18 @@ do { \
set_preempt_need_resched(); \
} while (0)
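The variants themselves are a thin shim; a sketch, assuming the series' CONFIG_PREEMPT_RT_BASE symbol is the switch:

```c
/* _nort() variants compile away on RT, _rt() variants compile away
 * everywhere else, so call sites need no #ifdef blocks. */
#ifdef CONFIG_PREEMPT_RT_BASE
# define preempt_disable_rt()		preempt_disable()
# define preempt_enable_rt()		preempt_enable()
# define preempt_disable_nort()	barrier()
# define preempt_enable_nort()		barrier()
#else
# define preempt_disable_rt()		barrier()
# define preempt_enable_rt()		barrier()
# define preempt_disable_nort()	preempt_disable()
# define preempt_enable_nort()		preempt_enable()
#endif
```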
diff --git a/debian/patches-rt/printk-devkmsg-llseek-reset-clear-if-it-is-lost.patch b/debian/patches-rt/printk-devkmsg-llseek-reset-clear-if-it-is-lost.patch
index 4741569ef..8d336a2c1 100644
--- a/debian/patches-rt/printk-devkmsg-llseek-reset-clear-if-it-is-lost.patch
+++ b/debian/patches-rt/printk-devkmsg-llseek-reset-clear-if-it-is-lost.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Fri, 22 Feb 2019 23:02:44 +0100
Subject: [PATCH] printk: devkmsg: llseek: reset clear if it is lost
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
SEEK_DATA will seek to the last clear record. If this clear record
is no longer in the ring buffer, devkmsg_llseek() will go into an
@@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -725,6 +725,7 @@ static loff_t devkmsg_llseek(struct file
+@@ -751,6 +751,7 @@ static loff_t devkmsg_llseek(struct file
{
struct devkmsg_user *user = file->private_data;
loff_t ret;
@@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!user)
return -EBADF;
-@@ -747,7 +748,7 @@ static loff_t devkmsg_llseek(struct file
+@@ -773,7 +774,7 @@ static loff_t devkmsg_llseek(struct file
* changes no global state, and does not clear anything.
*/
for (;;) {
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
ret = prb_iter_seek(&user->iter, clear_seq);
if (ret > 0) {
/* seeked to clear seq */
-@@ -764,6 +765,10 @@ static loff_t devkmsg_llseek(struct file
+@@ -790,6 +791,10 @@ static loff_t devkmsg_llseek(struct file
break;
}
/* iterator invalid, start over */
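The loop shape after the change, as a reconstruction; the prb_iter_init() call and its arguments are assumptions based on the ring-buffer API introduced earlier in this series:

```c
/* clear_seq is re-read on every pass: if the 'clear' record has been
 * overwritten meanwhile, the fresh value lets the seek converge
 * instead of spinning on a sequence number that no longer exists. */
for (;;) {
	u64 seq = clear_seq;			/* re-read each iteration */

	ret = prb_iter_seek(&user->iter, seq);
	if (ret >= 0)
		break;				/* seeked, or nothing to skip */
	/* iterator invalid, start over */
	prb_iter_init(&user->iter, &printk_rb, NULL);
}
```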
diff --git a/debian/patches-rt/printk-kmsg_dump-remove-mutex-usage.patch b/debian/patches-rt/printk-kmsg_dump-remove-mutex-usage.patch
index e3d259e7a..edc96b0c1 100644
--- a/debian/patches-rt/printk-kmsg_dump-remove-mutex-usage.patch
+++ b/debian/patches-rt/printk-kmsg_dump-remove-mutex-usage.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Wed, 24 Apr 2019 16:36:04 +0200
Subject: [PATCH] printk: kmsg_dump: remove mutex usage
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
The kmsg dumper can be called from any context, but the dumping
helpers were using a mutex to synchronize the iterator against
@@ -20,16 +20,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -359,8 +359,6 @@ static u64 syslog_seq;
+@@ -369,8 +369,6 @@ static u64 syslog_seq;
static size_t syslog_partial;
static bool syslog_time;
-static DEFINE_MUTEX(kmsg_dump_lock);
-
- /* the last printk record at the last 'clear' command */
+ /* the next printk record to read after the last 'clear' command */
static u64 clear_seq;
-@@ -2820,6 +2818,7 @@ module_param_named(always_kmsg_dump, alw
+@@ -2867,6 +2865,7 @@ module_param_named(always_kmsg_dump, alw
*/
void kmsg_dump(enum kmsg_dump_reason reason)
{
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct kmsg_dumper *dumper;
if ((reason > KMSG_DUMP_OOPS) && !always_kmsg_dump)
-@@ -2830,16 +2829,18 @@ void kmsg_dump(enum kmsg_dump_reason rea
+@@ -2877,16 +2876,18 @@ void kmsg_dump(enum kmsg_dump_reason rea
if (dumper->max_reason && reason > dumper->max_reason)
continue;
@@ -63,7 +63,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
rcu_read_unlock();
}
-@@ -2951,9 +2952,7 @@ bool kmsg_dump_get_line(struct kmsg_dump
+@@ -2998,9 +2999,7 @@ bool kmsg_dump_get_line(struct kmsg_dump
{
bool ret;
@@ -73,7 +73,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ret;
}
-@@ -3105,9 +3104,7 @@ void kmsg_dump_rewind_nolock(struct kmsg
+@@ -3152,9 +3151,7 @@ void kmsg_dump_rewind_nolock(struct kmsg
*/
void kmsg_dump_rewind(struct kmsg_dumper *dumper)
{
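A reconstruction of where this lands; most of the added lines are elided in the hunks above, so the copy-based scheme here is an assumption consistent with what is visible:

```c
/* Each kmsg_dump() call works on a private copy of the dumper, so no
 * global mutex is needed and the dump may run from any context,
 * including NMI. */
void kmsg_dump(enum kmsg_dump_reason reason)
{
	struct kmsg_dumper dumper_copy;		/* per-call iterator state */
	struct kmsg_dumper *dumper;

	rcu_read_lock();
	list_for_each_entry_rcu(dumper, &dump_list, list) {
		if (dumper->max_reason && reason > dumper->max_reason)
			continue;
		dumper_copy = *dumper;		/* private, lock-free state */
		dumper_copy.active = true;
		kmsg_dump_rewind_nolock(&dumper_copy);
		dumper_copy.dump(&dumper_copy, reason);
	}
	rcu_read_unlock();
}
```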
diff --git a/debian/patches-rt/printk-only-allow-kernel-to-emergency-message.patch b/debian/patches-rt/printk-only-allow-kernel-to-emergency-message.patch
index 00ef366ac..598279009 100644
--- a/debian/patches-rt/printk-only-allow-kernel-to-emergency-message.patch
+++ b/debian/patches-rt/printk-only-allow-kernel-to-emergency-message.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Sun, 17 Feb 2019 03:11:20 +0100
Subject: [PATCH] printk: only allow kernel to emergency message
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Emergency messages exist as a mechanism for the kernel to
communicate critical information to users. It is not meant for
@@ -11,12 +11,12 @@ processed by the emergency message code.
Signed-off-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- kernel/printk/printk.c | 18 ++++++++++++------
- 1 file changed, 12 insertions(+), 6 deletions(-)
+ kernel/printk/printk.c | 17 +++++++++++------
+ 1 file changed, 11 insertions(+), 6 deletions(-)
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1700,7 +1700,8 @@ static void printk_write_history(struct
+@@ -1744,7 +1744,8 @@ static void printk_write_history(struct
* The console_lock must be held.
*/
static void call_console_drivers(u64 seq, const char *ext_text, size_t ext_len,
@@ -26,7 +26,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct console *con;
-@@ -1720,13 +1721,14 @@ static void call_console_drivers(u64 seq
+@@ -1764,13 +1765,14 @@ static void call_console_drivers(u64 seq
con->wrote_history = 1;
con->printk_seq = seq - 1;
}
@@ -43,20 +43,19 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* skip emergency messages, already printed */
if (con->printk_seq < seq)
con->printk_seq = seq;
-@@ -1894,7 +1896,11 @@ asmlinkage int vprintk_emit(int facility
+@@ -1941,7 +1943,10 @@ asmlinkage int vprintk_emit(int facility
* - text points to beginning of text
* - there is room before text for prefix
*/
- printk_emergency(rbuf, level & 7, ts_nsec, cpu, text, text_len);
+ if (facility == 0) {
+ /* only the kernel can create emergency messages */
-+ printk_emergency(rbuf, level & 7, ts_nsec, cpu,
-+ text, text_len);
++ printk_emergency(rbuf, level & 7, ts_nsec, cpu, text, text_len);
+ }
if ((lflags & LOG_CONT) || !(lflags & LOG_NEWLINE)) {
- cont_add(ctx, cpu, facility, level, lflags, text, text_len);
-@@ -2657,8 +2663,8 @@ static int printk_kthread_func(void *dat
+ cont_add(ctx, cpu, caller_id, facility, level, lflags, text, text_len);
+@@ -2705,8 +2710,8 @@ static int printk_kthread_func(void *dat
&len, printk_time);
console_lock();
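For contrast, a hypothetical user-space writer: anything injected through /dev/kmsg carries a non-zero syslog facility, so the facility == 0 test above keeps it off the emergency path:

```c
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/dev/kmsg", "w");

	if (!f)
		return 1;
	/* "<12>" = facility 1 (user) * 8 + severity 4 (warning);
	 * the kernel's own messages use facility 0. */
	fprintf(f, "<12>hello from user space\n");
	fclose(f);
	return 0;
}
```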
diff --git a/debian/patches-rt/printk-print-rate-limitted-message-as-info.patch b/debian/patches-rt/printk-print-rate-limitted-message-as-info.patch
index 168832245..2d3c109b1 100644
--- a/debian/patches-rt/printk-print-rate-limitted-message-as-info.patch
+++ b/debian/patches-rt/printk-print-rate-limitted-message-as-info.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 22 Feb 2019 12:47:13 +0100
Subject: [PATCH] printk: print "rate-limitted" message as info
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
If messages injected via kmsg are dropped, then they don't need
to be printed as warnings. This is to avoid latency spikes if the
diff --git a/debian/patches-rt/printk-set-deferred-to-default-loglevel-enforce-mask.patch b/debian/patches-rt/printk-set-deferred-to-default-loglevel-enforce-mask.patch
index 69412ad92..882b48e86 100644
--- a/debian/patches-rt/printk-set-deferred-to-default-loglevel-enforce-mask.patch
+++ b/debian/patches-rt/printk-set-deferred-to-default-loglevel-enforce-mask.patch
@@ -1,7 +1,7 @@
From: John Ogness <john.ogness@linutronix.de>
Date: Thu, 14 Feb 2019 23:13:30 +0100
Subject: [PATCH] printk: set deferred to default loglevel, enforce mask
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
All messages printed via vprintk_deferred() were being
automatically treated as emergency messages.
@@ -19,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1894,7 +1894,7 @@ asmlinkage int vprintk_emit(int facility
+@@ -1941,7 +1941,7 @@ asmlinkage int vprintk_emit(int facility
* - text points to beginning of text
* - there is room before text for prefix
*/
@@ -27,8 +27,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ printk_emergency(rbuf, level & 7, ts_nsec, cpu, text, text_len);
if ((lflags & LOG_CONT) || !(lflags & LOG_NEWLINE)) {
- cont_add(ctx, cpu, facility, level, lflags, text, text_len);
-@@ -2686,7 +2686,7 @@ late_initcall(init_printk_kthread);
+ cont_add(ctx, cpu, caller_id, facility, level, lflags, text, text_len);
+@@ -2734,7 +2734,7 @@ late_initcall(init_printk_kthread);
static int vprintk_deferred(const char *fmt, va_list args)
{
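The `level & 7` expression visible in the hunk above is the mask the subject refers to; a worked example:

```c
/* A deferred printk may hand in a level with extra flag bits set;
 * masking with 7 keeps only the three syslog severity bits. */
int level = 4 | 0x10;		/* severity 4 plus a hypothetical flag bit */
int severity = level & 7;	/* == 4 (warning), flag bits stripped */
```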
diff --git a/debian/patches-rt/psi-replace-delayed-work-with-timer-work.patch b/debian/patches-rt/psi-replace-delayed-work-with-timer-work.patch
deleted file mode 100644
index 947cc16ee..000000000
--- a/debian/patches-rt/psi-replace-delayed-work-with-timer-work.patch
+++ /dev/null
@@ -1,140 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 12 Feb 2019 15:03:03 +0100
-Subject: [PATCH] psi: replace delayed work with timer + work
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-psi_task_change() is invoked with disabled interrupts and this does not
-allow to use schedule_delayed_work().
-
-Replace schedule_delayed_work() with a timer which schedules the work
-immediately.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/psi_types.h | 3 ++-
- kernel/sched/psi.c | 39 ++++++++++++++++++++++++++++++---------
- 2 files changed, 32 insertions(+), 10 deletions(-)
-
---- a/include/linux/psi_types.h
-+++ b/include/linux/psi_types.h
-@@ -76,7 +76,8 @@ struct psi_group {
- u64 total_prev[NR_PSI_STATES - 1];
- u64 last_update;
- u64 next_update;
-- struct delayed_work clock_work;
-+ struct work_struct clock_work;
-+ struct timer_list clock_work_timer;
-
- /* Total stall times and sampled pressure averages */
- u64 total[NR_PSI_STATES - 1];
---- a/kernel/sched/psi.c
-+++ b/kernel/sched/psi.c
-@@ -124,6 +124,7 @@
- * sampling of the aggregate task states would be.
- */
-
-+#include <linux/sched.h>
- #include "../workqueue_internal.h"
- #include <linux/sched/loadavg.h>
- #include <linux/seq_file.h>
-@@ -131,7 +132,6 @@
- #include <linux/seqlock.h>
- #include <linux/cgroup.h>
- #include <linux/module.h>
--#include <linux/sched.h>
- #include <linux/psi.h>
- #include "sched.h"
-
-@@ -166,6 +166,7 @@ static struct psi_group psi_system = {
- };
-
- static void psi_update_work(struct work_struct *work);
-+static void psi_sched_update_work(struct timer_list *t);
-
- static void group_init(struct psi_group *group)
- {
-@@ -174,7 +175,8 @@ static void group_init(struct psi_group
- for_each_possible_cpu(cpu)
- seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
- group->next_update = sched_clock() + psi_period;
-- INIT_DELAYED_WORK(&group->clock_work, psi_update_work);
-+ INIT_WORK(&group->clock_work, psi_update_work);
-+ timer_setup(&group->clock_work_timer, psi_sched_update_work, 0);
- mutex_init(&group->stat_lock);
- }
-
-@@ -367,14 +369,14 @@ static bool update_stats(struct psi_grou
- return nonidle_total;
- }
-
-+static void psi_sched_delayed_work(struct psi_group *group, unsigned long delay);
-+
- static void psi_update_work(struct work_struct *work)
- {
-- struct delayed_work *dwork;
- struct psi_group *group;
- bool nonidle;
-
-- dwork = to_delayed_work(work);
-- group = container_of(dwork, struct psi_group, clock_work);
-+ group = container_of(work, struct psi_group, clock_work);
-
- /*
- * If there is task activity, periodically fold the per-cpu
-@@ -393,7 +395,7 @@ static void psi_update_work(struct work_
- now = sched_clock();
- if (group->next_update > now)
- delay = nsecs_to_jiffies(group->next_update - now) + 1;
-- schedule_delayed_work(dwork, delay);
-+ psi_sched_delayed_work(group, delay);
- }
- }
-
-@@ -507,6 +509,20 @@ static struct psi_group *iterate_groups(
- return &psi_system;
- }
-
-+static void psi_sched_update_work(struct timer_list *t)
-+{
-+ struct psi_group *group = from_timer(group, t, clock_work_timer);
-+
-+ schedule_work(&group->clock_work);
-+}
-+
-+static void psi_sched_delayed_work(struct psi_group *group, unsigned long delay)
-+{
-+ if (!timer_pending(&group->clock_work_timer) &&
-+ !work_pending(&group->clock_work))
-+ mod_timer(&group->clock_work_timer, delay);
-+}
-+
- void psi_task_change(struct task_struct *task, int clear, int set)
- {
- int cpu = task_cpu(task);
-@@ -540,10 +556,14 @@ void psi_task_change(struct task_struct
- wq_worker_last_func(task) == psi_update_work))
- wake_clock = false;
-
-+ if (wake_clock) {
-+ if (task_is_ktimer_softirqd(task))
-+ wake_clock = false;
-+ }
- while ((group = iterate_groups(task, &iter))) {
- psi_group_change(group, cpu, clear, set);
-- if (wake_clock && !delayed_work_pending(&group->clock_work))
-- schedule_delayed_work(&group->clock_work, PSI_FREQ);
-+ if (wake_clock)
-+ psi_sched_delayed_work(group, PSI_FREQ);
- }
- }
-
-@@ -640,7 +660,8 @@ void psi_cgroup_free(struct cgroup *cgro
- if (static_branch_likely(&psi_disabled))
- return;
-
-- cancel_delayed_work_sync(&cgroup->psi.clock_work);
-+ del_timer_sync(&cgroup->psi.clock_work_timer);
-+ cancel_work_sync(&cgroup->psi.clock_work);
- free_percpu(cgroup->psi.pcpu);
- }
-
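Although the patch is dropped from the series here, the timer-plus-work pattern it used is generic; a self-contained sketch with illustrative names (not the psi ones):

```c
#include <linux/timer.h>
#include <linux/workqueue.h>

struct deferred {
	struct work_struct work;
	struct timer_list timer;
};

/* Runs in timer context, where queueing work is permitted. */
static void deferred_timer_fn(struct timer_list *t)
{
	struct deferred *d = from_timer(d, t, timer);

	schedule_work(&d->work);
}

/* May be called with interrupts disabled: arming a timer is safe
 * there, while schedule_delayed_work() was not in this setup. */
static void deferred_kick(struct deferred *d, unsigned long delay)
{
	if (!timer_pending(&d->timer) && !work_pending(&d->work))
		mod_timer(&d->timer, jiffies + delay);
}
```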
diff --git a/debian/patches-rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/debian/patches-rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
index e6662baa7..4eed4533b 100644
--- a/debian/patches-rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
+++ b/debian/patches-rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 29 Aug 2013 18:21:04 +0200
Subject: ptrace: fix ptrace vs tasklist_lock race
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
As explained by Alexander Fyodorov <halcy@yandex.ru>:
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -102,12 +102,8 @@ struct task_group;
+@@ -103,12 +103,8 @@ struct task_group;
__TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
TASK_PARKED)
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
(task->flags & PF_FROZEN) == 0 && \
(task->state & TASK_NOLOAD) == 0)
-@@ -1720,6 +1716,51 @@ static inline int test_tsk_need_resched(
+@@ -1723,6 +1719,51 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
@@ -116,7 +116,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_unlock_irq(&task->sighand->siglock);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1367,6 +1367,18 @@ int migrate_swap(struct task_struct *cur
+@@ -1402,6 +1402,18 @@ int migrate_swap(struct task_struct *cur
}
#endif /* CONFIG_NUMA_BALANCING */
@@ -135,7 +135,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* wait_task_inactive - wait for a thread to unschedule.
*
-@@ -1411,7 +1423,7 @@ unsigned long wait_task_inactive(struct
+@@ -1446,7 +1458,7 @@ unsigned long wait_task_inactive(struct
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
@@ -144,7 +144,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
cpu_relax();
}
-@@ -1426,7 +1438,8 @@ unsigned long wait_task_inactive(struct
+@@ -1461,7 +1473,8 @@ unsigned long wait_task_inactive(struct
running = task_running(rq, p);
queued = task_on_rq_queued(p);
ncsw = 0;
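The ~45 added sched.h lines are elided above; the central helper used by wait_task_inactive() looks roughly like this (a reconstructed sketch, not the verbatim patch body):

```c
/* On RT, a task blocked on a spinlock-turned-rtmutex parks its real
 * state in saved_state, so a state comparison must check both fields
 * under pi_lock to avoid racing with the rtmutex code. */
static bool check_task_state(struct task_struct *p, long match_state)
{
	bool match = false;

	raw_spin_lock_irq(&p->pi_lock);
	if (p->state == match_state || p->saved_state == match_state)
		match = true;
	raw_spin_unlock_irq(&p->pi_lock);

	return match;
}
```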
diff --git a/debian/patches-rt/radix-tree-use-local-locks.patch b/debian/patches-rt/radix-tree-use-local-locks.patch
index 1d106f63c..ca0a61c27 100644
--- a/debian/patches-rt/radix-tree-use-local-locks.patch
+++ b/debian/patches-rt/radix-tree-use-local-locks.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 25 Jan 2017 16:34:27 +0100
Subject: [PATCH] radix-tree: use local locks
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
The preload functionality uses per-CPU variables and preempt-disable to
ensure that it does not switch CPUs during its usage. This patch adds
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* idr_for_each_entry() - Iterate over an IDR's elements of a given type.
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
-@@ -239,6 +239,7 @@ unsigned int radix_tree_gang_lookup(cons
+@@ -226,6 +226,7 @@ unsigned int radix_tree_gang_lookup(cons
unsigned int max_items);
int radix_tree_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload(gfp_t gfp_mask);
@@ -41,7 +41,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *,
unsigned long index, unsigned int tag);
-@@ -256,11 +257,6 @@ unsigned int radix_tree_gang_lookup_tag_
+@@ -243,11 +244,6 @@ unsigned int radix_tree_gang_lookup_tag_
unsigned int max_items, unsigned int tag);
int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag);
@@ -55,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
unsigned long max);
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
-@@ -39,7 +39,7 @@
+@@ -26,7 +26,7 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/xarray.h>
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Radix tree node cache.
-@@ -85,6 +85,7 @@ struct radix_tree_preload {
+@@ -72,6 +72,7 @@ struct radix_tree_preload {
struct radix_tree_node *nodes;
};
static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
@@ -72,7 +72,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline struct radix_tree_node *entry_to_node(void *ptr)
{
-@@ -282,12 +283,13 @@ radix_tree_node_alloc(gfp_t gfp_mask, st
+@@ -269,12 +270,13 @@ radix_tree_node_alloc(gfp_t gfp_mask, st
* succeed in getting a node here (and never reach
* kmem_cache_alloc)
*/
@@ -87,7 +87,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Update the allocation stack trace as this is more useful
* for debugging.
-@@ -353,14 +355,14 @@ static __must_check int __radix_tree_pre
+@@ -340,14 +342,14 @@ static __must_check int __radix_tree_pre
*/
gfp_mask &= ~__GFP_ACCOUNT;
@@ -105,7 +105,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
rtp = this_cpu_ptr(&radix_tree_preloads);
if (rtp->nr < nr) {
node->parent = rtp->nodes;
-@@ -402,11 +404,17 @@ int radix_tree_maybe_preload(gfp_t gfp_m
+@@ -389,11 +391,17 @@ int radix_tree_maybe_preload(gfp_t gfp_m
if (gfpflags_allow_blocking(gfp_mask))
return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
/* Preloading doesn't help anything with this gfp mask, skip it */
@@ -124,7 +124,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static unsigned radix_tree_load_root(const struct radix_tree_root *root,
struct radix_tree_node **nodep, unsigned long *maxindex)
{
-@@ -1491,10 +1499,16 @@ EXPORT_SYMBOL(radix_tree_tagged);
+@@ -1478,10 +1486,16 @@ EXPORT_SYMBOL(radix_tree_tagged);
void idr_preload(gfp_t gfp_mask)
{
if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
diff --git a/debian/patches-rt/random-avoid-preempt_disable-ed-section.patch b/debian/patches-rt/random-avoid-preempt_disable-ed-section.patch
deleted file mode 100644
index 1ec2654d1..000000000
--- a/debian/patches-rt/random-avoid-preempt_disable-ed-section.patch
+++ /dev/null
@@ -1,75 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 12 May 2017 15:46:17 +0200
-Subject: [PATCH] random: avoid preempt_disable()ed section
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-extract_crng() will use sleeping locks while in a preempt_disable()
-section due to get_cpu_var().
-Work around it with local_locks.
-
-Cc: stable-rt@vger.kernel.org # where it applies to
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/char/random.c | 11 +++++++----
- 1 file changed, 7 insertions(+), 4 deletions(-)
-
---- a/drivers/char/random.c
-+++ b/drivers/char/random.c
-@@ -265,6 +265,7 @@
- #include <linux/syscalls.h>
- #include <linux/completion.h>
- #include <linux/uuid.h>
-+#include <linux/locallock.h>
- #include <crypto/chacha.h>
-
- #include <asm/processor.h>
-@@ -2222,6 +2223,7 @@ static rwlock_t batched_entropy_reset_lo
- * at any point prior.
- */
- static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
-+static DEFINE_LOCAL_IRQ_LOCK(batched_entropy_u64_lock);
- u64 get_random_u64(void)
- {
- u64 ret;
-@@ -2242,7 +2244,7 @@ u64 get_random_u64(void)
- warn_unseeded_randomness(&previous);
-
- use_lock = READ_ONCE(crng_init) < 2;
-- batch = &get_cpu_var(batched_entropy_u64);
-+ batch = &get_locked_var(batched_entropy_u64_lock, batched_entropy_u64);
- if (use_lock)
- read_lock_irqsave(&batched_entropy_reset_lock, flags);
- if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
-@@ -2252,12 +2254,13 @@ u64 get_random_u64(void)
- ret = batch->entropy_u64[batch->position++];
- if (use_lock)
- read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
-- put_cpu_var(batched_entropy_u64);
-+ put_locked_var(batched_entropy_u64_lock, batched_entropy_u64);
- return ret;
- }
- EXPORT_SYMBOL(get_random_u64);
-
- static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
-+static DEFINE_LOCAL_IRQ_LOCK(batched_entropy_u32_lock);
- u32 get_random_u32(void)
- {
- u32 ret;
-@@ -2272,7 +2275,7 @@ u32 get_random_u32(void)
- warn_unseeded_randomness(&previous);
-
- use_lock = READ_ONCE(crng_init) < 2;
-- batch = &get_cpu_var(batched_entropy_u32);
-+ batch = &get_locked_var(batched_entropy_u32_lock, batched_entropy_u32);
- if (use_lock)
- read_lock_irqsave(&batched_entropy_reset_lock, flags);
- if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
-@@ -2282,7 +2285,7 @@ u32 get_random_u32(void)
- ret = batch->entropy_u32[batch->position++];
- if (use_lock)
- read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
-- put_cpu_var(batched_entropy_u32);
-+ put_locked_var(batched_entropy_u32_lock, batched_entropy_u32);
- return ret;
- }
- EXPORT_SYMBOL(get_random_u32);
diff --git a/debian/patches-rt/random-make-it-work-on-rt.patch b/debian/patches-rt/random-make-it-work-on-rt.patch
index 8dfa24b59..d1340e149 100644
--- a/debian/patches-rt/random-make-it-work-on-rt.patch
+++ b/debian/patches-rt/random-make-it-work-on-rt.patch
@@ -1,7 +1,7 @@
Subject: random: Make it work on rt
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 21 Aug 2012 20:38:50 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Delegate the random insertion to the forced threaded interrupt
handler. Store the return IP of the hard interrupt handler in the irq
@@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
-@@ -1228,28 +1228,27 @@ static __u32 get_reg(struct fast_pool *f
+@@ -1305,28 +1305,27 @@ static __u32 get_reg(struct fast_pool *f
return *ptr;
}
@@ -58,7 +58,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
add_interrupt_bench(cycles);
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
-@@ -110,10 +110,12 @@ int hv_post_message(union hv_connection_
+@@ -97,10 +97,12 @@ int hv_post_message(union hv_connection_
static void hv_stimer0_isr(void)
{
struct hv_per_cpu_context *hv_cpu;
@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static int hv_ce_set_next_event(unsigned long delta,
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
-@@ -1118,6 +1118,8 @@ static void vmbus_isr(void)
+@@ -1122,6 +1122,8 @@ static void vmbus_isr(void)
void *page_addr = hv_cpu->synic_event_page;
struct hv_message *msg;
union hv_synic_event_flags *event;
@@ -83,7 +83,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
bool handled = false;
if (unlikely(page_addr == NULL))
-@@ -1161,7 +1163,7 @@ static void vmbus_isr(void)
+@@ -1165,7 +1167,7 @@ static void vmbus_isr(void)
tasklet_schedule(&hv_cpu->msg_dpc);
}
@@ -94,7 +94,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
-@@ -71,6 +71,7 @@ struct irq_desc {
+@@ -72,6 +72,7 @@ struct irq_desc {
unsigned int irqs_unhandled;
atomic_t threads_handled;
int threads_handled_last;
@@ -135,7 +135,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
note_interrupt(desc, retval);
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
-@@ -1068,6 +1068,12 @@ static int irq_thread(void *data)
+@@ -1075,6 +1075,12 @@ static int irq_thread(void *data)
if (action_ret == IRQ_WAKE_THREAD)
irq_wake_secondary(desc, action);
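A sketch of the resulting flow; the extra ip parameter and the exact call sites are assumptions, since their definitions sit in hunk bodies elided above:

```c
/* Hard-irq path (kernel/irq/handle.c): only record the return IP,
 * taking no entropy-pool locks. */
desc->random_ip = ip;		/* field added by the irqdesc.h hunk */

/* Forced-threaded handler (kernel/irq/manage.c): feed the recorded
 * IP to the pool, where sleeping locks are fine on RT. */
if (action_ret == IRQ_HANDLED)
	add_interrupt_randomness(action->irq, 0, desc->random_ip);
```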
diff --git a/debian/patches-rt/rcu-Eliminate-softirq-processing-from-rcutree.patch b/debian/patches-rt/rcu-Eliminate-softirq-processing-from-rcutree.patch
index b37b82d34..334c36835 100644
--- a/debian/patches-rt/rcu-Eliminate-softirq-processing-from-rcutree.patch
+++ b/debian/patches-rt/rcu-Eliminate-softirq-processing-from-rcutree.patch
@@ -1,46 +1,58 @@
-From: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
-Date: Mon, 4 Nov 2013 13:21:10 -0800
-Subject: rcu: Eliminate softirq processing from rcutree
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 20 Mar 2019 22:13:33 +0100
+Subject: [PATCH] rcu: Enable elimination of Tree-RCU softirq processing
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
-Running RCU out of softirq is a problem for some workloads that would
-like to manage RCU core processing independently of other softirq work,
-for example, setting kthread priority. This commit therefore moves the
-RCU core work from softirq to a per-CPU/per-flavor SCHED_OTHER kthread
-named rcuc. The SCHED_OTHER approach avoids the scalability problems
-that appeared with the earlier attempt to move RCU core processing to
-from softirq to kthreads. That said, kernels built with RCU_BOOST=y
-will run the rcuc kthreads at the RCU-boosting priority.
+Some workloads need to change kthread priority for RCU core processing
+without affecting other softirq work. This commit therefore introduces
+the rcutree.use_softirq kernel boot parameter, which moves the RCU core
+work from softirq to a per-CPU SCHED_OTHER kthread named rcuc. Use of
+the SCHED_OTHER approach avoids the scalability problems that appeared
+with the earlier attempt to move RCU core processing from softirq to
+kthreads. That said, kernels built with RCU_BOOST=y will run the
+rcuc kthreads at the RCU-boosting priority.
+
+Note that rcutree.use_softirq=0 must be specified to move RCU core
+processing to the rcuc kthreads: rcutree.use_softirq=1 is the default.
Reported-by: Thomas Gleixner <tglx@linutronix.de>
-Tested-by: Mike Galbraith <bitbucket@online.de>
-Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Tested-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+[ paulmck: Adjust for invoke_rcu_callbacks() only ever being invoked
+ from RCU core processing, in contrast to softirq->rcuc transition
+ in old mainline RCU priority boosting. ]
+[ paulmck: Avoid wakeups when scheduler might have invoked rcu_read_unlock()
+ while holding rq or pi locks, also possibly fixing a pre-existing latent
+ bug involving raise_softirq()-induced wakeups. ]
+Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
---
- include/linux/rcupdate.h | 3 -
- kernel/rcu/tree.c | 110 ++++++++++++++++++++++++++++++++++--
- kernel/rcu/tree.h | 4 -
- kernel/rcu/tree_plugin.h | 140 ++++-------------------------------------------
- 4 files changed, 117 insertions(+), 140 deletions(-)
+ Documentation/admin-guide/kernel-parameters.txt | 6 +
+ kernel/rcu/tree.c | 138 +++++++++++++++++++++---
+ kernel/rcu/tree.h | 2
+ kernel/rcu/tree_plugin.h | 134 ++---------------------
+ 4 files changed, 146 insertions(+), 134 deletions(-)
---- a/include/linux/rcupdate.h
-+++ b/include/linux/rcupdate.h
-@@ -295,7 +295,8 @@ static inline void rcu_preempt_sleep_che
- #define rcu_sleep_check() \
- do { \
- rcu_preempt_sleep_check(); \
-- RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \
-+ if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) \
-+ RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map),\
- "Illegal context switch in RCU-bh read-side critical section"); \
- RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map), \
- "Illegal context switch in RCU-sched read-side critical section"); \
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -3752,6 +3752,12 @@
+ the propagation of recent CPU-hotplug changes up
+ the rcu_node combining tree.
+
++ rcutree.use_softirq= [KNL]
++ If set to zero, move all RCU_SOFTIRQ processing to
++ per-CPU rcuc kthreads. Defaults to a non-zero
++ value, meaning that RCU_SOFTIRQ is used by default.
++ Specify rcutree.use_softirq=0 to use rcuc kthreads.
++
+ rcutree.rcu_fanout_exact= [KNL]
+ Disable autobalancing of the rcu_node combining
+ tree. This is used by rcutorture, and might
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
-@@ -62,6 +62,12 @@
- #include <linux/suspend.h>
- #include <linux/ftrace.h>
+@@ -51,6 +51,12 @@
#include <linux/tick.h>
+ #include <linux/sysrq.h>
+ #include <linux/kprobes.h>
+#include <linux/gfp.h>
+#include <linux/oom.h>
+#include <linux/smpboot.h>
@@ -50,31 +62,32 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include "tree.h"
#include "rcu.h"
-@@ -2725,7 +2731,7 @@ EXPORT_SYMBOL_GPL(rcu_fwd_progress_check
- * structures. This may be called only from the CPU to whom the rdp
- * belongs.
- */
--static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
-+static __latent_entropy void rcu_process_callbacks(void)
+@@ -92,6 +98,9 @@ struct rcu_state rcu_state = {
+ /* Dump rcu_node combining tree at boot to verify correct setup. */
+ static bool dump_tree;
+ module_param(dump_tree, bool, 0444);
++/* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
++static bool use_softirq = 1;
++module_param(use_softirq, bool, 0444);
+ /* Control rcu_node-tree auto-balancing at boot time. */
+ static bool rcu_fanout_exact;
+ module_param(rcu_fanout_exact, bool, 0444);
+@@ -2253,7 +2262,7 @@ void rcu_force_quiescent_state(void)
+ EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
+
+ /* Perform RCU core processing work for the current CPU. */
+-static __latent_entropy void rcu_core(struct softirq_action *unused)
++static __latent_entropy void rcu_core(void)
{
unsigned long flags;
struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
-@@ -2767,6 +2773,8 @@ static __latent_entropy void rcu_process
+@@ -2295,29 +2304,131 @@ static __latent_entropy void rcu_core(st
trace_rcu_utilization(TPS("End RCU core"));
}
-+static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
-+
- /*
- * Schedule RCU callback invocation. If the running implementation of RCU
- * does not support RCU priority boosting, just do a direct call, otherwise
-@@ -2778,18 +2786,105 @@ static void invoke_rcu_callbacks(struct
- {
- if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
- return;
-- if (likely(!rcu_state.boost)) {
-- rcu_do_batch(rdp);
-+ rcu_do_batch(rdp);
++static void rcu_core_si(struct softirq_action *h)
++{
++ rcu_core();
+}
+
+static void rcu_wake_cond(struct task_struct *t, int status)
@@ -87,32 +100,65 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ wake_up_process(t);
+}
+
-+/*
-+ * Wake up this CPU's rcuc kthread to do RCU core processing.
-+ */
-+static void invoke_rcu_core(void)
++static void invoke_rcu_core_kthread(void)
+{
-+ unsigned long flags;
+ struct task_struct *t;
++ unsigned long flags;
+
-+ if (!cpu_online(smp_processor_id()))
- return;
+ local_irq_save(flags);
-+ __this_cpu_write(rcu_cpu_has_work, 1);
-+ t = __this_cpu_read(rcu_cpu_kthread_task);
-+ if (t != NULL && current != t)
-+ rcu_wake_cond(t, __this_cpu_read(rcu_cpu_kthread_status));
++ __this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
++ t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task);
++ if (t != NULL && t != current)
++ rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
+ local_irq_restore(flags);
+}
+
+ /*
+- * Schedule RCU callback invocation. If the running implementation of RCU
+- * does not support RCU priority boosting, just do a direct call, otherwise
+- * wake up the per-CPU kernel kthread. Note that because we are running
+- * on the current CPU with softirqs disabled, the rcu_cpu_kthread_task
+- * cannot disappear out from under us.
++ * Do RCU callback invocation. Note that if we are running !use_softirq,
++ * we are already in the rcuc kthread. If callbacks are offloaded, then
++ * ->cblist is always empty, so we don't get here. Therefore, we only
++ * ever need to check for the scheduler being operational (some callbacks
++ * do wakeups, so we do need the scheduler).
+ */
+ static void invoke_rcu_callbacks(struct rcu_data *rdp)
+ {
+ if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
+ return;
+- if (likely(!rcu_state.boost)) {
+- rcu_do_batch(rdp);
+- return;
+- }
+- invoke_rcu_callbacks_kthread();
++ rcu_do_batch(rdp);
+ }
+
++/*
++ * Wake up this CPU's rcuc kthread to do RCU core processing.
++ */
+ static void invoke_rcu_core(void)
+ {
+- if (cpu_online(smp_processor_id()))
++ if (!cpu_online(smp_processor_id()))
++ return;
++ if (use_softirq)
+ raise_softirq(RCU_SOFTIRQ);
++ else
++ invoke_rcu_core_kthread();
++}
++
+static void rcu_cpu_kthread_park(unsigned int cpu)
+{
-+ per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
++ per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
+}
+
+static int rcu_cpu_kthread_should_run(unsigned int cpu)
+{
-+ return __this_cpu_read(rcu_cpu_has_work);
++ return __this_cpu_read(rcu_data.rcu_cpu_has_work);
+}
+
+/*
@@ -122,39 +168,36 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ */
+static void rcu_cpu_kthread(unsigned int cpu)
+{
-+ unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
-+ char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
++ unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
++ char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
+ int spincnt;
+
+ for (spincnt = 0; spincnt < 10; spincnt++) {
+ trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
+ local_bh_disable();
+ *statusp = RCU_KTHREAD_RUNNING;
-+ this_cpu_inc(rcu_cpu_kthread_loops);
+ local_irq_disable();
+ work = *workp;
+ *workp = 0;
+ local_irq_enable();
+ if (work)
-+ rcu_process_callbacks();
++ rcu_core();
+ local_bh_enable();
+ if (*workp == 0) {
+ trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
+ *statusp = RCU_KTHREAD_WAITING;
+ return;
+ }
- }
-- invoke_rcu_callbacks_kthread();
++ }
+ *statusp = RCU_KTHREAD_YIELDING;
+ trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
+ schedule_timeout_interruptible(2);
+ trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
+ *statusp = RCU_KTHREAD_WAITING;
- }
-
--static void invoke_rcu_core(void)
++}
++
+static struct smp_hotplug_thread rcu_cpu_thread_spec = {
-+ .store = &rcu_cpu_kthread_task,
++ .store = &rcu_data.rcu_cpu_kthread_task,
+ .thread_should_run = rcu_cpu_kthread_should_run,
+ .thread_fn = rcu_cpu_kthread,
+ .thread_comm = "rcuc/%u",
@@ -166,44 +209,34 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ * Spawn per-CPU RCU core processing kthreads.
+ */
+static int __init rcu_spawn_core_kthreads(void)
- {
-- if (cpu_online(smp_processor_id()))
-- raise_softirq(RCU_SOFTIRQ);
++{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
-+ per_cpu(rcu_cpu_has_work, cpu) = 0;
-+ WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec), "%s: Could not start rcub kthread, OOM is now expected behavior\n", __func__);
++ per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
++ if (!IS_ENABLED(CONFIG_RCU_BOOST) && use_softirq)
++ return 0;
++ WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec),
++ "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
+ return 0;
}
+early_initcall(rcu_spawn_core_kthreads);
/*
* Handle any core-RCU processing required by a call_rcu() invocation.
-@@ -3786,7 +3881,6 @@ void __init rcu_init(void)
+@@ -3355,7 +3466,8 @@ void __init rcu_init(void)
rcu_init_one();
if (dump_tree)
rcu_dump_rcu_node_tree();
-- open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
+- open_softirq(RCU_SOFTIRQ, rcu_core);
++ if (use_softirq)
++ open_softirq(RCU_SOFTIRQ, rcu_core_si);
/*
* We don't need protection against CPU-hotplug here because
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
-@@ -402,12 +402,10 @@ static const char *tp_rcu_varname __used
-
- int rcu_dynticks_snap(struct rcu_data *rdp);
-
--#ifdef CONFIG_RCU_BOOST
- DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
- DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
- DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
- DECLARE_PER_CPU(char, rcu_cpu_has_work);
--#endif /* #ifdef CONFIG_RCU_BOOST */
-
- /* Forward declarations for rcutree_plugin.h */
- static void rcu_bootup_announce(void);
-@@ -425,8 +423,8 @@ void call_rcu(struct rcu_head *head, rcu
+@@ -407,8 +407,8 @@ void call_rcu(struct rcu_head *head, rcu
static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
@@ -215,8 +248,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void rcu_cleanup_after_idle(void);
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
-@@ -24,17 +24,6 @@
- * Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+@@ -11,29 +11,7 @@
+ * Paul E. McKenney <paulmck@linux.ibm.com>
*/
-#include <linux/delay.h>
@@ -229,14 +262,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-#include "../time/tick-internal.h"
-
-#ifdef CONFIG_RCU_BOOST
--
#include "../locking/rtmutex_common.h"
-
- /*
-@@ -45,19 +34,6 @@ DEFINE_PER_CPU(unsigned int, rcu_cpu_kth
- DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
- DEFINE_PER_CPU(char, rcu_cpu_has_work);
-
-#else /* #ifdef CONFIG_RCU_BOOST */
-
-/*
@@ -249,20 +275,28 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-#define rt_mutex_futex_unlock(x) WARN_ON_ONCE(1)
-
-#endif /* #else #ifdef CONFIG_RCU_BOOST */
--
+
#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
- static bool __read_mostly rcu_nocb_poll; /* Offload kthread are to poll. */
-@@ -656,7 +632,7 @@ static void rcu_read_unlock_special(stru
+@@ -94,6 +72,8 @@ static void __init rcu_bootup_announce_o
+ pr_info("\tRCU debug GP init slowdown %d jiffies.\n", gp_init_delay);
+ if (gp_cleanup_delay)
+ pr_info("\tRCU debug GP init slowdown %d jiffies.\n", gp_cleanup_delay);
++ if (!use_softirq)
++ pr_info("\tRCU_SOFTIRQ processing moved to rcuc kthreads.\n");
+ if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG))
+ pr_info("\tRCU debug extended QS entry/exit.\n");
+ rcupdate_announce_bootup_oddness();
+@@ -631,7 +611,7 @@ static void rcu_read_unlock_special(stru
+ if (preempt_bh_were_disabled || irqs_were_disabled) {
+ WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, false);
/* Need to defer quiescent state until everything is enabled. */
- if (irqs_were_disabled) {
+- if (irqs_were_disabled) {
++ if (irqs_were_disabled && use_softirq) {
/* Enabling irqs does not reschedule, so... */
-- raise_softirq_irqoff(RCU_SOFTIRQ);
-+ invoke_rcu_core();
+ raise_softirq_irqoff(RCU_SOFTIRQ);
} else {
- /* Enabling BH or preempt does reschedule, so... */
- set_tsk_need_resched(current);
-@@ -1154,18 +1130,21 @@ dump_blkd_tasks(struct rcu_node *rnp, in
+@@ -948,18 +928,21 @@ dump_blkd_tasks(struct rcu_node *rnp, in
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
@@ -292,7 +326,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Carry out RCU priority boosting on the task indicated by ->exp_tasks
* or ->boost_tasks, advancing the pointer to the next task in the
-@@ -1304,23 +1283,6 @@ static void rcu_initiate_boost(struct rc
+@@ -1095,23 +1078,6 @@ static void rcu_initiate_boost(struct rc
}
/*
@@ -303,11 +337,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- unsigned long flags;
-
- local_irq_save(flags);
-- __this_cpu_write(rcu_cpu_has_work, 1);
-- if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
-- current != __this_cpu_read(rcu_cpu_kthread_task)) {
-- rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
-- __this_cpu_read(rcu_cpu_kthread_status));
+- __this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
+- if (__this_cpu_read(rcu_data.rcu_cpu_kthread_task) != NULL &&
+- current != __this_cpu_read(rcu_data.rcu_cpu_kthread_task)) {
+- rcu_wake_cond(__this_cpu_read(rcu_data.rcu_cpu_kthread_task),
+- __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
- }
- local_irq_restore(flags);
-}
@@ -316,15 +350,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* Is the current CPU running the RCU-callbacks kthread?
* Caller must have preemption disabled.
*/
-@@ -1373,65 +1335,6 @@ static int rcu_spawn_one_boost_kthread(s
+@@ -1164,59 +1130,6 @@ static int rcu_spawn_one_boost_kthread(s
return 0;
}
--static void rcu_kthread_do_work(void)
--{
-- rcu_do_batch(this_cpu_ptr(&rcu_data));
--}
--
-static void rcu_cpu_kthread_setup(unsigned int cpu)
-{
- struct sched_param sp;
@@ -335,12 +364,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
-static void rcu_cpu_kthread_park(unsigned int cpu)
-{
-- per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
+- per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
-}
-
-static int rcu_cpu_kthread_should_run(unsigned int cpu)
-{
-- return __this_cpu_read(rcu_cpu_has_work);
+- return __this_cpu_read(rcu_data.rcu_cpu_has_work);
-}
-
-/*
@@ -350,21 +379,20 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- */
-static void rcu_cpu_kthread(unsigned int cpu)
-{
-- unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
-- char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
+- unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
+- char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
- int spincnt;
-
- for (spincnt = 0; spincnt < 10; spincnt++) {
- trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
- local_bh_disable();
- *statusp = RCU_KTHREAD_RUNNING;
-- this_cpu_inc(rcu_cpu_kthread_loops);
- local_irq_disable();
- work = *workp;
- *workp = 0;
- local_irq_enable();
- if (work)
-- rcu_kthread_do_work();
+- rcu_do_batch(this_cpu_ptr(&rcu_data));
- local_bh_enable();
- if (*workp == 0) {
- trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
@@ -382,12 +410,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Set the per-rcu_node kthread's affinity to cover all CPUs that are
* served by the rcu_node in question. The CPU hotplug lock is still
-@@ -1462,27 +1365,13 @@ static void rcu_boost_kthread_setaffinit
+@@ -1247,27 +1160,13 @@ static void rcu_boost_kthread_setaffinit
free_cpumask_var(cm);
}
-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
-- .store = &rcu_cpu_kthread_task,
+- .store = &rcu_data.rcu_cpu_kthread_task,
- .thread_should_run = rcu_cpu_kthread_should_run,
- .thread_fn = rcu_cpu_kthread,
- .thread_comm = "rcuc/%u",
@@ -404,13 +432,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- int cpu;
- for_each_possible_cpu(cpu)
-- per_cpu(rcu_cpu_has_work, cpu) = 0;
+- per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
- if (WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec), "%s: Could not start rcub kthread, OOM is now expected behavior\n", __func__))
- return;
rcu_for_each_leaf_node(rnp)
(void)rcu_spawn_one_boost_kthread(rnp);
}
-@@ -1505,11 +1394,6 @@ static void rcu_initiate_boost(struct rc
+@@ -1290,11 +1189,6 @@ static void rcu_initiate_boost(struct rc
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
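
For readability, the per-CPU kthread spawn path that the hunk above assembles, with the patch markers stripped (a reconstruction of what is already shown, not an additional change):

static int __init rcu_spawn_core_kthreads(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
	if (!IS_ENABLED(CONFIG_RCU_BOOST) && use_softirq)
		return 0;	/* RCU core work keeps running from RCU_SOFTIRQ */
	WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec),
		  "%s: Could not start rcuc kthread, OOM is now expected behavior\n",
		  __func__);
	return 0;
}
early_initcall(rcu_spawn_core_kthreads);

Booting with rcutree.use_softirq=0 (or building with CONFIG_RCU_BOOST=y) therefore moves rcu_core() processing out of RCU_SOFTIRQ and into the per-CPU "rcuc/%u" kthreads.
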
diff --git a/debian/patches-rt/rcu-disable-rcu-fast-no-hz-on-rt.patch b/debian/patches-rt/rcu-disable-rcu-fast-no-hz-on-rt.patch
deleted file mode 100644
index e08558c8d..000000000
--- a/debian/patches-rt/rcu-disable-rcu-fast-no-hz-on-rt.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-Subject: rcu: Disable RCU_FAST_NO_HZ on RT
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Sun, 28 Oct 2012 13:26:09 +0000
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-This uses a timer_list timer from the irq disabled guts of the idle
-code. Disable it for now to prevent wreckage.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
----
- kernel/rcu/Kconfig | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/kernel/rcu/Kconfig
-+++ b/kernel/rcu/Kconfig
-@@ -172,7 +172,7 @@ config RCU_FANOUT_LEAF
-
- config RCU_FAST_NO_HZ
- bool "Accelerate last non-dyntick-idle CPU's grace periods"
-- depends on NO_HZ_COMMON && SMP && RCU_EXPERT
-+ depends on NO_HZ_COMMON && SMP && RCU_EXPERT && !PREEMPT_RT_FULL
- default n
- help
- This option permits CPUs to enter dynticks-idle state even if
diff --git a/debian/patches-rt/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch b/debian/patches-rt/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch
index 700b44f1a..a507f67d7 100644
--- a/debian/patches-rt/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch
+++ b/debian/patches-rt/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch
@@ -1,7 +1,7 @@
From: Julia Cartwright <julia@ni.com>
Date: Wed, 12 Oct 2016 11:21:14 -0500
Subject: [PATCH] rcu: enable rcu_normal_after_boot by default for RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
The forcing of an expedited grace period is an expensive and very
RT-application unfriendly operation, as it forcibly preempts all running
@@ -15,17 +15,20 @@ Acked-by: Paul E. McKenney <paulmck@linux.ibm.com>
Signed-off-by: Julia Cartwright <julia@ni.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- kernel/rcu/update.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
+ kernel/rcu/update.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
-@@ -68,7 +68,7 @@ extern int rcu_expedited; /* from sysctl
+@@ -55,8 +55,10 @@ extern int rcu_expedited; /* from sysctl
module_param(rcu_expedited, int, 0);
extern int rcu_normal; /* from sysctl */
module_param(rcu_normal, int, 0);
-static int rcu_normal_after_boot;
+static int rcu_normal_after_boot = IS_ENABLED(CONFIG_PREEMPT_RT_FULL);
++#ifndef CONFIG_PREEMPT_RT_FULL
module_param(rcu_normal_after_boot, int, 0);
++#endif
#endif /* #ifndef CONFIG_TINY_RCU */
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
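
Reassembled from the hunk above, the resulting kernel/rcu/update.c fragment reads as follows (a reconstruction): on PREEMPT_RT_FULL the default flips to enabled and the module parameter is compiled out, so the normal-after-boot behavior cannot be switched back off from the kernel command line.

static int rcu_normal_after_boot = IS_ENABLED(CONFIG_PREEMPT_RT_FULL);
#ifndef CONFIG_PREEMPT_RT_FULL
module_param(rcu_normal_after_boot, int, 0);
#endif
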
diff --git a/debian/patches-rt/rcu-make-RCU_BOOST-default-on-RT.patch b/debian/patches-rt/rcu-make-RCU_BOOST-default-on-RT.patch
index fb30de188..901c10c9e 100644
--- a/debian/patches-rt/rcu-make-RCU_BOOST-default-on-RT.patch
+++ b/debian/patches-rt/rcu-make-RCU_BOOST-default-on-RT.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 21 Mar 2014 20:19:05 +0100
Subject: rcu: make RCU_BOOST default on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Since it is no longer invoked from the softirq, people run into OOM more
often if the priority of the RCU thread is too low. Making boosting
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/rcu/Kconfig
+++ b/kernel/rcu/Kconfig
-@@ -190,8 +190,8 @@ config RCU_FAST_NO_HZ
+@@ -161,8 +161,8 @@ config RCU_FAST_NO_HZ
config RCU_BOOST
bool "Enable RCU priority boosting"
diff --git a/debian/patches-rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch b/debian/patches-rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch
index 831559f8c..cd8e77b0f 100644
--- a/debian/patches-rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch
+++ b/debian/patches-rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch
@@ -1,7 +1,7 @@
Subject: ARM: Initialize split page table locks for vector page
From: Frank Rowand <frank.rowand@am.sony.com>
Date: Sat, 1 Oct 2011 18:58:13 -0700
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Without this patch, ARM can not use SPLIT_PTLOCK_CPUS if
PREEMPT_RT_FULL=y because vectors_user_mapping() creates a
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
-@@ -328,6 +328,30 @@ unsigned long arch_randomize_brk(struct
+@@ -325,6 +325,30 @@ unsigned long arch_randomize_brk(struct
}
#ifdef CONFIG_MMU
diff --git a/debian/patches-rt/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch b/debian/patches-rt/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch
index d89d679d9..e83e0c109 100644
--- a/debian/patches-rt/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch
+++ b/debian/patches-rt/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch
@@ -1,7 +1,7 @@
From: Daniel Bristot de Oliveira <bristot@redhat.com>
Date: Mon, 26 Jun 2017 17:07:15 +0200
Subject: rt: Increase/decrease the nr of migratory tasks when enabling/disabling migration
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
There is a problem in the migrate_disable()/enable() implementation
regarding the number of migratory tasks in the rt/dl RQs. The problem
@@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7152,6 +7152,47 @@ const u32 sched_prio_to_wmult[40] = {
+@@ -7159,6 +7159,47 @@ const u32 sched_prio_to_wmult[40] = {
#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
@@ -129,7 +129,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void migrate_disable(void)
{
struct task_struct *p = current;
-@@ -7175,10 +7216,9 @@ void migrate_disable(void)
+@@ -7182,10 +7223,9 @@ void migrate_disable(void)
}
preempt_disable();
@@ -142,7 +142,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
preempt_enable();
}
-@@ -7210,9 +7250,8 @@ void migrate_enable(void)
+@@ -7217,9 +7257,8 @@ void migrate_enable(void)
preempt_disable();
diff --git a/debian/patches-rt/rt-introduce-cpu-chill.patch b/debian/patches-rt/rt-introduce-cpu-chill.patch
index ac8a97b1a..da068632a 100644
--- a/debian/patches-rt/rt-introduce-cpu-chill.patch
+++ b/debian/patches-rt/rt-introduce-cpu-chill.patch
@@ -1,7 +1,7 @@
Subject: rt: Introduce cpu_chill()
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 07 Mar 2012 20:51:03 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Retry loops on RT might loop forever when the modifying side was
preempted. Add cpu_chill() to replace cpu_relax(). cpu_chill()
@@ -62,7 +62,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/delay.h
+++ b/include/linux/delay.h
-@@ -64,4 +64,10 @@ static inline void ssleep(unsigned int s
+@@ -65,4 +65,10 @@ static inline void ssleep(unsigned int s
msleep(seconds * 1000);
}
@@ -75,7 +75,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* defined(_LINUX_DELAY_H) */
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -1878,6 +1878,38 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct
+@@ -1845,6 +1845,38 @@ SYSCALL_DEFINE2(nanosleep_time32, struct
}
#endif
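
A usage sketch of the primitive this patch adds (the polling loop and the wait_for_update() helper are illustrative, not part of the patch):

#include <linux/compiler.h>
#include <linux/delay.h>

/* Illustrative only: poll until a concurrent updater bumps a sequence
 * counter.  On mainline such loops spin with cpu_relax(); on RT that
 * can live-lock when the updater was preempted, so cpu_chill() does a
 * short sleep instead and lets the updater run. */
static void wait_for_update(const int *seq, int old)
{
	while (READ_ONCE(*seq) == old)
		cpu_chill();
}
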
diff --git a/debian/patches-rt/rt-local-irq-lock.patch b/debian/patches-rt/rt-local-irq-lock.patch
index 69e8be224..051a7127f 100644
--- a/debian/patches-rt/rt-local-irq-lock.patch
+++ b/debian/patches-rt/rt-local-irq-lock.patch
@@ -1,7 +1,7 @@
Subject: rt: Add local irq locks
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 20 Jun 2011 09:03:47 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Introduce locallock. For !RT this maps to preempt_disable()/
local_irq_disable() so there is not much that changes. For RT this will
diff --git a/debian/patches-rt/rt-preempt-base-config.patch b/debian/patches-rt/rt-preempt-base-config.patch
index 23395c4f5..92c85fe8e 100644
--- a/debian/patches-rt/rt-preempt-base-config.patch
+++ b/debian/patches-rt/rt-preempt-base-config.patch
@@ -1,7 +1,7 @@
Subject: rt: Provide PREEMPT_RT_BASE config switch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 17 Jun 2011 12:39:57 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Introduce PREEMPT_RT_BASE which enables parts of
PREEMPT_RT_FULL. Forces interrupt threading and enables some of the RT
@@ -14,7 +14,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
-@@ -1,3 +1,10 @@
+@@ -1,4 +1,11 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+config PREEMPT
+ bool
+ select PREEMPT_COUNT
@@ -25,7 +26,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
choice
prompt "Preemption Model"
-@@ -34,10 +41,10 @@ config PREEMPT_VOLUNTARY
+@@ -35,10 +42,10 @@ config PREEMPT_VOLUNTARY
Select this if you are building a kernel for a desktop system.
@@ -38,7 +39,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
help
This option reduces the latency of the kernel by making
-@@ -54,6 +61,14 @@ config PREEMPT
+@@ -55,6 +62,14 @@ config PREEMPT
embedded system with latency requirements in the milliseconds
range.
diff --git a/debian/patches-rt/rt-serial-warn-fix.patch b/debian/patches-rt/rt-serial-warn-fix.patch
index 2c63fe002..bed20a78a 100644
--- a/debian/patches-rt/rt-serial-warn-fix.patch
+++ b/debian/patches-rt/rt-serial-warn-fix.patch
@@ -1,7 +1,7 @@
Subject: rt: Improve the serial console PASS_LIMIT
From: Ingo Molnar <mingo@elte.hu>
Date: Wed Dec 14 13:05:54 CET 2011
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Beyond the warning:
diff --git a/debian/patches-rt/rtmutex-Make-lock_killable-work.patch b/debian/patches-rt/rtmutex-Make-lock_killable-work.patch
index bd8df0aa2..ad565b75b 100644
--- a/debian/patches-rt/rtmutex-Make-lock_killable-work.patch
+++ b/debian/patches-rt/rtmutex-Make-lock_killable-work.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 1 Apr 2017 12:50:59 +0200
Subject: [PATCH] rtmutex: Make lock_killable work
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Locking an rt mutex killable does not work because signal handling is
restricted to TASK_INTERRUPTIBLE.
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1201,18 +1201,13 @@ static int __sched
+@@ -1177,18 +1177,13 @@ static int __sched
if (try_to_take_rt_mutex(lock, current, waiter))
break;
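
The gist of the change, as a sketch (the hunk is truncated here; signal_pending_state() is the state-aware mainline helper): the wait loop checks pending signals against the sleep state, so TASK_KILLABLE sleeps abort on fatal signals instead of only TASK_INTERRUPTIBLE reacting.

	/* inside the __rt_mutex_slowlock() wait loop (sketch) */
	if (signal_pending_state(state, current)) {
		ret = -EINTR;
		break;
	}
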
diff --git a/debian/patches-rt/rtmutex-Provide-rt_mutex_slowlock_locked.patch b/debian/patches-rt/rtmutex-Provide-rt_mutex_slowlock_locked.patch
index a62bc9960..4882c42b5 100644
--- a/debian/patches-rt/rtmutex-Provide-rt_mutex_slowlock_locked.patch
+++ b/debian/patches-rt/rtmutex-Provide-rt_mutex_slowlock_locked.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 12 Oct 2017 16:14:22 +0200
Subject: rtmutex: Provide rt_mutex_slowlock_locked()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
This is the inner part of rt_mutex_slowlock(), required for rwsem-rt.
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1244,35 +1244,16 @@ static void rt_mutex_handle_deadlock(int
+@@ -1220,35 +1220,16 @@ static void rt_mutex_handle_deadlock(int
}
}
@@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
set_current_state(state);
-@@ -1280,16 +1261,16 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1256,16 +1237,16 @@ rt_mutex_slowlock(struct rt_mutex *lock,
if (unlikely(timeout))
hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
@@ -77,7 +77,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -1297,6 +1278,34 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1273,6 +1254,34 @@ rt_mutex_slowlock(struct rt_mutex *lock,
* unconditionally. We might have to fix that up.
*/
fixup_rt_mutex_waiters(lock);
@@ -122,7 +122,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* This is the control structure for tasks blocked on a rt_mutex,
-@@ -159,6 +160,12 @@ extern bool __rt_mutex_futex_unlock(stru
+@@ -156,6 +157,12 @@ extern bool __rt_mutex_futex_unlock(stru
struct wake_q_head *wqh);
extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
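
After the split, the outer function reduces to roughly the following (an abridged reconstruction; timeout-timer cleanup is omitted), and rwsem-rt can call rt_mutex_slowlock_locked() directly while already holding wait_lock:

static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  enum rtmutex_chainwalk chwalk)
{
	struct rt_mutex_waiter waiter;
	unsigned long flags;
	int ret;

	rt_mutex_init_waiter(&waiter);

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	ret = rt_mutex_slowlock_locked(lock, state, timeout, chwalk, &waiter);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	debug_rt_mutex_free_waiter(&waiter);
	return ret;
}
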
diff --git a/debian/patches-rt/rtmutex-add-mutex-implementation-based-on-rtmutex.patch b/debian/patches-rt/rtmutex-add-mutex-implementation-based-on-rtmutex.patch
index ce118a0af..9024f2f4a 100644
--- a/debian/patches-rt/rtmutex-add-mutex-implementation-based-on-rtmutex.patch
+++ b/debian/patches-rt/rtmutex-add-mutex-implementation-based-on-rtmutex.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 12 Oct 2017 17:17:03 +0200
Subject: rtmutex: add mutex implementation based on rtmutex
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
diff --git a/debian/patches-rt/rtmutex-add-rwlock-implementation-based-on-rtmutex.patch b/debian/patches-rt/rtmutex-add-rwlock-implementation-based-on-rtmutex.patch
index 7b4c91fa8..4b69ca09b 100644
--- a/debian/patches-rt/rtmutex-add-rwlock-implementation-based-on-rtmutex.patch
+++ b/debian/patches-rt/rtmutex-add-rwlock-implementation-based-on-rtmutex.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 12 Oct 2017 17:18:06 +0200
Subject: rtmutex: add rwlock implementation based on rtmutex
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
The implementation is bias-based, similar to the rwsem implementation.
diff --git a/debian/patches-rt/rtmutex-add-rwsem-implementation-based-on-rtmutex.patch b/debian/patches-rt/rtmutex-add-rwsem-implementation-based-on-rtmutex.patch
index 48906b8b2..353d49d08 100644
--- a/debian/patches-rt/rtmutex-add-rwsem-implementation-based-on-rtmutex.patch
+++ b/debian/patches-rt/rtmutex-add-rwsem-implementation-based-on-rtmutex.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 12 Oct 2017 17:28:34 +0200
Subject: rtmutex: add rwsem implementation based on rtmutex
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
The RT specific R/W semaphore implementation restricts the number of readers
to one because a writer cannot block on multiple readers and inherit its
diff --git a/debian/patches-rt/rtmutex-add-sleeping-lock-implementation.patch b/debian/patches-rt/rtmutex-add-sleeping-lock-implementation.patch
index 9d687f5c8..669bf8153 100644
--- a/debian/patches-rt/rtmutex-add-sleeping-lock-implementation.patch
+++ b/debian/patches-rt/rtmutex-add-sleeping-lock-implementation.patch
@@ -1,46 +1,47 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 12 Oct 2017 17:11:19 +0200
Subject: rtmutex: add sleeping lock implementation
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- include/linux/kernel.h | 4
+ include/linux/kernel.h | 5
include/linux/rtmutex.h | 21 +
include/linux/sched.h | 8
- include/linux/sched/wake_q.h | 27 ++
+ include/linux/sched/wake_q.h | 15 +
include/linux/spinlock_rt.h | 156 +++++++++++++
include/linux/spinlock_types_rt.h | 48 ++++
kernel/fork.c | 1
kernel/futex.c | 11
kernel/locking/rtmutex.c | 436 ++++++++++++++++++++++++++++++++++----
kernel/locking/rtmutex_common.h | 14 -
- kernel/sched/core.c | 28 +-
- 11 files changed, 695 insertions(+), 59 deletions(-)
+ kernel/sched/core.c | 39 ++-
+ 11 files changed, 696 insertions(+), 58 deletions(-)
create mode 100644 include/linux/spinlock_rt.h
create mode 100644 include/linux/spinlock_types_rt.h
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
-@@ -259,6 +259,9 @@ extern int _cond_resched(void);
+@@ -223,6 +223,10 @@ extern void __cant_sleep(const char *fil
*/
# define might_sleep() \
do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
+
+# define might_sleep_no_state_check() \
+ do { ___might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
- # define sched_annotate_sleep() (current->task_state_change = 0)
- #else
- static inline void ___might_sleep(const char *file, int line,
-@@ -266,6 +269,7 @@ extern int _cond_resched(void);
++
+ /**
+ * cant_sleep - annotation for functions that cannot sleep
+ *
+@@ -237,6 +241,7 @@ extern void __cant_sleep(const char *fil
static inline void __might_sleep(const char *file, int line,
int preempt_offset) { }
# define might_sleep() do { might_resched(); } while (0)
+# define might_sleep_no_state_check() do { might_resched(); } while (0)
+ # define cant_sleep() do { } while (0)
# define sched_annotate_sleep() do { } while (0)
#endif
-
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -14,11 +14,15 @@
@@ -99,7 +100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* @lock: the mutex to be queried
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -135,6 +135,9 @@ struct task_group;
+@@ -136,6 +136,9 @@ struct task_group;
smp_store_mb(current->state, (state_value)); \
} while (0)
@@ -109,7 +110,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define set_special_state(state_value) \
do { \
unsigned long flags; /* may shadow */ \
-@@ -144,6 +147,7 @@ struct task_group;
+@@ -145,6 +148,7 @@ struct task_group;
current->state = (state_value); \
raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
} while (0)
@@ -117,7 +118,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#else
/*
* set_current_state() includes a barrier so that the write of current->state
-@@ -188,6 +192,9 @@ struct task_group;
+@@ -189,6 +193,9 @@ struct task_group;
#define set_current_state(state_value) \
smp_store_mb(current->state, (state_value))
@@ -127,7 +128,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* set_special_state() should be used for those states when the blocking task
* can not use the regular condition based wait-loop. In that case we must
-@@ -914,6 +921,7 @@ struct task_struct {
+@@ -910,6 +917,7 @@ struct task_struct {
raw_spinlock_t pi_lock;
struct wake_q_node wake_q;
@@ -137,28 +138,18 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* PI waiters blocked on a rt_mutex held by this task: */
--- a/include/linux/sched/wake_q.h
+++ b/include/linux/sched/wake_q.h
-@@ -51,8 +51,29 @@ static inline void wake_q_init(struct wa
+@@ -51,8 +51,21 @@ static inline void wake_q_init(struct wa
head->lastp = &head->first;
}
--extern void wake_q_add(struct wake_q_head *head,
-- struct task_struct *task);
--extern void wake_up_q(struct wake_q_head *head);
-+extern void __wake_q_add(struct wake_q_head *head,
-+ struct task_struct *task, bool sleeper);
-+static inline void wake_q_add(struct wake_q_head *head,
-+ struct task_struct *task)
-+{
-+ __wake_q_add(head, task, false);
-+}
+
-+static inline void wake_q_add_sleeper(struct wake_q_head *head,
-+ struct task_struct *task)
-+{
-+ __wake_q_add(head, task, true);
-+}
+ extern void wake_q_add(struct wake_q_head *head, struct task_struct *task);
+ extern void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task);
+-extern void wake_up_q(struct wake_q_head *head);
++extern void wake_q_add_sleeper(struct wake_q_head *head, struct task_struct *task);
+
+extern void __wake_up_q(struct wake_q_head *head, bool sleeper);
++
+static inline void wake_up_q(struct wake_q_head *head)
+{
+ __wake_up_q(head, false);
@@ -382,7 +373,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#endif
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -941,6 +941,7 @@ static struct task_struct *dup_task_stru
+@@ -949,6 +949,7 @@ static struct task_struct *dup_task_stru
tsk->splice_pipe = NULL;
tsk->task_frag.page = NULL;
tsk->wake_q.next = NULL;
@@ -392,7 +383,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -1482,6 +1482,7 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1490,6 +1490,7 @@ static int wake_futex_pi(u32 __user *uad
struct task_struct *new_owner;
bool postunlock = false;
DEFINE_WAKE_Q(wake_q);
@@ -400,7 +391,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int ret = 0;
new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
-@@ -1543,13 +1544,13 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1549,13 +1550,13 @@ static int wake_futex_pi(u32 __user *uad
pi_state->owner = new_owner;
raw_spin_unlock(&new_owner->pi_lock);
@@ -417,7 +408,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ret;
}
-@@ -2861,7 +2862,7 @@ static int futex_lock_pi(u32 __user *uad
+@@ -2879,7 +2880,7 @@ static int futex_lock_pi(u32 __user *uad
goto no_block;
}
@@ -426,7 +417,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
-@@ -3240,7 +3241,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3253,7 +3254,7 @@ static int futex_wait_requeue_pi(u32 __u
* The waiter is allocated on our stack, manipulated by the requeue
* code while we sleep on uaddr.
*/
@@ -437,7 +428,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (unlikely(ret != 0))
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -7,6 +7,11 @@
+@@ -8,6 +8,11 @@
* Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
* Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
* Copyright (C) 2006 Esben Nielsen
@@ -449,7 +440,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
* See Documentation/locking/rt-mutex-design.txt for details.
*/
-@@ -234,7 +239,7 @@ static inline bool unlock_rt_mutex_safe(
+@@ -229,7 +234,7 @@ static inline bool unlock_rt_mutex_safe(
* Only use with rt_mutex_waiter_{less,equal}()
*/
#define task_to_waiter(p) \
@@ -458,7 +449,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline int
rt_mutex_waiter_less(struct rt_mutex_waiter *left,
-@@ -274,6 +279,27 @@ rt_mutex_waiter_equal(struct rt_mutex_wa
+@@ -269,6 +274,27 @@ rt_mutex_waiter_equal(struct rt_mutex_wa
return 1;
}
@@ -486,7 +477,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void
rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
-@@ -378,6 +404,14 @@ static bool rt_mutex_cond_detect_deadloc
+@@ -373,6 +399,14 @@ static bool rt_mutex_cond_detect_deadloc
return debug_rt_mutex_detect_deadlock(waiter, chwalk);
}
@@ -501,7 +492,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Max number of times we'll walk the boosting chain:
*/
-@@ -703,13 +737,16 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -697,13 +731,16 @@ static int rt_mutex_adjust_prio_chain(st
* follow here. This is the end of the chain we are walking.
*/
if (!rt_mutex_owner(lock)) {
@@ -520,7 +511,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
raw_spin_unlock_irq(&lock->wait_lock);
return 0;
}
-@@ -811,9 +848,11 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -805,9 +842,11 @@ static int rt_mutex_adjust_prio_chain(st
* @task: The task which wants to acquire the lock
* @waiter: The waiter that is queued to the lock's wait tree if the
* callsite called task_blocked_on_lock(), otherwise NULL
@@ -534,7 +525,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
lockdep_assert_held(&lock->wait_lock);
-@@ -849,12 +888,11 @@ static int try_to_take_rt_mutex(struct r
+@@ -843,12 +882,11 @@ static int try_to_take_rt_mutex(struct r
*/
if (waiter) {
/*
@@ -550,7 +541,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* We can acquire the lock. Remove the waiter from the
* lock waiters tree.
-@@ -872,14 +910,12 @@ static int try_to_take_rt_mutex(struct r
+@@ -866,14 +904,12 @@ static int try_to_take_rt_mutex(struct r
*/
if (rt_mutex_has_waiters(lock)) {
/*
@@ -569,7 +560,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* The current top waiter stays enqueued. We
* don't have to change anything in the lock
-@@ -926,6 +962,296 @@ static int try_to_take_rt_mutex(struct r
+@@ -920,6 +956,296 @@ static int try_to_take_rt_mutex(struct r
return 1;
}
@@ -866,7 +857,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Task blocks on lock.
*
-@@ -1039,6 +1365,7 @@ static int task_blocks_on_rt_mutex(struc
+@@ -1017,6 +1343,7 @@ static int task_blocks_on_rt_mutex(struc
* Called with lock->wait_lock held and interrupts disabled.
*/
static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
@@ -874,7 +865,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct rt_mutex *lock)
{
struct rt_mutex_waiter *waiter;
-@@ -1078,7 +1405,10 @@ static void mark_wakeup_next_waiter(stru
+@@ -1056,7 +1383,10 @@ static void mark_wakeup_next_waiter(stru
* Pairs with preempt_enable() in rt_mutex_postunlock();
*/
preempt_disable();
@@ -886,7 +877,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
raw_spin_unlock(&current->pi_lock);
}
-@@ -1162,21 +1492,22 @@ void rt_mutex_adjust_pi(struct task_stru
+@@ -1138,21 +1468,22 @@ void rt_mutex_adjust_pi(struct task_stru
return;
}
next_lock = waiter->lock;
@@ -911,7 +902,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/**
-@@ -1293,7 +1624,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1269,7 +1600,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
unsigned long flags;
int ret = 0;
@@ -920,7 +911,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Technically we could use raw_spin_[un]lock_irq() here, but this can
-@@ -1366,7 +1697,8 @@ static inline int rt_mutex_slowtrylock(s
+@@ -1342,7 +1673,8 @@ static inline int rt_mutex_slowtrylock(s
* Return whether the current task needs to call rt_mutex_postunlock().
*/
static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
@@ -930,7 +921,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
unsigned long flags;
-@@ -1420,7 +1752,7 @@ static bool __sched rt_mutex_slowunlock(
+@@ -1396,7 +1728,7 @@ static bool __sched rt_mutex_slowunlock(
*
* Queue the next waiter for wakeup once we release the wait_lock.
*/
@@ -939,7 +930,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
return true; /* call rt_mutex_postunlock() */
-@@ -1472,9 +1804,11 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
+@@ -1448,9 +1780,11 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
/*
* Performs the wakeup of the the top-waiter and re-enables preemption.
*/
@@ -952,7 +943,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Pairs with preempt_disable() in rt_mutex_slowunlock() */
preempt_enable();
-@@ -1483,15 +1817,17 @@ void rt_mutex_postunlock(struct wake_q_h
+@@ -1459,15 +1793,17 @@ void rt_mutex_postunlock(struct wake_q_h
static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
bool (*slowfn)(struct rt_mutex *lock,
@@ -973,7 +964,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
int __sched __rt_mutex_lock_state(struct rt_mutex *lock, int state)
-@@ -1673,16 +2009,13 @@ void __sched __rt_mutex_unlock(struct rt
+@@ -1649,16 +1985,13 @@ void __sched __rt_mutex_unlock(struct rt
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
mutex_release(&lock->dep_map, 1, _RET_IP_);
@@ -994,7 +985,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
lockdep_assert_held(&lock->wait_lock);
-@@ -1699,23 +2032,35 @@ bool __sched __rt_mutex_futex_unlock(str
+@@ -1675,23 +2008,35 @@ bool __sched __rt_mutex_futex_unlock(str
* avoid inversion prior to the wakeup. preempt_disable()
* therein pairs with rt_mutex_postunlock().
*/
@@ -1033,7 +1024,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/**
-@@ -1754,7 +2099,7 @@ void __rt_mutex_init(struct rt_mutex *lo
+@@ -1730,7 +2075,7 @@ void __rt_mutex_init(struct rt_mutex *lo
if (name && key)
debug_rt_mutex_init(lock, name, key);
}
@@ -1042,7 +1033,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
-@@ -1949,6 +2294,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
+@@ -1897,6 +2242,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
struct hrtimer_sleeper *to,
struct rt_mutex_waiter *waiter)
{
@@ -1050,7 +1041,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int ret;
raw_spin_lock_irq(&lock->wait_lock);
-@@ -1960,6 +2306,24 @@ int rt_mutex_wait_proxy_lock(struct rt_m
+@@ -1908,6 +2254,24 @@ int rt_mutex_wait_proxy_lock(struct rt_m
* have to fix that up.
*/
fixup_rt_mutex_waiters(lock);
@@ -1085,7 +1076,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_DEBUG_RT_MUTEXES
unsigned long ip;
struct pid *deadlock_task_pid;
-@@ -139,7 +140,7 @@ extern void rt_mutex_init_proxy_locked(s
+@@ -136,7 +137,7 @@ extern void rt_mutex_init_proxy_locked(s
struct task_struct *proxy_owner);
extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
struct task_struct *proxy_owner);
@@ -1094,7 +1085,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
struct rt_mutex_waiter *waiter,
struct task_struct *task);
-@@ -157,9 +158,12 @@ extern int __rt_mutex_futex_trylock(stru
+@@ -154,9 +155,12 @@ extern int __rt_mutex_futex_trylock(stru
extern void rt_mutex_futex_unlock(struct rt_mutex *lock);
extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
@@ -1109,7 +1100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* RW semaphore special interface */
extern int __rt_mutex_lock_state(struct rt_mutex *lock, int state);
-@@ -169,6 +173,10 @@ int __sched rt_mutex_slowlock_locked(str
+@@ -166,6 +170,10 @@ int __sched rt_mutex_slowlock_locked(str
struct hrtimer_sleeper *timeout,
enum rtmutex_chainwalk chwalk,
struct rt_mutex_waiter *waiter);
@@ -1122,13 +1113,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
# include "rtmutex-debug.h"
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -413,9 +413,15 @@ static bool set_nr_if_polling(struct tas
- * This function must be used as-if it were wake_up_process(); IOW the task
- * must be ready to be woken at this location.
- */
--void wake_q_add(struct wake_q_head *head, struct task_struct *task)
-+void __wake_q_add(struct wake_q_head *head, struct task_struct *task,
-+ bool sleeper)
+@@ -403,9 +403,15 @@ static bool set_nr_if_polling(struct tas
+ #endif
+ #endif
+
+-static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
++static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task,
++ bool sleeper)
{
- struct wake_q_node *node = &task->wake_q;
+ struct wake_q_node *node;
@@ -1140,8 +1131,28 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Atomically grab the task, if ->wake_q is !nil already it means
-@@ -438,24 +444,32 @@ void wake_q_add(struct wake_q_head *head
- head->lastp = &node->next;
+@@ -441,7 +447,13 @@ static bool __wake_q_add(struct wake_q_h
+ */
+ void wake_q_add(struct wake_q_head *head, struct task_struct *task)
+ {
+- if (__wake_q_add(head, task))
++ if (__wake_q_add(head, task, false))
++ get_task_struct(task);
++}
++
++void wake_q_add_sleeper(struct wake_q_head *head, struct task_struct *task)
++{
++ if (__wake_q_add(head, task, true))
+ get_task_struct(task);
+ }
+
+@@ -464,28 +476,39 @@ void wake_q_add(struct wake_q_head *head
+ */
+ void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
+ {
+- if (!__wake_q_add(head, task))
++ if (!__wake_q_add(head, task, false))
+ put_task_struct(task);
}
-void wake_up_q(struct wake_q_head *head)
@@ -1157,11 +1168,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ task = container_of(node, struct task_struct, wake_q_sleeper);
+ else
+ task = container_of(node, struct task_struct, wake_q);
++
BUG_ON(!task);
/* Task can safely be re-inserted now: */
node = node->next;
- task->wake_q.next = NULL;
--
+
+ if (sleeper)
+ task->wake_q_sleeper.next = NULL;
+ else
@@ -1175,6 +1187,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ wake_up_lock_sleeper(task);
+ else
+ wake_up_process(task);
++
put_task_struct(task);
}
}
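
Putting the hunks above together, an unlock path now carries two wake queues (a sketch; example_unlock() is an illustrative caller, the callee signatures match the hunks):

static void example_unlock(struct rt_mutex *lock)
{
	DEFINE_WAKE_Q(wake_q);		/* regular waiters */
	DEFINE_WAKE_Q(wake_sleeper_q);	/* waiters on RT sleeping spinlocks */

	if (rt_mutex_slowunlock(lock, &wake_q, &wake_sleeper_q))
		rt_mutex_postunlock(&wake_q, &wake_sleeper_q);
}

Sleeper wakeups are delivered via wake_up_lock_sleeper(), which wakes the task without clobbering a task state it saved before blocking on the lock.
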
diff --git a/debian/patches-rt/rtmutex-add-ww_mutex-addon-for-mutex-rt.patch b/debian/patches-rt/rtmutex-add-ww_mutex-addon-for-mutex-rt.patch
index 36b86314f..b1b349211 100644
--- a/debian/patches-rt/rtmutex-add-ww_mutex-addon-for-mutex-rt.patch
+++ b/debian/patches-rt/rtmutex-add-ww_mutex-addon-for-mutex-rt.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 12 Oct 2017 17:34:38 +0200
Subject: rtmutex: add ww_mutex addon for mutex-rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
@@ -12,7 +12,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -23,6 +23,7 @@
+@@ -24,6 +24,7 @@
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/timer.h>
@@ -20,7 +20,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include "rtmutex_common.h"
-@@ -1245,6 +1246,40 @@ EXPORT_SYMBOL(__rt_spin_lock_init);
+@@ -1239,6 +1240,40 @@ EXPORT_SYMBOL(__rt_spin_lock_init);
#endif /* PREEMPT_RT_FULL */
@@ -61,7 +61,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline int
try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
struct rt_mutex_waiter *waiter)
-@@ -1523,7 +1558,8 @@ void rt_mutex_init_waiter(struct rt_mute
+@@ -1499,7 +1534,8 @@ void rt_mutex_init_waiter(struct rt_mute
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
struct hrtimer_sleeper *timeout,
@@ -71,7 +71,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
int ret = 0;
-@@ -1541,6 +1577,12 @@ static int __sched
+@@ -1517,6 +1553,12 @@ static int __sched
break;
}
@@ -84,7 +84,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
raw_spin_unlock_irq(&lock->wait_lock);
debug_rt_mutex_print_deadlock(waiter);
-@@ -1575,16 +1617,106 @@ static void rt_mutex_handle_deadlock(int
+@@ -1551,16 +1593,106 @@ static void rt_mutex_handle_deadlock(int
}
}
@@ -192,7 +192,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
set_current_state(state);
-@@ -1594,14 +1726,24 @@ int __sched rt_mutex_slowlock_locked(str
+@@ -1570,14 +1702,24 @@ int __sched rt_mutex_slowlock_locked(str
ret = task_blocks_on_rt_mutex(lock, waiter, current, chwalk);
@@ -220,7 +220,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -1618,7 +1760,8 @@ int __sched rt_mutex_slowlock_locked(str
+@@ -1594,7 +1736,8 @@ int __sched rt_mutex_slowlock_locked(str
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
struct hrtimer_sleeper *timeout,
@@ -230,7 +230,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct rt_mutex_waiter waiter;
unsigned long flags;
-@@ -1636,7 +1779,8 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1612,7 +1755,8 @@ rt_mutex_slowlock(struct rt_mutex *lock,
*/
raw_spin_lock_irqsave(&lock->wait_lock, flags);
@@ -240,7 +240,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-@@ -1766,29 +1910,33 @@ static bool __sched rt_mutex_slowunlock(
+@@ -1742,29 +1886,33 @@ static bool __sched rt_mutex_slowunlock(
*/
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
@@ -278,7 +278,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static inline int
-@@ -1833,7 +1981,7 @@ rt_mutex_fastunlock(struct rt_mutex *loc
+@@ -1809,7 +1957,7 @@ rt_mutex_fastunlock(struct rt_mutex *loc
int __sched __rt_mutex_lock_state(struct rt_mutex *lock, int state)
{
might_sleep();
@@ -287,7 +287,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/**
-@@ -1953,6 +2101,7 @@ rt_mutex_timed_lock(struct rt_mutex *loc
+@@ -1929,6 +2077,7 @@ rt_mutex_timed_lock(struct rt_mutex *loc
mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
RT_MUTEX_MIN_CHAINWALK,
@@ -295,7 +295,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
rt_mutex_slowlock);
if (ret)
mutex_release(&lock->dep_map, 1, _RET_IP_);
-@@ -2300,7 +2449,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
+@@ -2248,7 +2397,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
raw_spin_lock_irq(&lock->wait_lock);
/* sleep on the mutex */
set_current_state(TASK_INTERRUPTIBLE);
@@ -304,7 +304,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
* have to fix that up.
-@@ -2385,3 +2534,99 @@ bool rt_mutex_cleanup_proxy_lock(struct
+@@ -2333,3 +2482,99 @@ bool rt_mutex_cleanup_proxy_lock(struct
return cleanup;
}
@@ -406,7 +406,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#endif
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
-@@ -165,6 +165,7 @@ extern void rt_mutex_postunlock(struct w
+@@ -162,6 +162,7 @@ extern void rt_mutex_postunlock(struct w
struct wake_q_head *wake_sleeper_q);
/* RW semaphore special interface */
@@ -414,7 +414,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern int __rt_mutex_lock_state(struct rt_mutex *lock, int state);
extern int __rt_mutex_trylock(struct rt_mutex *lock);
-@@ -172,6 +173,7 @@ extern void __rt_mutex_unlock(struct rt_
+@@ -169,6 +170,7 @@ extern void __rt_mutex_unlock(struct rt_
int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
struct hrtimer_sleeper *timeout,
enum rtmutex_chainwalk chwalk,
diff --git a/debian/patches-rt/rtmutex-annotate-sleeping-lock-context.patch b/debian/patches-rt/rtmutex-annotate-sleeping-lock-context.patch
index 73f2ab036..38ccb16c2 100644
--- a/debian/patches-rt/rtmutex-annotate-sleeping-lock-context.patch
+++ b/debian/patches-rt/rtmutex-annotate-sleeping-lock-context.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 21 Sep 2017 14:25:13 +0200
Subject: [PATCH] rtmutex: annotate sleeping lock context
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
The RCU code complains about schedule() within a rcu_read_lock() section.
The valid scenario on -RT is if a sleeping lock is held. In order to suppress
@@ -52,7 +52,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define migrate_enable() barrier()
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -672,6 +672,15 @@ struct task_struct {
+@@ -664,6 +664,15 @@ struct task_struct {
# ifdef CONFIG_SCHED_DEBUG
int migrate_disable_atomic;
# endif
@@ -68,7 +68,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
#ifdef CONFIG_PREEMPT_RCU
-@@ -1813,6 +1822,23 @@ static __always_inline bool need_resched
+@@ -1816,6 +1825,23 @@ static __always_inline bool need_resched
return unlikely(tif_need_resched());
}
@@ -94,7 +94,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1141,6 +1141,7 @@ void __sched rt_spin_lock_slowunlock(str
+@@ -1135,6 +1135,7 @@ void __sched rt_spin_lock_slowunlock(str
void __lockfunc rt_spin_lock(spinlock_t *lock)
{
@@ -102,7 +102,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
migrate_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
-@@ -1155,6 +1156,7 @@ void __lockfunc __rt_spin_lock(struct rt
+@@ -1149,6 +1150,7 @@ void __lockfunc __rt_spin_lock(struct rt
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
{
@@ -110,7 +110,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
migrate_disable();
spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
-@@ -1168,6 +1170,7 @@ void __lockfunc rt_spin_unlock(spinlock_
+@@ -1162,6 +1164,7 @@ void __lockfunc rt_spin_unlock(spinlock_
spin_release(&lock->dep_map, 1, _RET_IP_);
rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
migrate_enable();
@@ -118,7 +118,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
EXPORT_SYMBOL(rt_spin_unlock);
-@@ -1193,12 +1196,15 @@ int __lockfunc rt_spin_trylock(spinlock_
+@@ -1187,12 +1190,15 @@ int __lockfunc rt_spin_trylock(spinlock_
{
int ret;
@@ -136,7 +136,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ret;
}
EXPORT_SYMBOL(rt_spin_trylock);
-@@ -1210,6 +1216,7 @@ int __lockfunc rt_spin_trylock_bh(spinlo
+@@ -1204,6 +1210,7 @@ int __lockfunc rt_spin_trylock_bh(spinlo
local_bh_disable();
ret = __rt_mutex_trylock(&lock->lock);
if (ret) {
@@ -144,7 +144,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
migrate_disable();
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
} else
-@@ -1225,6 +1232,7 @@ int __lockfunc rt_spin_trylock_irqsave(s
+@@ -1219,6 +1226,7 @@ int __lockfunc rt_spin_trylock_irqsave(s
*flags = 0;
ret = __rt_mutex_trylock(&lock->lock);
if (ret) {
@@ -223,7 +223,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
-@@ -330,11 +330,15 @@ void rcu_note_context_switch(bool preemp
+@@ -307,11 +307,15 @@ void rcu_note_context_switch(bool preemp
struct task_struct *t = current;
struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
struct rcu_node *rnp;
@@ -242,7 +242,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7321,4 +7321,49 @@ void migrate_enable(void)
+@@ -7335,4 +7335,49 @@ void migrate_enable(void)
preempt_enable();
}
EXPORT_SYMBOL(migrate_enable);
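
Condensed, the annotation pattern the hunks add looks like this (a reconstruction of the modified rt_spin_lock()/rt_spin_unlock() pair):

void __lockfunc rt_spin_lock(spinlock_t *lock)
{
	sleeping_lock_inc();	/* a sleeping lock is about to be taken */
	migrate_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
}

void __lockfunc rt_spin_unlock(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
	migrate_enable();
	sleeping_lock_dec();
}

With the per-task counter raised, rcu_note_context_switch() treats a schedule() inside an RCU read-side section as lock-related preemption rather than an illegal blocking call.
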
diff --git a/debian/patches-rt/rtmutex-avoid-include-hell.patch b/debian/patches-rt/rtmutex-avoid-include-hell.patch
index 0d696beeb..773f7c90f 100644
--- a/debian/patches-rt/rtmutex-avoid-include-hell.patch
+++ b/debian/patches-rt/rtmutex-avoid-include-hell.patch
@@ -1,7 +1,7 @@
Subject: rtmutex: Avoid include hell
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 29 Jun 2011 20:06:39 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Include only the required raw types. This avoids pulling in the
complete spinlock header which in turn requires rtmutex.h at some point.
diff --git a/debian/patches-rt/rtmutex-export-lockdep-less-version-of-rt_mutex-s-lo.patch b/debian/patches-rt/rtmutex-export-lockdep-less-version-of-rt_mutex-s-lo.patch
index 7a66b5157..d18093da3 100644
--- a/debian/patches-rt/rtmutex-export-lockdep-less-version-of-rt_mutex-s-lo.patch
+++ b/debian/patches-rt/rtmutex-export-lockdep-less-version-of-rt_mutex-s-lo.patch
@@ -2,7 +2,7 @@ From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 12 Oct 2017 16:36:39 +0200
Subject: rtmutex: export lockdep-less version of rt_mutex's lock,
trylock and unlock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Required for lock implementations on top of rtmutex.
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1494,12 +1494,33 @@ rt_mutex_fastunlock(struct rt_mutex *loc
+@@ -1470,12 +1470,33 @@ rt_mutex_fastunlock(struct rt_mutex *loc
rt_mutex_postunlock(&wake_q);
}
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-@@ -1540,16 +1561,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
+@@ -1516,16 +1537,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
*/
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
{
@@ -69,7 +69,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
-@@ -1575,13 +1587,10 @@ int __sched __rt_mutex_futex_trylock(str
+@@ -1551,13 +1563,10 @@ int __sched __rt_mutex_futex_trylock(str
* Returns:
* 0 on success
* -EINTR when interrupted by a signal
@@ -84,7 +84,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
-@@ -1616,6 +1625,18 @@ rt_mutex_timed_lock(struct rt_mutex *loc
+@@ -1592,6 +1601,18 @@ rt_mutex_timed_lock(struct rt_mutex *loc
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
@@ -103,7 +103,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* rt_mutex_trylock - try to lock a rt_mutex
*
-@@ -1631,14 +1652,7 @@ int __sched rt_mutex_trylock(struct rt_m
+@@ -1607,14 +1628,7 @@ int __sched rt_mutex_trylock(struct rt_m
{
int ret;
@@ -119,7 +119,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (ret)
mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
-@@ -1646,6 +1660,11 @@ int __sched rt_mutex_trylock(struct rt_m
+@@ -1622,6 +1636,11 @@ int __sched rt_mutex_trylock(struct rt_m
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);
@@ -133,7 +133,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
-@@ -162,6 +162,9 @@ extern bool __rt_mutex_futex_unlock(stru
+@@ -159,6 +159,9 @@ extern bool __rt_mutex_futex_unlock(stru
extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
/* RW semaphore special interface */
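
Callers consume the lockdep-less variants along these lines (an abridged sketch based on the rt_spin_trylock() shape used elsewhere in this series; migrate-disable handling is omitted here): the substitution locks carry their own dep_map, so they call the raw primitive and do the lockdep annotation themselves.

int __lockfunc rt_spin_trylock(spinlock_t *lock)
{
	int ret;

	ret = __rt_mutex_trylock(&lock->lock);	/* no lockdep inside */
	if (ret)
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	return ret;
}
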
diff --git a/debian/patches-rt/rtmutex-futex-prepare-rt.patch b/debian/patches-rt/rtmutex-futex-prepare-rt.patch
deleted file mode 100644
index 7aabe5d09..000000000
--- a/debian/patches-rt/rtmutex-futex-prepare-rt.patch
+++ /dev/null
@@ -1,245 +0,0 @@
-Subject: rtmutex: Handle the various new futex race conditions
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Fri, 10 Jun 2011 11:04:15 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-RT opens a few new interesting race conditions in the rtmutex/futex
-combo due to futex hash bucket lock being a 'sleeping' spinlock and
-therefore not disabling preemption.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- kernel/futex.c | 77 ++++++++++++++++++++++++++++++++--------
- kernel/locking/rtmutex.c | 36 +++++++++++++++---
- kernel/locking/rtmutex_common.h | 2 +
- 3 files changed, 94 insertions(+), 21 deletions(-)
-
---- a/kernel/futex.c
-+++ b/kernel/futex.c
-@@ -2154,6 +2154,16 @@ static int futex_requeue(u32 __user *uad
- requeue_pi_wake_futex(this, &key2, hb2);
- drop_count++;
- continue;
-+ } else if (ret == -EAGAIN) {
-+ /*
-+ * Waiter was woken by timeout or
-+ * signal and has set pi_blocked_on to
-+ * PI_WAKEUP_INPROGRESS before we
-+ * tried to enqueue it on the rtmutex.
-+ */
-+ this->pi_state = NULL;
-+ put_pi_state(pi_state);
-+ continue;
- } else if (ret) {
- /*
- * rt_mutex_start_proxy_lock() detected a
-@@ -3201,7 +3211,7 @@ static int futex_wait_requeue_pi(u32 __u
- struct hrtimer_sleeper timeout, *to = NULL;
- struct futex_pi_state *pi_state = NULL;
- struct rt_mutex_waiter rt_waiter;
-- struct futex_hash_bucket *hb;
-+ struct futex_hash_bucket *hb, *hb2;
- union futex_key key2 = FUTEX_KEY_INIT;
- struct futex_q q = futex_q_init;
- int res, ret;
-@@ -3259,20 +3269,55 @@ static int futex_wait_requeue_pi(u32 __u
- /* Queue the futex_q, drop the hb lock, wait for wakeup. */
- futex_wait_queue_me(hb, &q, to);
-
-- spin_lock(&hb->lock);
-- ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
-- spin_unlock(&hb->lock);
-- if (ret)
-- goto out_put_keys;
-+ /*
-+ * On RT we must avoid races with requeue and trying to block
-+ * on two mutexes (hb->lock and uaddr2's rtmutex) by
-+ * serializing access to pi_blocked_on with pi_lock.
-+ */
-+ raw_spin_lock_irq(&current->pi_lock);
-+ if (current->pi_blocked_on) {
-+ /*
-+ * We have been requeued or are in the process of
-+ * being requeued.
-+ */
-+ raw_spin_unlock_irq(&current->pi_lock);
-+ } else {
-+ /*
-+ * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS
-+ * prevents a concurrent requeue from moving us to the
-+ * uaddr2 rtmutex. After that we can safely acquire
-+ * (and possibly block on) hb->lock.
-+ */
-+ current->pi_blocked_on = PI_WAKEUP_INPROGRESS;
-+ raw_spin_unlock_irq(&current->pi_lock);
-+
-+ spin_lock(&hb->lock);
-+
-+ /*
-+ * Clean up pi_blocked_on. We might leak it otherwise
-+ * when we succeeded with the hb->lock in the fast
-+ * path.
-+ */
-+ raw_spin_lock_irq(&current->pi_lock);
-+ current->pi_blocked_on = NULL;
-+ raw_spin_unlock_irq(&current->pi_lock);
-+
-+ ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
-+ spin_unlock(&hb->lock);
-+ if (ret)
-+ goto out_put_keys;
-+ }
-
- /*
-- * In order for us to be here, we know our q.key == key2, and since
-- * we took the hb->lock above, we also know that futex_requeue() has
-- * completed and we no longer have to concern ourselves with a wakeup
-- * race with the atomic proxy lock acquisition by the requeue code. The
-- * futex_requeue dropped our key1 reference and incremented our key2
-- * reference count.
-+ * In order to be here, we have either been requeued, are in
-+ * the process of being requeued, or requeue successfully
-+ * acquired uaddr2 on our behalf. If pi_blocked_on was
-+ * non-null above, we may be racing with a requeue. Do not
-+ * rely on q->lock_ptr to be hb2->lock until after blocking on
-+ * hb->lock or hb2->lock. The futex_requeue dropped our key1
-+ * reference and incremented our key2 reference count.
- */
-+ hb2 = hash_futex(&key2);
-
- /* Check if the requeue code acquired the second futex for us. */
- if (!q.rt_waiter) {
-@@ -3281,7 +3326,8 @@ static int futex_wait_requeue_pi(u32 __u
- * did a lock-steal - fix up the PI-state in that case.
- */
- if (q.pi_state && (q.pi_state->owner != current)) {
-- spin_lock(q.lock_ptr);
-+ spin_lock(&hb2->lock);
-+ BUG_ON(&hb2->lock != q.lock_ptr);
- ret = fixup_pi_state_owner(uaddr2, &q, current);
- if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
- pi_state = q.pi_state;
-@@ -3292,7 +3338,7 @@ static int futex_wait_requeue_pi(u32 __u
- * the requeue_pi() code acquired for us.
- */
- put_pi_state(q.pi_state);
-- spin_unlock(q.lock_ptr);
-+ spin_unlock(&hb2->lock);
- }
- } else {
- struct rt_mutex *pi_mutex;
-@@ -3306,7 +3352,8 @@ static int futex_wait_requeue_pi(u32 __u
- pi_mutex = &q.pi_state->pi_mutex;
- ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
-
-- spin_lock(q.lock_ptr);
-+ spin_lock(&hb2->lock);
-+ BUG_ON(&hb2->lock != q.lock_ptr);
- if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
- ret = 0;
-
---- a/kernel/locking/rtmutex.c
-+++ b/kernel/locking/rtmutex.c
-@@ -135,6 +135,11 @@ static void fixup_rt_mutex_waiters(struc
- WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
- }
-
-+static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
-+{
-+ return waiter && waiter != PI_WAKEUP_INPROGRESS;
-+}
-+
- /*
- * We can speed up the acquire/release, if there's no debugging state to be
- * set up.
-@@ -379,7 +384,8 @@ int max_lock_depth = 1024;
-
- static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
- {
-- return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
-+ return rt_mutex_real_waiter(p->pi_blocked_on) ?
-+ p->pi_blocked_on->lock : NULL;
- }
-
- /*
-@@ -515,7 +521,7 @@ static int rt_mutex_adjust_prio_chain(st
- * reached or the state of the chain has changed while we
- * dropped the locks.
- */
-- if (!waiter)
-+ if (!rt_mutex_real_waiter(waiter))
- goto out_unlock_pi;
-
- /*
-@@ -951,6 +957,22 @@ static int task_blocks_on_rt_mutex(struc
- return -EDEADLK;
-
- raw_spin_lock(&task->pi_lock);
-+ /*
-+ * In the case of futex requeue PI, this will be a proxy
-+ * lock. The task will wake unaware that it is enqueueed on
-+ * lock. The task will wake unaware that it is enqueued on
-+ * pi_blocked_on via the PI_WAKEUP_INPROGRESS
-+ * flag. futex_wait_requeue_pi() sets this when it wakes up
-+ * before requeue (due to a signal or timeout). Do not enqueue
-+ * the task if PI_WAKEUP_INPROGRESS is set.
-+ */
-+ if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) {
-+ raw_spin_unlock(&task->pi_lock);
-+ return -EAGAIN;
-+ }
-+
-+ BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on));
-+
- waiter->task = task;
- waiter->lock = lock;
- waiter->prio = task->prio;
-@@ -974,7 +996,7 @@ static int task_blocks_on_rt_mutex(struc
- rt_mutex_enqueue_pi(owner, waiter);
-
- rt_mutex_adjust_prio(owner);
-- if (owner->pi_blocked_on)
-+ if (rt_mutex_real_waiter(owner->pi_blocked_on))
- chain_walk = 1;
- } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
- chain_walk = 1;
-@@ -1070,7 +1092,7 @@ static void remove_waiter(struct rt_mute
- {
- bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
- struct task_struct *owner = rt_mutex_owner(lock);
-- struct rt_mutex *next_lock;
-+ struct rt_mutex *next_lock = NULL;
-
- lockdep_assert_held(&lock->wait_lock);
-
-@@ -1096,7 +1118,8 @@ static void remove_waiter(struct rt_mute
- rt_mutex_adjust_prio(owner);
-
- /* Store the lock on which owner is blocked or NULL */
-- next_lock = task_blocked_on_lock(owner);
-+ if (rt_mutex_real_waiter(owner->pi_blocked_on))
-+ next_lock = task_blocked_on_lock(owner);
-
- raw_spin_unlock(&owner->pi_lock);
-
-@@ -1132,7 +1155,8 @@ void rt_mutex_adjust_pi(struct task_stru
- raw_spin_lock_irqsave(&task->pi_lock, flags);
-
- waiter = task->pi_blocked_on;
-- if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
-+ if (!rt_mutex_real_waiter(waiter) ||
-+ rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
- return;
- }
---- a/kernel/locking/rtmutex_common.h
-+++ b/kernel/locking/rtmutex_common.h
-@@ -130,6 +130,8 @@ enum rtmutex_chainwalk {
- /*
- * PI-futex support (proxy locking functions, etc.):
- */
-+#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1)
-+
- extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
- extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
- struct task_struct *proxy_owner);
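
The interesting part of the removed patch is the PI_WAKEUP_INPROGRESS
handshake: a waiter that wakes early (signal/timeout) marks its
pi_blocked_on under pi_lock, and a concurrent requeue that sees the marker
backs off with -EAGAIN instead of enqueueing the task on a second lock. A
minimal user-space model of that handshake, with pi_lock played by a
pthread mutex (all names here are illustrative, not kernel API):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    struct waiter { int unused; };

    #define PI_WAKEUP_INPROGRESS ((struct waiter *)1)

    struct task {
            pthread_mutex_t pi_lock;
            struct waiter *pi_blocked_on;
    };

    /* Requeue side: refuse to enqueue a task whose wakeup is in flight. */
    static int try_enqueue(struct task *t, struct waiter *w)
    {
            int ret = 0;

            pthread_mutex_lock(&t->pi_lock);
            if (t->pi_blocked_on == PI_WAKEUP_INPROGRESS)
                    ret = -EAGAIN;  /* caller backs off and retries */
            else
                    t->pi_blocked_on = w;
            pthread_mutex_unlock(&t->pi_lock);
            return ret;
    }

    /* Waking side: claim the wakeup before taking any other lock. */
    static void mark_wakeup_in_progress(struct task *t)
    {
            pthread_mutex_lock(&t->pi_lock);
            if (t->pi_blocked_on == NULL)
                    t->pi_blocked_on = PI_WAKEUP_INPROGRESS;
            pthread_mutex_unlock(&t->pi_lock);
    }

    int main(void)
    {
            struct task t = { PTHREAD_MUTEX_INITIALIZER, NULL };
            struct waiter w = { 0 };

            mark_wakeup_in_progress(&t);
            printf("enqueue after early wakeup: %d\n", try_enqueue(&t, &w));
            return 0;       /* prints -11, i.e. -EAGAIN */
    }
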
diff --git a/debian/patches-rt/rtmutex-lock-killable.patch b/debian/patches-rt/rtmutex-lock-killable.patch
index 3fd36b4e3..bf7e82024 100644
--- a/debian/patches-rt/rtmutex-lock-killable.patch
+++ b/debian/patches-rt/rtmutex-lock-killable.patch
@@ -1,7 +1,7 @@
Subject: rtmutex: Add rtmutex_lock_killable()
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 09 Jun 2011 11:43:52 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Add "killable" type to rtmutex. We need this since rtmutex are used as
"normal" mutexes which do use this type.
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1563,6 +1563,25 @@ int __sched __rt_mutex_futex_trylock(str
+@@ -1539,6 +1539,25 @@ int __sched __rt_mutex_futex_trylock(str
}
/**
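
A sketch of how the killable variant is meant to be used: the caller must
handle -EINTR, which rt_mutex_lock_killable() returns when a fatal signal
arrives while the task sleeps on the lock (illustrative kernel-style
usage, not taken from the patch):

    static int do_locked_work(struct rt_mutex *lock)
    {
            int ret;

            ret = rt_mutex_lock_killable(lock);     /* 0 or -EINTR */
            if (ret)
                    return ret;     /* killed while blocked; bail out */

            /* ... critical section ... */

            rt_mutex_unlock(lock);
            return 0;
    }
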
diff --git a/debian/patches-rt/rtmutex-trylock-is-okay-on-RT.patch b/debian/patches-rt/rtmutex-trylock-is-okay-on-RT.patch
index 2581577ed..958261899 100644
--- a/debian/patches-rt/rtmutex-trylock-is-okay-on-RT.patch
+++ b/debian/patches-rt/rtmutex-trylock-is-okay-on-RT.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed 02 Dec 2015 11:34:07 +0100
Subject: rtmutex: trylock is okay on -RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
A non-RT kernel could deadlock on rt_mutex_trylock() in softirq context. On
-RT we don't run softirqs in IRQ context but in thread context, so it is
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1583,7 +1583,11 @@ int __sched rt_mutex_trylock(struct rt_m
+@@ -1584,7 +1584,11 @@ int __sched rt_mutex_trylock(struct rt_m
{
int ret;
diff --git a/debian/patches-rt/rtmutex-wire-up-RT-s-locking.patch b/debian/patches-rt/rtmutex-wire-up-RT-s-locking.patch
index 4a00f4091..debc6c08a 100644
--- a/debian/patches-rt/rtmutex-wire-up-RT-s-locking.patch
+++ b/debian/patches-rt/rtmutex-wire-up-RT-s-locking.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 12 Oct 2017 17:31:14 +0200
Subject: rtmutex: wire up RT's locking
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
@@ -11,10 +11,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
include/linux/spinlock.h | 12 +++++++++++-
include/linux/spinlock_api_smp.h | 4 +++-
include/linux/spinlock_types.h | 11 ++++++++---
- kernel/locking/Makefile | 9 ++++++++-
+ kernel/locking/Makefile | 10 +++++++++-
+ kernel/locking/rwsem.h | 2 ++
kernel/locking/spinlock.c | 7 +++++++
kernel/locking/spinlock_debug.c | 5 +++++
- 8 files changed, 66 insertions(+), 13 deletions(-)
+ 9 files changed, 69 insertions(+), 13 deletions(-)
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -67,10 +68,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#include <linux/rwsem-rt.h>
+#else /* PREEMPT_RT_FULL */
+
- struct rw_semaphore;
-
- #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
-@@ -114,6 +118,13 @@ static inline int rwsem_is_contended(str
+ /*
+ * For an uncontended rwsem, count and owner are the only fields a task
+ * needs to touch when acquiring the rwsem. So they are put next to each
+@@ -109,6 +113,13 @@ static inline int rwsem_is_contended(str
return !list_empty(&sem->wait_list);
}
@@ -86,7 +87,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
-@@ -298,7 +298,11 @@ static inline void do_raw_spin_unlock(ra
+@@ -307,7 +307,11 @@ static inline void do_raw_spin_unlock(ra
})
/* Include rwlock functions */
@@ -99,7 +100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
-@@ -309,6 +313,10 @@ static inline void do_raw_spin_unlock(ra
+@@ -318,6 +322,10 @@ static inline void do_raw_spin_unlock(ra
# include <linux/spinlock_api_up.h>
#endif
@@ -110,7 +111,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Map the spin_lock functions to the raw variants for PREEMPT_RT=n
*/
-@@ -429,6 +437,8 @@ static __always_inline int spin_is_conte
+@@ -438,6 +446,8 @@ static __always_inline int spin_is_conte
#define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock)
@@ -156,7 +157,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
# and is generally not a function of system call inputs.
KCOV_INSTRUMENT := n
--obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o
+-obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o rwsem-xadd.o
+obj-y += semaphore.o percpu-rwsem.o
ifdef CONFIG_FUNCTION_TRACER
@@ -173,21 +174,35 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
obj-$(CONFIG_LOCKDEP) += lockdep.o
ifeq ($(CONFIG_PROC_FS),y)
obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
-@@ -25,8 +29,11 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
+@@ -25,6 +29,10 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
+ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
- obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
- obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o
++obj-y += mutex.o rwsem.o rwsem-xadd.o
+endif
+obj-$(CONFIG_PREEMPT_RT_FULL) += mutex-rt.o rwsem-rt.o rwlock-rt.o
obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
obj-$(CONFIG_WW_MUTEX_SELFTEST) += test-ww_mutex.o
+--- a/kernel/locking/rwsem.h
++++ b/kernel/locking/rwsem.h
+@@ -169,6 +169,7 @@ extern struct rw_semaphore *rwsem_down_w
+ extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
+ extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * lock for reading
+ */
+@@ -302,3 +303,4 @@ static inline void __downgrade_write(str
+ if (tmp < 0)
+ rwsem_downgrade_wake(sem);
+ }
++#endif
--- a/kernel/locking/spinlock.c
+++ b/kernel/locking/spinlock.c
-@@ -117,8 +117,11 @@ void __lockfunc __raw_##op##_lock_bh(loc
+@@ -124,8 +124,11 @@ void __lockfunc __raw_##op##_lock_bh(loc
* __[spin|read|write]_lock_bh()
*/
BUILD_LOCK_OPS(spin, raw_spinlock);
@@ -199,7 +214,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
-@@ -202,6 +205,8 @@ void __lockfunc _raw_spin_unlock_bh(raw_
+@@ -209,6 +212,8 @@ void __lockfunc _raw_spin_unlock_bh(raw_
EXPORT_SYMBOL(_raw_spin_unlock_bh);
#endif
@@ -208,7 +223,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifndef CONFIG_INLINE_READ_TRYLOCK
int __lockfunc _raw_read_trylock(rwlock_t *lock)
{
-@@ -346,6 +351,8 @@ void __lockfunc _raw_write_unlock_bh(rwl
+@@ -353,6 +358,8 @@ void __lockfunc _raw_write_unlock_bh(rwl
EXPORT_SYMBOL(_raw_write_unlock_bh);
#endif
@@ -235,7 +250,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
-@@ -135,6 +137,7 @@ void do_raw_spin_unlock(raw_spinlock_t *
+@@ -139,6 +141,7 @@ void do_raw_spin_unlock(raw_spinlock_t *
arch_spin_unlock(&lock->raw_lock);
}
@@ -243,7 +258,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
if (!debug_locks_off())
-@@ -224,3 +227,5 @@ void do_raw_write_unlock(rwlock_t *lock)
+@@ -228,3 +231,5 @@ void do_raw_write_unlock(rwlock_t *lock)
debug_write_unlock(lock);
arch_write_unlock(&lock->raw_lock);
}
diff --git a/debian/patches-rt/rtmutex_dont_include_rcu.patch b/debian/patches-rt/rtmutex_dont_include_rcu.patch
index df8d9a937..ba20eb892 100644
--- a/debian/patches-rt/rtmutex_dont_include_rcu.patch
+++ b/debian/patches-rt/rtmutex_dont_include_rcu.patch
@@ -1,6 +1,6 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Subject: rbtree: don't include the rcu header
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
The RCU header pulls in spinlock.h and fails due to not-yet-defined types:
@@ -20,13 +20,13 @@ a new header file which can be included by both users.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/rbtree.h | 2 -
- include/linux/rcu_assign_pointer.h | 54 +++++++++++++++++++++++++++++++++++++
- include/linux/rcupdate.h | 49 ---------------------------------
- 3 files changed, 56 insertions(+), 49 deletions(-)
+ include/linux/rcu_assign_pointer.h | 63 +++++++++++++++++++++++++++++++++++++
+ include/linux/rcupdate.h | 57 ---------------------------------
+ 3 files changed, 65 insertions(+), 57 deletions(-)
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
-@@ -31,7 +31,7 @@
+@@ -19,7 +19,7 @@
#include <linux/kernel.h>
#include <linux/stddef.h>
@@ -37,12 +37,20 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
unsigned long __rb_parent_color;
--- /dev/null
+++ b/include/linux/rcu_assign_pointer.h
-@@ -0,0 +1,54 @@
+@@ -0,0 +1,63 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef __LINUX_RCU_ASSIGN_POINTER_H__
+#define __LINUX_RCU_ASSIGN_POINTER_H__
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+
++#ifdef __CHECKER__
++#define rcu_check_sparse(p, space) \
++ ((void)(((typeof(*p) space *)p) == p))
++#else /* #ifdef __CHECKER__ */
++#define rcu_check_sparse(p, space)
++#endif /* #else #ifdef __CHECKER__ */
++
+/**
+ * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
+ * @v: The value to statically initialize with.
@@ -83,6 +91,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#define rcu_assign_pointer(p, v) \
+({ \
+ uintptr_t _r_a_p__v = (uintptr_t)(v); \
++ rcu_check_sparse(p, __rcu); \
+ \
+ if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \
+ WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \
@@ -94,7 +103,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#endif
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
-@@ -42,6 +42,7 @@
+@@ -29,6 +29,7 @@
#include <linux/lockdep.h>
#include <asm/processor.h>
#include <linux/cpumask.h>
@@ -102,7 +111,21 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
-@@ -350,54 +351,6 @@ static inline void rcu_preempt_sleep_che
+@@ -301,13 +302,6 @@ static inline void rcu_preempt_sleep_che
+ * (e.g., __srcu), should this make sense in the future.
+ */
+
+-#ifdef __CHECKER__
+-#define rcu_check_sparse(p, space) \
+- ((void)(((typeof(*p) space *)p) == p))
+-#else /* #ifdef __CHECKER__ */
+-#define rcu_check_sparse(p, space)
+-#endif /* #else #ifdef __CHECKER__ */
+-
+ #define __rcu_access_pointer(p, space) \
+ ({ \
+ typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
+@@ -336,55 +330,6 @@ static inline void rcu_preempt_sleep_che
})
/**
@@ -145,6 +168,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-#define rcu_assign_pointer(p, v) \
-({ \
- uintptr_t _r_a_p__v = (uintptr_t)(v); \
+- rcu_check_sparse(p, __rcu); \
- \
- if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \
- WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \
diff --git a/debian/patches-rt/sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch b/debian/patches-rt/sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch
index d609b556b..942304260 100644
--- a/debian/patches-rt/sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch
+++ b/debian/patches-rt/sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch
@@ -2,7 +2,7 @@ From: Mike Galbraith <efault@gmx.de>
Date: Sun, 19 Aug 2018 08:28:35 +0200
Subject: [PATCH] sched: Allow pinned user tasks to be awakened to the CPU they
pinned
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Since commit 7af443ee16976 ("sched/core: Require cpu_active() in
select_task_rq(), for user tasks") select_fallback_rq() will BUG() if
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -922,7 +922,7 @@ static inline bool is_cpu_allowed(struct
+@@ -960,7 +960,7 @@ static inline bool is_cpu_allowed(struct
if (!cpumask_test_cpu(cpu, p->cpus_ptr))
return false;
diff --git a/debian/patches-rt/sched-completion-Fix-a-lockup-in-wait_for_completion.patch b/debian/patches-rt/sched-completion-Fix-a-lockup-in-wait_for_completion.patch
new file mode 100644
index 000000000..feb678305
--- /dev/null
+++ b/debian/patches-rt/sched-completion-Fix-a-lockup-in-wait_for_completion.patch
@@ -0,0 +1,61 @@
+From: Corey Minyard <cminyard@mvista.com>
+Date: Thu, 9 May 2019 14:33:20 -0500
+Subject: [PATCH] sched/completion: Fix a lockup in wait_for_completion()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
+
+Consider following race:
+
+ T0 T1 T2
+ wait_for_completion()
+ do_wait_for_common()
+ __prepare_to_swait()
+ schedule()
+ complete()
+ x->done++ (0 -> 1)
+ raw_spin_lock_irqsave()
+ swake_up_locked() wait_for_completion()
+ wake_up_process(T0)
+ list_del_init()
+ raw_spin_unlock_irqrestore()
+ raw_spin_lock_irq(&x->wait.lock)
+ raw_spin_lock_irq(&x->wait.lock) x->done != UINT_MAX, 1 -> 0
+ raw_spin_unlock_irq(&x->wait.lock)
+ return 1
+ while (!x->done && timeout),
+ continue loop, not enqueued
+ on &x->wait
+
+Basically, the problem is that the original wait queues used in
+completions did not remove the item from the queue in the wakeup
+function, but swake_up_locked() does.
+
+Fix it by adding the thread to the wait queue inside the do loop.
+The swait code detects if the waiter is already on the list and doesn't
+do the list add again.
+
+Cc: stable-rt@vger.kernel.org
+Fixes: a04ff6b4ec4ee7e ("completion: Use simple wait queues")
+Signed-off-by: Corey Minyard <cminyard@mvista.com>
+Acked-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+[bigeasy: shorten commit message ]
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/sched/completion.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/sched/completion.c
++++ b/kernel/sched/completion.c
+@@ -72,12 +72,12 @@ do_wait_for_common(struct completion *x,
+ if (!x->done) {
+ DECLARE_SWAITQUEUE(wait);
+
+- __prepare_to_swait(&x->wait, &wait);
+ do {
+ if (signal_pending_state(state, current)) {
+ timeout = -ERESTARTSYS;
+ break;
+ }
++ __prepare_to_swait(&x->wait, &wait);
+ __set_current_state(state);
+ raw_spin_unlock_irq(&x->wait.lock);
+ timeout = action(timeout);
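
The essence of the fix can be modeled in user space: swait wakeups dequeue
the waiter, so enqueueing once before the loop leaves later iterations off
the wait list, while enqueueing at the top of each iteration is idempotent
and therefore safe. A sketch with made-up names, not kernel code:

    #include <stdbool.h>
    #include <stdio.h>

    static bool on_wait_list;       /* models the waiter sitting on x->wait */

    static void prepare_to_swait_model(void)
    {
            if (!on_wait_list)      /* re-adding is a no-op while queued */
                    on_wait_list = true;
    }

    static void swake_up_model(void)
    {
            on_wait_list = false;   /* swake_up_locked() does list_del_init() */
    }

    int main(void)
    {
            /* Broken order: enqueue once before the loop... */
            prepare_to_swait_model();
            /* ...a concurrent complete() dequeues us... */
            swake_up_model();
            /* ...and the next iteration waits while off the list. */
            printf("broken: queued while waiting? %d\n", on_wait_list);

            /* Fixed order: enqueue at the top of every iteration. */
            prepare_to_swait_model();
            printf("fixed:  queued while waiting? %d\n", on_wait_list);
            return 0;
    }
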
diff --git a/debian/patches-rt/sched-core-Schedule-new-worker-even-if-PI-blocked.patch b/debian/patches-rt/sched-core-Schedule-new-worker-even-if-PI-blocked.patch
new file mode 100644
index 000000000..c385d5889
--- /dev/null
+++ b/debian/patches-rt/sched-core-Schedule-new-worker-even-if-PI-blocked.patch
@@ -0,0 +1,44 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 29 May 2019 17:52:17 +0200
+Subject: [PATCH] sched/core: Schedule new worker even if PI-blocked
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
+
+If a task is PI-blocked (blocking on a sleeping spinlock) then we don't
+schedule a new kworker when we schedule out due to lock contention,
+because !RT wouldn't do so either: a spinlock disables preemption, so a
+worker wouldn't schedule out on lock contention (it would spin) and thus
+wouldn't start a new worker.
+
+On RT the RW-semaphore implementation uses an rtmutex, so
+tsk_is_pi_blocked() will return true if a task blocks on it. In that case
+we would skip scheduling a worker, which may deadlock if one worker is
+waiting on progress from another worker. XFS is able to trigger this.
+
+Allow scheduling a new worker if the current worker is PI-blocked.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/sched/core.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -3469,7 +3469,7 @@ void __noreturn do_task_dead(void)
+
+ static inline void sched_submit_work(struct task_struct *tsk)
+ {
+- if (!tsk->state || tsk_is_pi_blocked(tsk))
++ if (!tsk->state)
+ return;
+
+ /*
+@@ -3485,6 +3485,9 @@ static inline void sched_submit_work(str
+ preempt_enable_no_resched();
+ }
+
++ if (tsk_is_pi_blocked(tsk))
++ return;
++
+ /*
+ * If we are going to sleep and we have plugged IO queued,
+ * make sure to submit it to avoid deadlocks.
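
With this change applied, sched_submit_work() effectively takes the shape
below: the workqueue core is notified about the sleeping worker
unconditionally, and only the block-IO plug flush (which can take sleeping
locks on RT) is skipped for PI-blocked tasks. A simplified model, details
elided:

    static inline void sched_submit_work(struct task_struct *tsk)
    {
            if (!tsk->state)
                    return;

            if (tsk->flags & PF_WQ_WORKER) {
                    /* Always wake a replacement worker, or chains where
                     * one worker waits on another can deadlock (XFS). */
                    preempt_disable();
                    wq_worker_sleeping(tsk);
                    preempt_enable_no_resched();
            }

            if (tsk_is_pi_blocked(tsk))
                    return; /* blocked on a sleeping spinlock */

            if (blk_needs_flush_plug(tsk))
                    blk_schedule_flush_plug(tsk);
    }
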
diff --git a/debian/patches-rt/sched-delay-put-task.patch b/debian/patches-rt/sched-delay-put-task.patch
index 82586434d..0a77e8676 100644
--- a/debian/patches-rt/sched-delay-put-task.patch
+++ b/debian/patches-rt/sched-delay-put-task.patch
@@ -1,7 +1,7 @@
Subject: sched: Move task_struct cleanup to RCU
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 31 May 2011 16:59:16 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
__put_task_struct() does quite a lot of expensive work. We don't want to
burden random tasks with that.
@@ -9,13 +9,13 @@ burden random tasks with that.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/sched.h | 3 +++
- include/linux/sched/task.h | 11 ++++++++++-
- kernel/fork.c | 15 ++++++++++++++-
- 3 files changed, 27 insertions(+), 2 deletions(-)
+ include/linux/sched/task.h | 12 +++++++++++-
+ kernel/fork.c | 14 ++++++++++++++
+ 3 files changed, 28 insertions(+), 1 deletion(-)
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1190,6 +1190,9 @@ struct task_struct {
+@@ -1191,6 +1191,9 @@ struct task_struct {
unsigned int sequential_io;
unsigned int sequential_io_avg;
#endif
@@ -27,24 +27,25 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
-@@ -90,6 +90,15 @@ extern void sched_exec(void);
+@@ -91,6 +91,16 @@ extern void sched_exec(void);
- #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
+ #define get_task_struct(tsk) do { refcount_inc(&(tsk)->usage); } while(0)
+#ifdef CONFIG_PREEMPT_RT_BASE
+extern void __put_task_struct_cb(struct rcu_head *rhp);
+
+static inline void put_task_struct(struct task_struct *t)
+{
-+ if (atomic_dec_and_test(&t->usage))
++ if (refcount_dec_and_test(&t->usage))
+ call_rcu(&t->put_rcu, __put_task_struct_cb);
+}
+#else
++
extern void __put_task_struct(struct task_struct *t);
static inline void put_task_struct(struct task_struct *t)
-@@ -97,7 +106,7 @@ static inline void put_task_struct(struc
- if (atomic_dec_and_test(&t->usage))
+@@ -98,7 +108,7 @@ static inline void put_task_struct(struc
+ if (refcount_dec_and_test(&t->usage))
__put_task_struct(t);
}
-
@@ -54,18 +55,17 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -713,7 +713,9 @@ static inline void put_signal_struct(str
- if (atomic_dec_and_test(&sig->sigcnt))
+@@ -720,6 +720,9 @@ static inline void put_signal_struct(str
free_signal_struct(sig);
}
--
+
+#ifdef CONFIG_PREEMPT_RT_BASE
+static
+#endif
void __put_task_struct(struct task_struct *tsk)
{
WARN_ON(!tsk->exit_state);
-@@ -730,7 +732,18 @@ void __put_task_struct(struct task_struc
+@@ -736,7 +739,18 @@ void __put_task_struct(struct task_struc
if (!profile_handoff_task(tsk))
free_task(tsk);
}
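
The callback body is elided in the hunk above; presumably it pairs the
call_rcu() with the original teardown, so the expensive cleanup runs from
RCU (schedulable) context instead of wherever the last reference was
dropped. A sketch of that shape:

    void __put_task_struct_cb(struct rcu_head *rhp)
    {
            struct task_struct *tsk =
                    container_of(rhp, struct task_struct, put_rcu);

            __put_task_struct(tsk); /* full teardown, outside atomic context */
    }
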
diff --git a/debian/patches-rt/sched-disable-rt-group-sched-on-rt.patch b/debian/patches-rt/sched-disable-rt-group-sched-on-rt.patch
index 445466b01..af28c7f19 100644
--- a/debian/patches-rt/sched-disable-rt-group-sched-on-rt.patch
+++ b/debian/patches-rt/sched-disable-rt-group-sched-on-rt.patch
@@ -1,7 +1,7 @@
Subject: sched: Disable CONFIG_RT_GROUP_SCHED on RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 18 Jul 2011 17:03:52 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Carsten reported problems when running:
@@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -814,6 +814,7 @@ config CFS_BANDWIDTH
+@@ -840,6 +840,7 @@ config CFS_BANDWIDTH
config RT_GROUP_SCHED
bool "Group scheduling for SCHED_RR/FIFO"
depends on CGROUP_SCHED
diff --git a/debian/patches-rt/sched-disable-ttwu-queue.patch b/debian/patches-rt/sched-disable-ttwu-queue.patch
index 414e5b4c9..bd8a6cc79 100644
--- a/debian/patches-rt/sched-disable-ttwu-queue.patch
+++ b/debian/patches-rt/sched-disable-ttwu-queue.patch
@@ -1,7 +1,7 @@
Subject: sched: Disable TTWU_QUEUE on RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 13 Sep 2011 16:42:35 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
The queued remote wakeup mechanism can introduce rather large
latencies if the number of migrated tasks is high. Disable it for RT.
diff --git a/debian/patches-rt/sched-fair-Make-the-hrtimers-non-hard-again.patch b/debian/patches-rt/sched-fair-Make-the-hrtimers-non-hard-again.patch
index cec2a3c07..8d476c343 100644
--- a/debian/patches-rt/sched-fair-Make-the-hrtimers-non-hard-again.patch
+++ b/debian/patches-rt/sched-fair-Make-the-hrtimers-non-hard-again.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 8 Jan 2019 12:31:06 +0100
Subject: [PATCH] sched/fair: Make the hrtimers non-hard again
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Since commit "sched/fair: Robustify CFS-bandwidth timer locking" both
hrtimers can run in softirq context because interrupts are now disabled
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
-@@ -4916,9 +4916,9 @@ void init_cfs_bandwidth(struct cfs_bandw
+@@ -4945,9 +4945,9 @@ void init_cfs_bandwidth(struct cfs_bandw
cfs_b->period = ns_to_ktime(default_cfs_period());
INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
diff --git a/debian/patches-rt/sched-fair-Robustify-CFS-bandwidth-timer-locking.patch b/debian/patches-rt/sched-fair-Robustify-CFS-bandwidth-timer-locking.patch
deleted file mode 100644
index 2b55d4721..000000000
--- a/debian/patches-rt/sched-fair-Robustify-CFS-bandwidth-timer-locking.patch
+++ /dev/null
@@ -1,145 +0,0 @@
-From: Peter Zijlstra <peterz@infradead.org>
-Date: Mon, 7 Jan 2019 13:52:31 +0100
-Subject: [PATCH] sched/fair: Robustify CFS-bandwidth timer locking
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-Traditionally hrtimer callbacks were run with IRQs disabled, but with
-the introduction of HRTIMER_MODE_SOFT it is possible they run from
-SoftIRQ context, which does _NOT_ have IRQs disabled.
-
-Allow for the CFS bandwidth timers (period_timer and slack_timer) to
-be ran from SoftIRQ context; this entails removing the assumption that
-IRQs are already disabled from the locking.
-
-While mainline doesn't strictly need this, -RT forces all timers not
-explicitly marked with MODE_HARD into MODE_SOFT and trips over this.
-And marking these timers as MODE_HARD doesn't make sense as they're
-not required for RT operation and can potentially be quite expensive.
-
-Cc: Ingo Molnar <mingo@redhat.com>
-Cc: Thomas Gleixner <tglx@linutronix.de>
-Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Reported-by: Tom Putzeys <tom.putzeys@be.atlascopco.com>
-Tested-by: Mike Galbraith <efault@gmx.de>
-Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Link: https://lkml.kernel.org/r/20190107125231.GE14122@hirez.programming.kicks-ass.net
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/sched/fair.c | 30 ++++++++++++++++--------------
- 1 file changed, 16 insertions(+), 14 deletions(-)
-
---- a/kernel/sched/fair.c
-+++ b/kernel/sched/fair.c
-@@ -4565,7 +4565,7 @@ static u64 distribute_cfs_runtime(struct
- struct rq *rq = rq_of(cfs_rq);
- struct rq_flags rf;
-
-- rq_lock(rq, &rf);
-+ rq_lock_irqsave(rq, &rf);
- if (!cfs_rq_throttled(cfs_rq))
- goto next;
-
-@@ -4582,7 +4582,7 @@ static u64 distribute_cfs_runtime(struct
- unthrottle_cfs_rq(cfs_rq);
-
- next:
-- rq_unlock(rq, &rf);
-+ rq_unlock_irqrestore(rq, &rf);
-
- if (!remaining)
- break;
-@@ -4598,7 +4598,7 @@ static u64 distribute_cfs_runtime(struct
- * period the timer is deactivated until scheduling resumes; cfs_b->idle is
- * used to track this state.
- */
--static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
-+static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags)
- {
- u64 runtime, runtime_expires;
- int throttled;
-@@ -4640,11 +4640,11 @@ static int do_sched_cfs_period_timer(str
- while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) {
- runtime = cfs_b->runtime;
- cfs_b->distribute_running = 1;
-- raw_spin_unlock(&cfs_b->lock);
-+ raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
- /* we can't nest cfs_b->lock while distributing bandwidth */
- runtime = distribute_cfs_runtime(cfs_b, runtime,
- runtime_expires);
-- raw_spin_lock(&cfs_b->lock);
-+ raw_spin_lock_irqsave(&cfs_b->lock, flags);
-
- cfs_b->distribute_running = 0;
- throttled = !list_empty(&cfs_b->throttled_cfs_rq);
-@@ -4753,17 +4753,18 @@ static __always_inline void return_cfs_r
- static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
- {
- u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
-+ unsigned long flags;
- u64 expires;
-
- /* confirm we're still not at a refresh boundary */
-- raw_spin_lock(&cfs_b->lock);
-+ raw_spin_lock_irqsave(&cfs_b->lock, flags);
- if (cfs_b->distribute_running) {
-- raw_spin_unlock(&cfs_b->lock);
-+ raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
- return;
- }
-
- if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
-- raw_spin_unlock(&cfs_b->lock);
-+ raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
- return;
- }
-
-@@ -4774,18 +4775,18 @@ static void do_sched_cfs_slack_timer(str
- if (runtime)
- cfs_b->distribute_running = 1;
-
-- raw_spin_unlock(&cfs_b->lock);
-+ raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
-
- if (!runtime)
- return;
-
- runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
-
-- raw_spin_lock(&cfs_b->lock);
-+ raw_spin_lock_irqsave(&cfs_b->lock, flags);
- if (expires == cfs_b->runtime_expires)
- lsub_positive(&cfs_b->runtime, runtime);
- cfs_b->distribute_running = 0;
-- raw_spin_unlock(&cfs_b->lock);
-+ raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
- }
-
- /*
-@@ -4865,11 +4866,12 @@ static enum hrtimer_restart sched_cfs_pe
- {
- struct cfs_bandwidth *cfs_b =
- container_of(timer, struct cfs_bandwidth, period_timer);
-+ unsigned long flags;
- int overrun;
- int idle = 0;
- int count = 0;
-
-- raw_spin_lock(&cfs_b->lock);
-+ raw_spin_lock_irqsave(&cfs_b->lock, flags);
- for (;;) {
- overrun = hrtimer_forward_now(timer, cfs_b->period);
- if (!overrun)
-@@ -4897,11 +4899,11 @@ static enum hrtimer_restart sched_cfs_pe
- count = 0;
- }
-
-- idle = do_sched_cfs_period_timer(cfs_b, overrun);
-+ idle = do_sched_cfs_period_timer(cfs_b, overrun, flags);
- }
- if (idle)
- cfs_b->period_active = 0;
-- raw_spin_unlock(&cfs_b->lock);
-+ raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
-
- return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
- }
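
The locking pattern the deleted patch introduced (now upstream in 5.2,
which is why it is dropped here) is the standard one for locks shared with
SOFT-mode hrtimer callbacks: the callback no longer runs with IRQs
disabled, so each acquisition must save and restore the IRQ state itself.
In sketch form, following the deleted hunks:

    static enum hrtimer_restart period_timer_cb(struct hrtimer *timer)
    {
            struct cfs_bandwidth *cfs_b =
                    container_of(timer, struct cfs_bandwidth, period_timer);
            unsigned long flags;

            /* May run from softirq with IRQs enabled, hence irqsave. */
            raw_spin_lock_irqsave(&cfs_b->lock, flags);
            /* ... hrtimer_forward_now(), do_sched_cfs_period_timer() ... */
            raw_spin_unlock_irqrestore(&cfs_b->lock, flags);

            return HRTIMER_RESTART;
    }
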
diff --git a/debian/patches-rt/sched-limit-nr-migrate.patch b/debian/patches-rt/sched-limit-nr-migrate.patch
index e84dbb6cc..0754b8f7e 100644
--- a/debian/patches-rt/sched-limit-nr-migrate.patch
+++ b/debian/patches-rt/sched-limit-nr-migrate.patch
@@ -1,7 +1,7 @@
Subject: sched: Limit the number of task migrations per batch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 06 Jun 2011 12:12:51 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Put an upper limit on the number of tasks which are migrated per batch
to avoid large latencies.
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -44,7 +44,11 @@ const_debug unsigned int sysctl_sched_fe
+@@ -45,7 +45,11 @@ const_debug unsigned int sysctl_sched_fe
* Number of tasks to iterate in a single balance run.
* Limited because this is done with IRQs disabled.
*/
diff --git a/debian/patches-rt/sched-might-sleep-do-not-account-rcu-depth.patch b/debian/patches-rt/sched-might-sleep-do-not-account-rcu-depth.patch
index ec5b8da3a..6e1649b36 100644
--- a/debian/patches-rt/sched-might-sleep-do-not-account-rcu-depth.patch
+++ b/debian/patches-rt/sched-might-sleep-do-not-account-rcu-depth.patch
@@ -1,7 +1,7 @@
Subject: sched: Do not account rcu_preempt_depth on RT in might_sleep()
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 07 Jun 2011 09:19:06 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
RT changes the rcu_preempt_depth semantics, so we cannot check for it
in might_sleep().
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
-@@ -64,6 +64,11 @@ void __rcu_read_unlock(void);
+@@ -51,6 +51,11 @@ void __rcu_read_unlock(void);
* types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
*/
#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
@@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#else /* #ifdef CONFIG_PREEMPT_RCU */
-@@ -84,6 +89,8 @@ static inline int rcu_preempt_depth(void
+@@ -69,6 +74,8 @@ static inline int rcu_preempt_depth(void
return 0;
}
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Internal to kernel */
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -6168,7 +6168,7 @@ void __init sched_init(void)
+@@ -6142,7 +6142,7 @@ void __init sched_init(void)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
diff --git a/debian/patches-rt/sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch b/debian/patches-rt/sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch
index 376f8a3a8..f74680441 100644
--- a/debian/patches-rt/sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch
+++ b/debian/patches-rt/sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 9 Oct 2018 17:34:50 +0200
Subject: [PATCH] sched/migrate_disable: Add export_symbol_gpl for
__migrate_disabled
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Jonathan reported that lttng/modules can't use __migrate_disabled().
This function is only used by sched/core itself and the tracing
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1031,6 +1031,7 @@ int __migrate_disabled(struct task_struc
+@@ -1065,6 +1065,7 @@ int __migrate_disabled(struct task_struc
{
return p->migrate_disable;
}
diff --git a/debian/patches-rt/sched-migrate_disable-fallback-to-preempt_disable-in.patch b/debian/patches-rt/sched-migrate_disable-fallback-to-preempt_disable-in.patch
index 02d43e0de..da425e4c5 100644
--- a/debian/patches-rt/sched-migrate_disable-fallback-to-preempt_disable-in.patch
+++ b/debian/patches-rt/sched-migrate_disable-fallback-to-preempt_disable-in.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 5 Jul 2018 14:44:51 +0200
Subject: [PATCH] sched/migrate_disable: fallback to preempt_disable() instead
of barrier()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
On SMP + !RT migrate_disable() is still around. It is not part of spin_lock()
anymore so it has almost no users. However the futex code has a workaround for
@@ -69,7 +69,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -666,7 +666,7 @@ struct task_struct {
+@@ -658,7 +658,7 @@ struct task_struct {
int nr_cpus_allowed;
const cpumask_t *cpus_ptr;
cpumask_t cpus_mask;
@@ -78,7 +78,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int migrate_disable;
int migrate_disable_update;
# ifdef CONFIG_SCHED_DEBUG
-@@ -674,8 +674,8 @@ struct task_struct {
+@@ -666,8 +666,8 @@ struct task_struct {
# endif
#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
@@ -90,7 +90,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1049,7 +1049,7 @@ void set_cpus_allowed_common(struct task
+@@ -1087,7 +1087,7 @@ void set_cpus_allowed_common(struct task
p->nr_cpus_allowed = cpumask_weight(new_mask);
}
@@ -99,7 +99,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int __migrate_disabled(struct task_struct *p)
{
return p->migrate_disable;
-@@ -1089,7 +1089,7 @@ static void __do_set_cpus_allowed_tail(s
+@@ -1127,7 +1127,7 @@ static void __do_set_cpus_allowed_tail(s
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
@@ -108,7 +108,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (__migrate_disabled(p)) {
lockdep_assert_held(&p->pi_lock);
-@@ -1162,7 +1162,7 @@ static int __set_cpus_allowed_ptr(struct
+@@ -1200,7 +1200,7 @@ static int __set_cpus_allowed_ptr(struct
if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
goto out;
@@ -117,7 +117,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (__migrate_disabled(p)) {
p->migrate_disable_update = 1;
goto out;
-@@ -7177,7 +7177,7 @@ const u32 sched_prio_to_wmult[40] = {
+@@ -7193,7 +7193,7 @@ const u32 sched_prio_to_wmult[40] = {
#undef CREATE_TRACE_POINTS
@@ -126,7 +126,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline void
update_nr_migratory(struct task_struct *p, long delta)
-@@ -7325,45 +7325,44 @@ EXPORT_SYMBOL(migrate_enable);
+@@ -7339,45 +7339,44 @@ EXPORT_SYMBOL(migrate_enable);
#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
void migrate_disable(void)
{
@@ -181,7 +181,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
-@@ -982,7 +982,7 @@ void proc_sched_show_task(struct task_st
+@@ -979,7 +979,7 @@ void proc_sched_show_task(struct task_st
P(dl.runtime);
P(dl.deadline);
}
diff --git a/debian/patches-rt/sched-mmdrop-delayed.patch b/debian/patches-rt/sched-mmdrop-delayed.patch
index 0f4ac466a..e359c8136 100644
--- a/debian/patches-rt/sched-mmdrop-delayed.patch
+++ b/debian/patches-rt/sched-mmdrop-delayed.patch
@@ -1,7 +1,7 @@
Subject: sched: Move mmdrop to RCU on RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 06 Jun 2011 12:20:33 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Takes sleeping locks and calls into the memory allocator, so nothing
we want to do in task switch and other atomic contexts.
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/page-flags-layout.h>
#include <linux/workqueue.h>
-@@ -487,6 +488,9 @@ struct mm_struct {
+@@ -496,6 +497,9 @@ struct mm_struct {
bool tlb_flush_batched;
#endif
struct uprobes_state uprobes_state;
@@ -56,7 +56,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* followed by taking the mmap_sem for writing before modifying the
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -679,6 +679,19 @@ void __mmdrop(struct mm_struct *mm)
+@@ -685,6 +685,19 @@ void __mmdrop(struct mm_struct *mm)
}
EXPORT_SYMBOL_GPL(__mmdrop);
@@ -78,7 +78,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct mm_struct *mm;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2748,9 +2748,13 @@ static struct rq *finish_task_switch(str
+@@ -2716,9 +2716,13 @@ static struct rq *finish_task_switch(str
* provided by mmdrop(),
* - a sync_core for SYNC_CORE.
*/
@@ -93,7 +93,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
if (unlikely(prev_state == TASK_DEAD)) {
if (prev->sched_class->task_dead)
-@@ -5569,6 +5573,8 @@ void sched_setnuma(struct task_struct *p
+@@ -5546,6 +5550,8 @@ void sched_setnuma(struct task_struct *p
#endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_HOTPLUG_CPU
@@ -102,7 +102,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Ensure that the idle task is using init_mm right before its CPU goes
* offline.
-@@ -5584,7 +5590,11 @@ void idle_task_exit(void)
+@@ -5561,7 +5567,11 @@ void idle_task_exit(void)
current->active_mm = &init_mm;
finish_arch_post_lock_switch();
}
@@ -115,7 +115,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -5896,6 +5906,10 @@ int sched_cpu_dying(unsigned int cpu)
+@@ -5873,6 +5883,10 @@ int sched_cpu_dying(unsigned int cpu)
update_max_interval();
nohz_balance_exit_idle(rq);
hrtick_clear(rq);
diff --git a/debian/patches-rt/sched-rt-mutex-wakeup.patch b/debian/patches-rt/sched-rt-mutex-wakeup.patch
index 0d46d9395..7fb62d6d7 100644
--- a/debian/patches-rt/sched-rt-mutex-wakeup.patch
+++ b/debian/patches-rt/sched-rt-mutex-wakeup.patch
@@ -1,7 +1,7 @@
Subject: sched: Add saved_state for tasks blocked on sleeping locks
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 25 Jun 2011 09:21:04 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Spinlocks are state preserving in !RT. RT changes the state when a
task gets blocked on a lock. So we need to remember the state before
@@ -18,7 +18,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -599,6 +599,8 @@ struct task_struct {
+@@ -591,6 +591,8 @@ struct task_struct {
#endif
/* -1 unrunnable, 0 runnable, >0 stopped: */
volatile long state;
@@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* This begins the randomizable portion of task_struct. Only
-@@ -1624,6 +1626,7 @@ extern struct task_struct *find_get_task
+@@ -1630,6 +1632,7 @@ extern struct task_struct *find_get_task
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2017,8 +2017,27 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2033,8 +2033,27 @@ try_to_wake_up(struct task_struct *p, un
*/
raw_spin_lock_irqsave(&p->pi_lock, flags);
smp_mb__after_spinlock();
@@ -66,7 +66,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
trace_sched_waking(p);
-@@ -2183,6 +2202,18 @@ int wake_up_process(struct task_struct *
+@@ -2149,6 +2168,18 @@ int wake_up_process(struct task_struct *
}
EXPORT_SYMBOL(wake_up_process);
@@ -87,7 +87,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return try_to_wake_up(p, state, 0);
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -1572,6 +1572,7 @@ static inline int task_on_rq_migrating(s
+@@ -1591,6 +1591,7 @@ static inline int task_on_rq_migrating(s
#define WF_SYNC 0x01 /* Waker goes to sleep after wakeup */
#define WF_FORK 0x02 /* Child wakeup after fork */
#define WF_MIGRATED 0x4 /* Internal use, task got migrated */
diff --git a/debian/patches-rt/sched-swait-Add-swait_event_lock_irq.patch b/debian/patches-rt/sched-swait-Add-swait_event_lock_irq.patch
new file mode 100644
index 000000000..e4687503e
--- /dev/null
+++ b/debian/patches-rt/sched-swait-Add-swait_event_lock_irq.patch
@@ -0,0 +1,33 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 22 May 2019 12:42:26 +0200
+Subject: [PATCH] sched/swait: Add swait_event_lock_irq()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
+
+The swait_event_lock_irq() is inspired by wait_event_lock_irq().
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/swait.h | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+--- a/include/linux/swait.h
++++ b/include/linux/swait.h
+@@ -297,4 +297,18 @@ do { \
+ __ret; \
+ })
+
++#define __swait_event_lock_irq(wq, condition, lock, cmd) \
++ ___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \
++ raw_spin_unlock_irq(&lock); \
++ cmd; \
++ schedule(); \
++ raw_spin_lock_irq(&lock))
++
++#define swait_event_lock_irq(wq_head, condition, lock) \
++ do { \
++ if (condition) \
++ break; \
++ __swait_event_lock_irq(wq_head, condition, lock, ); \
++ } while (0)
++
+ #endif /* _LINUX_SWAIT_H */
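
Usage of the new macro follows wait_event_lock_irq(): the raw spinlock is
held on entry, dropped around schedule(), and re-held both when the
condition is re-evaluated and on return. A hypothetical caller (my_lock,
my_wq and ready are illustrative names):

    static DEFINE_RAW_SPINLOCK(my_lock);
    static DECLARE_SWAIT_QUEUE_HEAD(my_wq);
    static bool ready;

    static void wait_until_ready(void)
    {
            raw_spin_lock_irq(&my_lock);
            swait_event_lock_irq(my_wq, ready, my_lock);
            /* 'ready' is true and my_lock is held here */
            raw_spin_unlock_irq(&my_lock);
    }
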
diff --git a/debian/patches-rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch b/debian/patches-rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
deleted file mode 100644
index 33c3fe5f0..000000000
--- a/debian/patches-rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
+++ /dev/null
@@ -1,38 +0,0 @@
-From: Steven Rostedt <rostedt@goodmis.org>
-Date: Mon, 18 Mar 2013 15:12:49 -0400
-Subject: sched/workqueue: Only wake up idle workers if not blocked on sleeping spin lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-In -rt, most spin_locks() turn into mutexes. One of these spin_lock
-conversions is performed on the workqueue gcwq->lock. When the idle
-worker is woken, the first thing it will do is grab that same lock and
-it too will block, possibly jumping into the same code, but because
-nr_running would already be decremented, it prevents an infinite loop.
-
-But this is still a waste of CPU cycles, and it doesn't follow the method
-of mainline, as new workers should only be woken when a worker thread is
-truly going to sleep, and not just blocked on a spin_lock().
-
-Check the saved_state too before waking up new workers.
-
-
-Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/sched/core.c | 4 +++-
- 1 file changed, 3 insertions(+), 1 deletion(-)
-
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -3510,8 +3510,10 @@ static void __sched notrace __schedule(b
- * If a worker went to sleep, notify and ask workqueue
- * whether it wants to wake up a task to maintain
- * concurrency.
-+ * Only call wake up if prev isn't blocked on a sleeping
-+ * spin lock.
- */
-- if (prev->flags & PF_WQ_WORKER) {
-+ if (prev->flags & PF_WQ_WORKER && !prev->saved_state) {
- struct task_struct *to_wakeup;
-
- to_wakeup = wq_worker_sleeping(prev);
diff --git a/debian/patches-rt/scsi-fcoe-rt-aware.patch b/debian/patches-rt/scsi-fcoe-rt-aware.patch
index 16a14809b..2cfe5e3ac 100644
--- a/debian/patches-rt/scsi-fcoe-rt-aware.patch
+++ b/debian/patches-rt/scsi-fcoe-rt-aware.patch
@@ -1,7 +1,7 @@
Subject: scsi/fcoe: Make RT aware.
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 12 Nov 2011 14:00:48 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Do not disable preemption while taking sleeping locks. All users look safe
for migrate_disable() only.
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
-@@ -1458,11 +1458,11 @@ static int fcoe_rcv(struct sk_buff *skb,
+@@ -1446,11 +1446,11 @@ static int fcoe_rcv(struct sk_buff *skb,
static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
{
struct fcoe_percpu_s *fps;
@@ -30,7 +30,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return rc;
}
-@@ -1649,11 +1649,11 @@ static inline int fcoe_filter_frames(str
+@@ -1637,11 +1637,11 @@ static inline int fcoe_filter_frames(str
return 0;
}
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return -EINVAL;
}
-@@ -1694,7 +1694,7 @@ static void fcoe_recv_frame(struct sk_bu
+@@ -1682,7 +1682,7 @@ static void fcoe_recv_frame(struct sk_bu
*/
hp = (struct fcoe_hdr *) skb_network_header(skb);
@@ -53,7 +53,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
if (stats->ErrorFrames < 5)
printk(KERN_WARNING "fcoe: FCoE version "
-@@ -1726,13 +1726,13 @@ static void fcoe_recv_frame(struct sk_bu
+@@ -1714,13 +1714,13 @@ static void fcoe_recv_frame(struct sk_bu
goto drop;
if (!fcoe_filter_frames(lport, fp)) {
@@ -71,7 +71,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
-@@ -838,7 +838,7 @@ static unsigned long fcoe_ctlr_age_fcfs(
+@@ -826,7 +826,7 @@ static unsigned long fcoe_ctlr_age_fcfs(
INIT_LIST_HEAD(&del_list);
@@ -80,7 +80,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2;
-@@ -874,7 +874,7 @@ static unsigned long fcoe_ctlr_age_fcfs(
+@@ -862,7 +862,7 @@ static unsigned long fcoe_ctlr_age_fcfs(
sel_time = fcf->time;
}
}
@@ -91,7 +91,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Removes fcf from current list */
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
-@@ -833,10 +833,10 @@ static struct fc_exch *fc_exch_em_alloc(
+@@ -821,10 +821,10 @@ static struct fc_exch *fc_exch_em_alloc(
}
memset(ep, 0, sizeof(*ep));
diff --git a/debian/patches-rt/seqlock-prevent-rt-starvation.patch b/debian/patches-rt/seqlock-prevent-rt-starvation.patch
index b6f210ec1..74c0e816c 100644
--- a/debian/patches-rt/seqlock-prevent-rt-starvation.patch
+++ b/debian/patches-rt/seqlock-prevent-rt-starvation.patch
@@ -1,7 +1,7 @@
Subject: seqlock: Prevent rt starvation
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 22 Feb 2012 12:03:30 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
If a low prio writer gets preempted while holding the seqlock write
locked, a high prio reader spins forever on RT.
@@ -160,7 +160,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
-@@ -458,7 +458,7 @@ static inline int neigh_hh_bridge(struct
+@@ -460,7 +460,7 @@ static inline int neigh_hh_bridge(struct
}
#endif
@@ -169,16 +169,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
unsigned int hh_alen = 0;
unsigned int seq;
-@@ -500,7 +500,7 @@ static inline int neigh_hh_output(const
-
- static inline int neigh_output(struct neighbour *n, struct sk_buff *skb)
+@@ -503,7 +503,7 @@ static inline int neigh_hh_output(const
+ static inline int neigh_output(struct neighbour *n, struct sk_buff *skb,
+ bool skip_cache)
{
- const struct hh_cache *hh = &n->hh;
+ struct hh_cache *hh = &n->hh;
- if ((n->nud_state & NUD_CONNECTED) && hh->hh_len)
+ if ((n->nud_state & NUD_CONNECTED) && hh->hh_len && !skip_cache)
return neigh_hh_output(hh, skb);
-@@ -541,7 +541,7 @@ struct neighbour_cb {
+@@ -544,7 +544,7 @@ struct neighbour_cb {
#define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb)
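
The starvation scenario is easy to model: a seqlock reader spins while the
sequence count is odd, so if a low-priority writer is preempted mid-update,
a higher-priority reader on the same CPU spins forever. A user-space
fragment of the reader side showing the hazard (model only; the patch
instead makes RT readers block on the writer's lock):

    #include <stdatomic.h>

    static atomic_uint seq;         /* odd while a writer is mid-update */

    static unsigned int read_seqbegin_model(void)
    {
            unsigned int s;

            /* Busy-waits until the count is even again; under priority
             * preemption this never terminates if the preempted writer
             * can't run. */
            while ((s = atomic_load(&seq)) & 1)
                    ;
            return s;
    }
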
diff --git a/debian/patches-rt/serial-8250-export-symbols-which-are-used-by-symbols.patch b/debian/patches-rt/serial-8250-export-symbols-which-are-used-by-symbols.patch
index bd2406bb0..7b8d74810 100644
--- a/debian/patches-rt/serial-8250-export-symbols-which-are-used-by-symbols.patch
+++ b/debian/patches-rt/serial-8250-export-symbols-which-are-used-by-symbols.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Sat, 16 Feb 2019 09:02:00 +0100
Subject: [PATCH] serial: 8250: export symbols which are used by symbols
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
@@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -2200,6 +2200,7 @@ int is_console_locked(void)
+@@ -2247,6 +2247,7 @@ int is_console_locked(void)
{
return console_locked;
}
diff --git a/debian/patches-rt/serial-8250-remove-that-trylock-in-serial8250_consol.patch b/debian/patches-rt/serial-8250-remove-that-trylock-in-serial8250_consol.patch
index 98bcd49e6..52386d39a 100644
--- a/debian/patches-rt/serial-8250-remove-that-trylock-in-serial8250_consol.patch
+++ b/debian/patches-rt/serial-8250-remove-that-trylock-in-serial8250_consol.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 14 Feb 2019 17:38:24 +0100
Subject: [PATCH] serial: 8250: remove that trylock in
serial8250_console_write_atomic()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
This does not work because an rtmutex cannot be taken in NMI context. As
per John, it is not needed.
diff --git a/debian/patches-rt/series b/debian/patches-rt/series
index 925859dc5..b7f09d5e9 100644
--- a/debian/patches-rt/series
+++ b/debian/patches-rt/series
@@ -5,27 +5,10 @@
############################################################
# UPSTREAM merged
############################################################
-tty-sysrq-Convert-show_lock-to-raw_spinlock_t.patch
############################################################
# POSTED by others
############################################################
-# AT91
-# Alexandre Belloni | [PATCH v3 0/9] clocksource: improve Atmel TCB timer driver
-# Date: Fri, 26 Apr 2019 23:47:09 +0200
-0001-ARM-at91-move-SoC-specific-definitions-to-SoC-folder.patch
-0002-clocksource-drivers-tcb_clksrc-stop-depending-on-atm.patch
-0003-clocksource-drivers-tcb_clksrc-Use-tcb-as-sched_cloc.patch
-0004-ARM-at91-Implement-clocksource-selection.patch
-0005-clocksource-drivers-tcb_clksrc-move-Kconfig-option.patch
-0006-clocksource-drivers-timer-atmel-pit-rework-Kconfig-o.patch
-0007-clocksource-drivers-tcb_clksrc-Rename-the-file-for-c.patch
-0008-clocksource-drivers-timer-atmel-tcb-tc_clksrc_suspen.patch
-0009-misc-atmel_tclib-do-not-probe-already-used-TCBs.patch
-
-kthread-convert-worker-lock-to-raw-spinlock.patch
-sched-fair-Robustify-CFS-bandwidth-timer-locking.patch
-x86-ima-Check-EFI_RUNTIME_SERVICES-before-using.patch
# John's printk series
# [RFC PATCH v1 00/25] printk: new implementation
@@ -74,49 +57,27 @@ fscache-initialize-cookie-hash-table-raw-spinlocks.patch
Drivers-hv-vmbus-include-header-for-get_irq_regs.patch
percpu-include-irqflags.h-for-raw_local_irq_save.patch
mm-workingset-replace-IRQ-off-check-with-a-lockdep-a.patch
-crypto-chtls-remove-cdev_list_lock.patch
-crypto-user-remove-crypto_cfg_mutex.patch
tpm-remove-tpm_dev_wq_lock.patch
drm-i915-Don-t-disable-interrupts-independently-of-t.patch
-
-# [PATCH v9] x86: load FPU registers on return to userland
-# Date: Wed, 3 Apr 2019 18:41:29 +0200
-0001-x86-fpu-Remove-fpu-initialized-usage-in-__fpu__resto.patch
-0002-x86-fpu-Remove-fpu__restore.patch
-0003-x86-fpu-Remove-preempt_disable-in-fpu__clear.patch
-0004-x86-fpu-Always-init-the-state-in-fpu__clear.patch
-0005-x86-fpu-Remove-fpu-initialized-usage-in-copy_fpstate.patch
-0006-x86-fpu-Don-t-save-fxregs-for-ia32-frames-in-copy_fp.patch
-0007-x86-fpu-Remove-fpu-initialized.patch
-0008-x86-fpu-Remove-user_fpu_begin.patch
-0009-x86-fpu-Add-__-make_fpregs_active-helpers.patch
-0010-x86-fpu-Make-__raw_xsave_addr-use-feature-number-ins.patch
-0011-x86-fpu-Make-get_xsave_field_ptr-and-get_xsave_addr-.patch
-0012-x86-pkru-Provide-.-_pkru_ins-functions.patch
-0013-x86-fpu-Only-write-PKRU-if-it-is-different-from-curr.patch
-0014-x86-pkeys-Don-t-check-if-PKRU-is-zero-before-writtin.patch
-0015-x86-fpu-Eager-switch-PKRU-state.patch
-0016-x86-entry-Add-TIF_NEED_FPU_LOAD.patch
-0017-x86-fpu-Always-store-the-registers-in-copy_fpstate_t.patch
-0018-x86-fpu-Prepare-copy_fpstate_to_sigframe-for-TIF_NEE.patch
-0019-x86-fpu-Update-xstate-s-PKRU-value-on-write_pkru.patch
-0020-x86-fpu-Inline-copy_user_to_fpregs_zeroing.patch
-0021-x86-fpu-Let-__fpu__restore_sig-restore-the-32bit-fxs.patch
-0022-x86-fpu-Merge-the-two-code-paths-in-__fpu__restore_s.patch
-0023-x86-fpu-Defer-FPU-state-load-until-return-to-userspa.patch
-0024-x86-fpu-Add-a-fastpath-to-__fpu__restore_sig.patch
-0025-x86-fpu-Add-a-fastpath-to-copy_fpstate_to_sigframe.patch
-0026-x86-fpu-Restore-FPU-register-in-copy_fpstate_to_sigf.patch
-0027-x86-pkeys-add-PKRU-value-to-init_fpstate.patch
-0028-x86-fpu-Fault-in-user-stack-if-copy_fpstate_to_sigfr.patch
-0029-x86-fpu-Remove-unnecessary-saving-of-FPU-registers-i.patch
+locking-lockdep-Don-t-complain-about-incorrect-name-.patch
+arm-imx6-cpuidle-Use-raw_spinlock_t.patch
+x86-ldt-Initialize-the-context-lock-for-init_mm.patch
############################################################
# Ready for posting
############################################################
efi-Allow-efi-runtime.patch
-drm-i915-fence-Do-not-use-TIMER_IRQSAFE.patch
-kthread-Do-not-use-TIMER_IRQSAFE.patch
+
+softirq-Add-preemptible-softirq.patch
+sched-swait-Add-swait_event_lock_irq.patch
+# WORKQUEUE
+workqueue-Make-alloc-apply-free_workqueue_attrs-stat.patch
+workqueue-Remove-GPF-argument-from-alloc_workqueue_a.patch
+workqueue-Convert-the-locks-to-raw-type.patch
+sched-core-Schedule-new-worker-even-if-PI-blocked.patch
+# The two below use a workqueue as workaround
+fs-aio-simple-simple-work.patch
+block-blk-mq-move-blk_queue_usage_counter_release-in.patch
############################################################
# Needs to address review feedback
@@ -127,8 +88,16 @@ kthread-Do-not-use-TIMER_IRQSAFE.patch
############################################################
arm64-KVM-compute_layout-before-altenates-are-applie.patch
of-allocate-free-phandle-cache-outside-of-the-devtre.patch
-EXP-rcu-skip_workqueue.patch
kmemleak-Turn-kmemleak_lock-to-raw-spinlock-on-RT.patch
+futex-Make-the-futex_hash_bucket-lock-raw.patch
+futex-Delay-deallocation-of-pi_state.patch
+
+# Deleting active timer
+timers-Introduce-expiry-spin-lock.patch
+timers-Drop-expiry-lock-after-each-timer-invocation.patch
+hrtimer-Introduce-expiry-spin-lock.patch
+posix-timers-move-rcu-out-of-union.patch
+posix-timers-expiry-lock.patch
###############################################################
# Stuff broken upstream and upstream wants something different
@@ -141,6 +110,7 @@ sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch
# Those two should vanish soon (not use PIT during bootup)
at91_dont_enable_disable_clock.patch
clocksource-tclib-allow-higher-clockrates.patch
+clocksource-tclib-add-proper-depend.patch
# Timekeeping split jiffies lock. Needs a good argument :)
timekeeping-split-jiffies-lock.patch
@@ -163,26 +133,23 @@ kconfig-disable-a-few-options-rt.patch
lockdep-disable-self-test.patch
mm-disable-sloub-rt.patch
mutex-no-spin-on-rt.patch
-rcu-disable-rcu-fast-no-hz-on-rt.patch
rcu-make-RCU_BOOST-default-on-RT.patch
sched-disable-rt-group-sched-on-rt.patch
net_disable_NET_RX_BUSY_POLL.patch
arm-disable-NEON-in-kernel-mode.patch
-power-use-generic-rwsem-on-rt.patch
powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch
power-disable-highmem-on-rt.patch
mips-disable-highmem-on-rt.patch
-x86-use-gen-rwsem-spinlocks-rt.patch
leds-trigger-disable-CPU-trigger-on-RT.patch
cpufreq-drop-K8-s-driver-from-beeing-selected.patch
md-disable-bcache.patch
efi-Disable-runtime-services-on-RT.patch
+x86-Disable-HAVE_ARCH_JUMP_LABEL.patch
# PREEMPT NORT
preempt-nort-rt-variants.patch
# local locks & migrate disable
-futex-workaround-migrate_disable-enable-in-different.patch
rt-local-irq-lock.patch
locallock-provide-get-put-_locked_ptr-variants.patch
@@ -229,6 +196,7 @@ slub-disable-SLUB_CPU_PARTIAL.patch
mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
mm-memcontrol-do_not_disable_irq.patch
mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch
+mm-zswap-Do-not-disable-preemption-in-zswap_frontswa.patch
# RADIX TREE
radix-tree-use-local-locks.patch
@@ -242,19 +210,13 @@ x86-kvm-require-const-tsc-for-rt.patch
# SIMPLE WAITQUEUE
pci-switchtec-Don-t-use-completion-s-wait-queue.patch
wait.h-include-atomic.h.patch
-kthread-add-a-global-worker-thread.patch
completion-use-simple-wait-queues.patch
-fs-aio-simple-simple-work.patch
-genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
-psi-replace-delayed-work-with-timer-work.patch
+sched-completion-Fix-a-lockup-in-wait_for_completion.patch
# HRTIMERS
-time-hrtimer-avoid-schedule_work-with-interrupts-dis.patch
hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
-hrtimers-prepare-full-preemption.patch
hrtimer-by-timers-by-default-into-the-softirq-context.patch
sched-fair-Make-the-hrtimers-non-hard-again.patch
-hrtimer-Move-schedule_work-call-to-helper-thread.patch
hrtimer-move-state-change-before-hrtimer_cancel-in-d.patch
# POSIX-CPU-TIMERS
@@ -269,7 +231,6 @@ sched-rt-mutex-wakeup.patch
sched-might-sleep-do-not-account-rcu-depth.patch
cond-resched-lock-rt-tweak.patch
sched-disable-ttwu-queue.patch
-sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch
# MIGRATE DISABLE AND PER CPU
@@ -280,23 +241,16 @@ ftrace-migrate-disable-tracing.patch
lockdep-no-softirq-accounting-on-rt.patch
# SOFTIRQ
-tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
+softirq-Avoid-a-cancel-dead-lock-in-tasklet-handling.patch
softirq-preempt-fix-3-re.patch
softirq-disable-softirq-stacks-for-rt.patch
-softirq-split-locks.patch
+
net-core-use-local_bh_disable-in-netif_rx_ni.patch
-irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
-softirq-split-timer-softirqs-out-of-ksoftirqd.patch
rtmutex-trylock-is-okay-on-RT.patch
# compile fix due to rtmutex locks
fs-nfs-turn-rmdir_sem-into-a-semaphore.patch
-# FUTEX/RTMUTEX
-rtmutex-futex-prepare-rt.patch
-futex-requeue-pi-fix.patch
-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
-
# RTMUTEX
pid.h-include-atomic.h.patch
arm-include-definition-for-cpumask_t.patch
@@ -326,7 +280,6 @@ locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch
# RCU
rcu-Eliminate-softirq-processing-from-rcutree.patch
-srcu-Remove-srcu_queue_delayed_work_on.patch
srcu-replace-local_irqsave-with-a-locallock.patch
rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch
@@ -337,15 +290,11 @@ tty-serial-pl011-warning-about-uninitialized.patch
rt-serial-warn-fix.patch
# FS
-peterz-percpu-rwsem-rt.patch
mm-protect-activate-switch-mm.patch
fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-in.patch
fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch
squashfs-make-use-of-local-lock-in-multi_cpu-decompr.patch
-# X86
-thermal-Defer-thermal-wakups-to-threads.patch
-
# POWER
powerpc-pseries-iommu-Use-a-locallock-instead-local_ir.patch
@@ -360,9 +309,6 @@ md-raid5-percpu-handling-rt-aware.patch
# CPU CHILL
rt-introduce-cpu-chill.patch
-# block
-block-blk-mq-move-blk_queue_usage_counter_release-in.patch
-
# BLOCK LIVELOCK PREVENTION
block-use-cpu-chill.patch
@@ -371,12 +317,6 @@ fs-dcache-use-cpu-chill-in-trylock-loops.patch
net-use-cpu-chill.patch
fs-dcache-use-swait_queue-instead-of-waitqueue.patch
-# WORKQUEUE more fixes
-workqueue-use-rcu.patch
-workqueue-use-locallock.patch
-work-queue-work-around-irqsafe-timer-optimization.patch
-workqueue-distangle-from-rq-lock.patch
-
# DEBUGOBJECTS
debugobjects-rt.patch
@@ -386,12 +326,8 @@ seqlock-prevent-rt-starvation.patch
# NETWORKING
sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
skbufhead-raw-lock.patch
-net-move-xmit_recursion-to-per-task-variable-on-RT.patch
-net-provide-a-way-to-delegate-processing-a-softirq-t.patch
net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
net-Qdisc-use-a-seqlock-instead-seqcount.patch
-net-add-back-the-missing-serialization-in-ip_send_un.patch
-net-add-a-lock-around-icmp_sk.patch
net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
# irqwork
@@ -419,7 +355,6 @@ scsi-fcoe-rt-aware.patch
x86-crypto-reduce-preempt-disabled-regions.patch
crypto-Reduce-preempt-disabled-regions-more-algos.patch
crypto-limit-more-FPU-enabled-sections.patch
-crypto-scompress-serialize-RT-percpu-scratch-buffer-.patch
crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch
# RANDOM
@@ -427,7 +362,6 @@ panic-disable-random-on-rt.patch
x86-stackprot-no-random-on-rt.patch
powerpc-stackprotector-work-around-stack-guard-init-.patch
random-make-it-work-on-rt.patch
-random-avoid-preempt_disable-ed-section.patch
# HOTPLUG
cpu-hotplug--Implement-CPU-pinning.patch
@@ -467,15 +401,11 @@ drm-i915-disable-tracing-on-RT.patch
drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch
# CGROUPS
-cgroups-use-simple-wait-in-css_release.patch
cpuset-Convert-callback_lock-to-raw_spinlock_t.patch
# Security
apparmor-use-a-locallock-instead-preempt_disable.patch
-# WORKQUEUE SIGH
-workqueue-prevent-deadlock-stall.patch
-
# Nice to have
signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
diff --git a/debian/patches-rt/signal-revert-ptrace-preempt-magic.patch b/debian/patches-rt/signal-revert-ptrace-preempt-magic.patch
index 093afdde5..4c84819d8 100644
--- a/debian/patches-rt/signal-revert-ptrace-preempt-magic.patch
+++ b/debian/patches-rt/signal-revert-ptrace-preempt-magic.patch
@@ -1,7 +1,7 @@
Subject: signal: Revert ptrace preempt magic
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 21 Sep 2011 19:57:12 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Upstream commit '53da1d9456fe7f8 fix ptrace slowness' is nothing more
than a bandaid around the ptrace design trainwreck. It's not a
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/signal.c
+++ b/kernel/signal.c
-@@ -2097,15 +2097,7 @@ static void ptrace_stop(int exit_code, i
+@@ -2103,15 +2103,7 @@ static void ptrace_stop(int exit_code, i
if (gstop_done && ptrace_reparented(current))
do_notify_parent_cldstop(current, false, why);
@@ -27,6 +27,6 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
- preempt_disable();
read_unlock(&tasklist_lock);
- preempt_enable_no_resched();
+ cgroup_enter_frozen();
freezable_schedule();
- } else {
- /*
+ cgroup_leave_frozen(true);
diff --git a/debian/patches-rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/debian/patches-rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
index 04064e19a..9a7346d4e 100644
--- a/debian/patches-rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
+++ b/debian/patches-rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 3 Jul 2009 08:44:56 -0500
Subject: signals: Allow rt tasks to cache one sigqueue struct
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
To avoid allocation allow rt tasks to cache one sigqueue struct in
task struct.
@@ -18,7 +18,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -895,6 +895,8 @@ struct task_struct {
+@@ -889,6 +889,8 @@ struct task_struct {
/* Signal handlers: */
struct signal_struct *signal;
struct sighand_struct *sighand;
@@ -39,7 +39,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static inline int valid_signal(unsigned long sig)
--- a/kernel/exit.c
+++ b/kernel/exit.c
-@@ -160,7 +160,7 @@ static void __exit_signal(struct task_st
+@@ -161,7 +161,7 @@ static void __exit_signal(struct task_st
* Do this under ->siglock, we can race with another thread
* doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
*/
@@ -50,7 +50,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -1848,6 +1848,7 @@ static __latent_entropy struct task_stru
+@@ -1932,6 +1932,7 @@ static __latent_entropy struct task_stru
spin_lock_init(&p->alloc_lock);
init_sigpending(&p->pending);
@@ -60,15 +60,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
--- a/kernel/signal.c
+++ b/kernel/signal.c
-@@ -19,6 +19,7 @@
+@@ -20,6 +20,7 @@
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
+#include <linux/sched/rt.h>
+ #include <linux/file.h>
#include <linux/fs.h>
- #include <linux/tty.h>
- #include <linux/binfmts.h>
-@@ -393,13 +394,30 @@ void task_join_group_stop(struct task_st
+ #include <linux/proc_fs.h>
+@@ -398,13 +399,30 @@ void task_join_group_stop(struct task_st
}
}
@@ -100,7 +100,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
struct sigqueue *q = NULL;
struct user_struct *user;
-@@ -416,7 +434,10 @@ static struct sigqueue *
+@@ -421,7 +439,10 @@ static struct sigqueue *
if (override_rlimit ||
atomic_read(&user->sigpending) <=
task_rlimit(t, RLIMIT_SIGPENDING)) {
@@ -112,7 +112,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} else {
print_dropped_signal(sig);
}
-@@ -433,6 +454,13 @@ static struct sigqueue *
+@@ -438,6 +459,13 @@ static struct sigqueue *
return q;
}
@@ -126,7 +126,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void __sigqueue_free(struct sigqueue *q)
{
if (q->flags & SIGQUEUE_PREALLOC)
-@@ -442,6 +470,21 @@ static void __sigqueue_free(struct sigqu
+@@ -447,6 +475,21 @@ static void __sigqueue_free(struct sigqu
kmem_cache_free(sigqueue_cachep, q);
}
@@ -148,7 +148,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void flush_sigqueue(struct sigpending *queue)
{
struct sigqueue *q;
-@@ -455,6 +498,21 @@ void flush_sigqueue(struct sigpending *q
+@@ -460,6 +503,21 @@ void flush_sigqueue(struct sigpending *q
}
/*
@@ -170,7 +170,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* Flush all pending signals for this kthread.
*/
void flush_signals(struct task_struct *t)
-@@ -578,7 +636,7 @@ static void collect_signal(int sig, stru
+@@ -583,7 +641,7 @@ static void collect_signal(int sig, stru
(info->si_code == SI_TIMER) &&
(info->si_sys_private);
@@ -179,7 +179,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} else {
/*
* Ok, it wasn't in the queue. This must be
-@@ -615,6 +673,8 @@ int dequeue_signal(struct task_struct *t
+@@ -620,6 +678,8 @@ int dequeue_signal(struct task_struct *t
bool resched_timer = false;
int signr;
@@ -188,7 +188,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* We only dequeue private signals from ourselves, we don't let
* signalfd steal them
*/
-@@ -1750,7 +1810,8 @@ EXPORT_SYMBOL(kill_pid);
+@@ -1756,7 +1816,8 @@ EXPORT_SYMBOL(kill_pid);
*/
struct sigqueue *sigqueue_alloc(void)
{
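
To make the caching scheme above concrete, here is a hedged userspace
sketch of a one-slot per-task cache; function and field names are
illustrative, not the kernel API, and the locking the kernel does under
siglock (plus freeing the slot at task exit) is omitted:

    #include <stdlib.h>

    struct sigqueue {
            int sig;                         /* stands in for the real struct */
    };

    struct task {
            struct sigqueue *sigqueue_cache; /* at most one cached entry */
    };

    static struct sigqueue *sigqueue_alloc_fast(struct task *t)
    {
            struct sigqueue *q = t->sigqueue_cache;

            if (q) {                         /* fast path: no allocation */
                    t->sigqueue_cache = NULL;
                    return q;
            }
            return malloc(sizeof(*q));       /* slow path: regular allocation */
    }

    static void sigqueue_free_cached(struct task *t, struct sigqueue *q)
    {
            if (!t->sigqueue_cache)          /* refill the single slot */
                    t->sigqueue_cache = q;
            else
                    free(q);
    }

    int main(void)
    {
            struct task t = { 0 };
            struct sigqueue *q = sigqueue_alloc_fast(&t);  /* slow path */

            sigqueue_free_cached(&t, q);     /* parks q in the cache */
            q = sigqueue_alloc_fast(&t);     /* fast path: same object back */
            free(q);
            return 0;
    }

For an RT task this turns the common signal send/receive cycle into a
pointer swap instead of a trip through the allocator.
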
diff --git a/debian/patches-rt/skbufhead-raw-lock.patch b/debian/patches-rt/skbufhead-raw-lock.patch
index 9d1285602..dc09b39c4 100644
--- a/debian/patches-rt/skbufhead-raw-lock.patch
+++ b/debian/patches-rt/skbufhead-raw-lock.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 12 Jul 2011 15:38:34 +0200
Subject: net: Use skbufhead with raw lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Use the rps lock as rawlock so we can keep irq-off regions. It looks low
latency. However we can't kfree() from this context therefore we defer this
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
-@@ -3013,6 +3013,7 @@ struct softnet_data {
+@@ -3005,6 +3005,7 @@ struct softnet_data {
unsigned int dropped;
struct sk_buff_head input_pkt_queue;
struct napi_struct backlog;
@@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
-@@ -289,6 +289,7 @@ struct sk_buff_head {
+@@ -285,6 +285,7 @@ struct sk_buff_head {
__u32 qlen;
spinlock_t lock;
@@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
};
struct sk_buff;
-@@ -1746,6 +1747,12 @@ static inline void skb_queue_head_init(s
+@@ -1812,6 +1813,12 @@ static inline void skb_queue_head_init(s
__skb_queue_head_init(list);
}
@@ -49,7 +49,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -221,14 +221,14 @@ static inline struct hlist_head *dev_ind
+@@ -217,14 +217,14 @@ static inline struct hlist_head *dev_ind
static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
@@ -66,7 +66,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
}
-@@ -5323,7 +5323,7 @@ static void flush_backlog(struct work_st
+@@ -5318,7 +5318,7 @@ static void flush_backlog(struct work_st
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->input_pkt_queue);
@@ -75,7 +75,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
input_queue_head_incr(sd);
}
}
-@@ -5333,11 +5333,14 @@ static void flush_backlog(struct work_st
+@@ -5328,11 +5328,14 @@ static void flush_backlog(struct work_st
skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->process_queue);
@@ -91,7 +91,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void flush_all_backlogs(void)
-@@ -5937,7 +5940,9 @@ static int process_backlog(struct napi_s
+@@ -5932,7 +5935,9 @@ static int process_backlog(struct napi_s
while (again) {
struct sk_buff *skb;
@@ -101,7 +101,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rcu_read_lock();
__netif_receive_skb(skb);
rcu_read_unlock();
-@@ -5945,9 +5950,9 @@ static int process_backlog(struct napi_s
+@@ -5940,9 +5945,9 @@ static int process_backlog(struct napi_s
if (++work >= quota)
return work;
@@ -112,7 +112,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rps_lock(sd);
if (skb_queue_empty(&sd->input_pkt_queue)) {
/*
-@@ -6412,13 +6417,21 @@ static __latent_entropy void net_rx_acti
+@@ -6407,13 +6412,21 @@ static __latent_entropy void net_rx_acti
unsigned long time_limit = jiffies +
usecs_to_jiffies(netdev_budget_usecs);
int budget = netdev_budget;
@@ -134,7 +134,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for (;;) {
struct napi_struct *n;
-@@ -9436,10 +9449,13 @@ static int dev_cpu_dead(unsigned int old
+@@ -9524,10 +9537,13 @@ static int dev_cpu_dead(unsigned int old
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
@@ -149,7 +149,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return 0;
}
-@@ -9748,8 +9764,9 @@ static int __init net_dev_init(void)
+@@ -9836,8 +9852,9 @@ static int __init net_dev_init(void)
INIT_WORK(flush, flush_backlog);
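
The defer-the-kfree() idea above follows a common RT pattern: while a raw
(non-sleeping) lock is held, objects are only unlinked onto a side list,
and the actual freeing happens after the lock is dropped. A hedged
userspace sketch with illustrative names:

    #include <stdlib.h>

    struct item {
            struct item *next;
    };

    struct queue {
            /* in the kernel a raw spinlock protects this; freeing may
             * sleep on RT, so it must not happen under that lock */
            struct item *tofree;             /* deferred-free side list */
    };

    static void drop_item_locked(struct queue *q, struct item *it)
    {
            /* called with the raw lock held: only park the item */
            it->next = q->tofree;
            q->tofree = it;
    }

    static void flush_tofree_unlocked(struct queue *q)
    {
            /* called after the raw lock is released: freeing is safe now */
            struct item *it = q->tofree;

            q->tofree = NULL;
            while (it) {
                    struct item *next = it->next;

                    free(it);
                    it = next;
            }
    }

    int main(void)
    {
            struct queue q = { 0 };

            /* pretend the raw lock is held here */
            drop_item_locked(&q, malloc(sizeof(struct item)));
            /* raw lock released */
            flush_tofree_unlocked(&q);
            return 0;
    }
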
diff --git a/debian/patches-rt/slub-disable-SLUB_CPU_PARTIAL.patch b/debian/patches-rt/slub-disable-SLUB_CPU_PARTIAL.patch
index 096c79f84..69ff444ca 100644
--- a/debian/patches-rt/slub-disable-SLUB_CPU_PARTIAL.patch
+++ b/debian/patches-rt/slub-disable-SLUB_CPU_PARTIAL.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 15 Apr 2015 19:00:47 +0200
Subject: slub: Disable SLUB_CPU_PARTIAL
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:915
|in_atomic(): 1, irqs_disabled(): 0, pid: 87, name: rcuop/7
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -1731,7 +1731,7 @@ config SLAB_FREELIST_HARDENED
+@@ -1781,7 +1781,7 @@ config SHUFFLE_PAGE_ALLOCATOR
config SLUB_CPU_PARTIAL
default y
diff --git a/debian/patches-rt/slub-enable-irqs-for-no-wait.patch b/debian/patches-rt/slub-enable-irqs-for-no-wait.patch
index 6d01c0e76..3ef056b38 100644
--- a/debian/patches-rt/slub-enable-irqs-for-no-wait.patch
+++ b/debian/patches-rt/slub-enable-irqs-for-no-wait.patch
@@ -1,7 +1,7 @@
Subject: slub: Enable irqs for __GFP_WAIT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 09 Jan 2013 12:08:15 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
SYSTEM_RUNNING might be too late for enabling interrupts. Allocations
with GFP_WAIT can happen before that. So use this as an indicator.
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -1630,10 +1630,18 @@ static struct page *allocate_slab(struct
+@@ -1621,10 +1621,18 @@ static struct page *allocate_slab(struct
void *start, *p, *next;
int idx, order;
bool shuffle;
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
local_irq_enable();
flags |= s->allocflags;
-@@ -1693,7 +1701,7 @@ static struct page *allocate_slab(struct
+@@ -1684,7 +1692,7 @@ static struct page *allocate_slab(struct
page->frozen = 1;
out:
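
The indicator the patch switches to can be sketched as follows. This
assumes the modern gfpflags_allow_blocking() helper, which tests
__GFP_DIRECT_RECLAIM (the successor of __GFP_WAIT); the surrounding
function is a stand-in, not the real mm/slub.c code:

    #include <stdbool.h>
    #include <stdio.h>

    typedef unsigned int gfp_t;
    #define __GFP_DIRECT_RECLAIM (1u << 0)   /* stand-in for the kernel flag */

    static bool gfpflags_allow_blocking(gfp_t flags)
    {
            return flags & __GFP_DIRECT_RECLAIM;
    }

    static void allocate_slab_sketch(gfp_t flags)
    {
            /* Decide from the caller's gfp flags, not from SYSTEM_RUNNING,
             * whether interrupts may be enabled around the allocation. */
            bool enable_irqs = gfpflags_allow_blocking(flags);

            if (enable_irqs)
                    puts("local_irq_enable(); /* allocation may sleep */");
            /* ... allocate the page ... */
            if (enable_irqs)
                    puts("local_irq_disable();");
    }

    int main(void)
    {
            allocate_slab_sketch(__GFP_DIRECT_RECLAIM);  /* may block */
            allocate_slab_sketch(0);                     /* atomic caller */
            return 0;
    }
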
diff --git a/debian/patches-rt/softirq-Add-preemptible-softirq.patch b/debian/patches-rt/softirq-Add-preemptible-softirq.patch
new file mode 100644
index 000000000..c355f6283
--- /dev/null
+++ b/debian/patches-rt/softirq-Add-preemptible-softirq.patch
@@ -0,0 +1,477 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 20 May 2019 13:09:08 +0200
+Subject: [PATCH] softirq: Add preemptible softirq
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
+
+Add preemptible softirq for RT's needs. By removing the softirq count
+from the preempt counter, the softirq becomes preemptible. A per-CPU
+lock ensures that there is no parallel softirq processing and that
+per-CPU variables are not accessed in parallel by multiple threads.
+
+local_bh_enable() will process all softirq work that has been raised in
+its BH-disabled section once the BH counter drops to zero.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/bottom_half.h | 5
+ include/linux/interrupt.h | 1
+ include/linux/preempt.h | 17 ++-
+ include/linux/rcupdate.h | 3
+ include/linux/sched.h | 3
+ kernel/softirq.c | 222 +++++++++++++++++++++++++++++++++++++++++++-
+ kernel/time/tick-sched.c | 9 -
+ 7 files changed, 246 insertions(+), 14 deletions(-)
+
+--- a/include/linux/bottom_half.h
++++ b/include/linux/bottom_half.h
+@@ -4,6 +4,10 @@
+
+ #include <linux/preempt.h>
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
++#else
++
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
+ #else
+@@ -13,6 +17,7 @@ static __always_inline void __local_bh_d
+ barrier();
+ }
+ #endif
++#endif
+
+ static inline void local_bh_disable(void)
+ {
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -556,6 +556,7 @@ extern void __raise_softirq_irqoff(unsig
+
+ extern void raise_softirq_irqoff(unsigned int nr);
+ extern void raise_softirq(unsigned int nr);
++extern void softirq_check_pending_idle(void);
+
+ DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
+
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -78,10 +78,8 @@
+ #include <asm/preempt.h>
+
+ #define hardirq_count() (preempt_count() & HARDIRQ_MASK)
+-#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
+ #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
+ | NMI_MASK))
+-
+ /*
+ * Are we doing bottom half or hardware interrupt processing?
+ *
+@@ -96,12 +94,23 @@
+ * should not be used in new code.
+ */
+ #define in_irq() (hardirq_count())
+-#define in_softirq() (softirq_count())
+ #define in_interrupt() (irq_count())
+-#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
+ #define in_nmi() (preempt_count() & NMI_MASK)
+ #define in_task() (!(preempt_count() & \
+ (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
++#ifdef CONFIG_PREEMPT_RT_FULL
++
++#define softirq_count() ((long)get_current()->softirq_count)
++#define in_softirq() (softirq_count())
++#define in_serving_softirq() (get_current()->softirq_count & SOFTIRQ_OFFSET)
++
++#else
++
++#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
++#define in_softirq() (softirq_count())
++#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
++
++#endif
+
+ /*
+ * The preempt_count offset after preempt_disable();
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -272,7 +272,8 @@ static inline void rcu_preempt_sleep_che
+ #define rcu_sleep_check() \
+ do { \
+ rcu_preempt_sleep_check(); \
+- RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) \
++ RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \
+ "Illegal context switch in RCU-bh read-side critical section"); \
+ RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map), \
+ "Illegal context switch in RCU-sched read-side critical section"); \
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -923,6 +923,9 @@ struct task_struct {
+ int softirqs_enabled;
+ int softirq_context;
+ #endif
++#ifdef CONFIG_PREEMPT_RT_FULL
++ int softirq_count;
++#endif
+
+ #ifdef CONFIG_LOCKDEP
+ # define MAX_LOCK_DEPTH 48UL
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -25,6 +25,9 @@
+ #include <linux/smpboot.h>
+ #include <linux/tick.h>
+ #include <linux/irq.h>
++#ifdef CONFIG_PREEMPT_RT_FULL
++#include <linux/locallock.h>
++#endif
+
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/irq.h>
+@@ -102,6 +105,98 @@ static bool ksoftirqd_running(unsigned l
+ * softirq and whether we just have bh disabled.
+ */
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++static DEFINE_LOCAL_IRQ_LOCK(bh_lock);
++static DEFINE_PER_CPU(long, softirq_counter);
++
++void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
++{
++ unsigned long __maybe_unused flags;
++ long soft_cnt;
++
++ WARN_ON_ONCE(in_irq());
++ if (!in_atomic())
++ local_lock(bh_lock);
++ soft_cnt = this_cpu_inc_return(softirq_counter);
++ WARN_ON_ONCE(soft_cnt == 0);
++ current->softirq_count += SOFTIRQ_DISABLE_OFFSET;
++
++#ifdef CONFIG_TRACE_IRQFLAGS
++ local_irq_save(flags);
++ if (soft_cnt == 1)
++ trace_softirqs_off(ip);
++ local_irq_restore(flags);
++#endif
++}
++EXPORT_SYMBOL(__local_bh_disable_ip);
++
++static void local_bh_disable_rt(void)
++{
++ local_bh_disable();
++}
++
++void _local_bh_enable(void)
++{
++ unsigned long __maybe_unused flags;
++ long soft_cnt;
++
++ soft_cnt = this_cpu_dec_return(softirq_counter);
++ WARN_ON_ONCE(soft_cnt < 0);
++
++#ifdef CONFIG_TRACE_IRQFLAGS
++ local_irq_save(flags);
++ if (soft_cnt == 0)
++ trace_softirqs_on(_RET_IP_);
++ local_irq_restore(flags);
++#endif
++
++ current->softirq_count -= SOFTIRQ_DISABLE_OFFSET;
++ if (!in_atomic())
++ local_unlock(bh_lock);
++}
++
++void _local_bh_enable_rt(void)
++{
++ _local_bh_enable();
++}
++
++void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
++{
++ u32 pending;
++ long count;
++
++ WARN_ON_ONCE(in_irq());
++ lockdep_assert_irqs_enabled();
++
++ local_irq_disable();
++ count = this_cpu_read(softirq_counter);
++
++ if (unlikely(count == 1)) {
++ pending = local_softirq_pending();
++ if (pending && !ksoftirqd_running(pending)) {
++ if (!in_atomic())
++ __do_softirq();
++ else
++ wakeup_softirqd();
++ }
++ trace_softirqs_on(ip);
++ }
++ count = this_cpu_dec_return(softirq_counter);
++ WARN_ON_ONCE(count < 0);
++ local_irq_enable();
++
++ if (!in_atomic())
++ local_unlock(bh_lock);
++
++ current->softirq_count -= SOFTIRQ_DISABLE_OFFSET;
++ preempt_check_resched();
++}
++EXPORT_SYMBOL(__local_bh_enable_ip);
++
++#else
++static void local_bh_disable_rt(void) { }
++static void _local_bh_enable_rt(void) { }
++
+ /*
+ * This one is for softirq.c-internal use,
+ * where hardirqs are disabled legitimately:
+@@ -196,6 +291,7 @@ void __local_bh_enable_ip(unsigned long
+ preempt_check_resched();
+ }
+ EXPORT_SYMBOL(__local_bh_enable_ip);
++#endif
+
+ /*
+ * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
+@@ -266,7 +362,11 @@ asmlinkage __visible void __softirq_entr
+ pending = local_softirq_pending();
+ account_irq_enter_time(current);
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++ current->softirq_count |= SOFTIRQ_OFFSET;
++#else
+ __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
++#endif
+ in_hardirq = lockdep_softirq_start();
+
+ restart:
+@@ -300,9 +400,10 @@ asmlinkage __visible void __softirq_entr
+ h++;
+ pending >>= softirq_bit;
+ }
+-
++#ifndef CONFIG_PREEMPT_RT_FULL
+ if (__this_cpu_read(ksoftirqd) == current)
+ rcu_softirq_qs();
++#endif
+ local_irq_disable();
+
+ pending = local_softirq_pending();
+@@ -316,11 +417,16 @@ asmlinkage __visible void __softirq_entr
+
+ lockdep_softirq_end(in_hardirq);
+ account_irq_exit_time(current);
++#ifdef CONFIG_PREEMPT_RT_FULL
++ current->softirq_count &= ~SOFTIRQ_OFFSET;
++#else
+ __local_bh_enable(SOFTIRQ_OFFSET);
++#endif
+ WARN_ON_ONCE(in_interrupt());
+ current_restore_flags(old_flags, PF_MEMALLOC);
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ asmlinkage __visible void do_softirq(void)
+ {
+ __u32 pending;
+@@ -338,6 +444,7 @@ asmlinkage __visible void do_softirq(voi
+
+ local_irq_restore(flags);
+ }
++#endif
+
+ /*
+ * Enter an interrupt context.
+@@ -358,6 +465,16 @@ void irq_enter(void)
+ __irq_enter();
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++
++static inline void invoke_softirq(void)
++{
++ if (this_cpu_read(softirq_counter) == 0)
++ wakeup_softirqd();
++}
++
++#else
++
+ static inline void invoke_softirq(void)
+ {
+ if (ksoftirqd_running(local_softirq_pending()))
+@@ -383,6 +500,7 @@ static inline void invoke_softirq(void)
+ wakeup_softirqd();
+ }
+ }
++#endif
+
+ static inline void tick_irq_exit(void)
+ {
+@@ -420,6 +538,27 @@ void irq_exit(void)
+ /*
+ * This function must run with irqs disabled!
+ */
++#ifdef CONFIG_PREEMPT_RT_FULL
++void raise_softirq_irqoff(unsigned int nr)
++{
++ __raise_softirq_irqoff(nr);
++
++ /*
++	 * If we're in a hard interrupt we let the irq return code deal
++ * with the wakeup of ksoftirqd.
++ */
++ if (in_irq())
++ return;
++ /*
++	 * If we are not in a BH-disabled section then we have to wake
++ * ksoftirqd.
++ */
++ if (this_cpu_read(softirq_counter) == 0)
++ wakeup_softirqd();
++}
++
++#else
++
+ inline void raise_softirq_irqoff(unsigned int nr)
+ {
+ __raise_softirq_irqoff(nr);
+@@ -437,6 +576,8 @@ inline void raise_softirq_irqoff(unsigne
+ wakeup_softirqd();
+ }
+
++#endif
++
+ void raise_softirq(unsigned int nr)
+ {
+ unsigned long flags;
+@@ -594,6 +735,7 @@ static int ksoftirqd_should_run(unsigned
+
+ static void run_ksoftirqd(unsigned int cpu)
+ {
++ local_bh_disable_rt();
+ local_irq_disable();
+ if (local_softirq_pending()) {
+ /*
+@@ -602,10 +744,12 @@ static void run_ksoftirqd(unsigned int c
+ */
+ __do_softirq();
+ local_irq_enable();
++ _local_bh_enable_rt();
+ cond_resched();
+ return;
+ }
+ local_irq_enable();
++ _local_bh_enable_rt();
+ }
+
+ #ifdef CONFIG_HOTPLUG_CPU
+@@ -679,6 +823,13 @@ static struct smp_hotplug_thread softirq
+
+ static __init int spawn_ksoftirqd(void)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
++ int cpu;
++
++ for_each_possible_cpu(cpu)
++ lockdep_set_novalidate_class(per_cpu_ptr(&bh_lock.lock, cpu));
++#endif
++
+ cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
+ takeover_tasklets);
+ BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
+@@ -687,6 +838,75 @@ static __init int spawn_ksoftirqd(void)
+ }
+ early_initcall(spawn_ksoftirqd);
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++
++/*
++ * On preempt-rt a softirq running context might be blocked on a
++ * lock. There might be no other runnable task on this CPU because the
++ * lock owner runs on some other CPU. So we have to go into idle with
++ * the pending bit set. Therefore we need to check this, otherwise we
++ * warn about false positives, which confuses users and defeats the
++ * whole purpose of this test.
++ *
++ * This code is called with interrupts disabled.
++ */
++void softirq_check_pending_idle(void)
++{
++ struct task_struct *tsk = __this_cpu_read(ksoftirqd);
++ static int rate_limit;
++ bool okay = false;
++ u32 warnpending;
++
++ if (rate_limit >= 10)
++ return;
++
++ warnpending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK;
++ if (!warnpending)
++ return;
++
++ if (!tsk)
++ return;
++ /*
++ * If ksoftirqd is blocked on a lock then we may go idle with pending
++ * softirq.
++ */
++ raw_spin_lock(&tsk->pi_lock);
++ if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING ||
++ (tsk->state == TASK_UNINTERRUPTIBLE && tsk->sleeping_lock)) {
++ okay = true;
++ }
++ raw_spin_unlock(&tsk->pi_lock);
++ if (okay)
++ return;
++ /*
++ * The softirq lock is held in non-atomic context and the owner is
++ * blocking on a lock. It will schedule softirqs once the counter goes
++ * back to zero.
++ */
++ if (this_cpu_read(softirq_counter) > 0)
++ return;
++
++ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
++ warnpending);
++ rate_limit++;
++}
++
++#else
++
++void softirq_check_pending_idle(void)
++{
++ static int ratelimit;
++
++ if (ratelimit < 10 &&
++ (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
++ pr_warn("NOHZ: local_softirq_pending %02x\n",
++ (unsigned int) local_softirq_pending());
++ ratelimit++;
++ }
++}
++
++#endif
++
+ /*
+ * [ These __weak aliases are kept in a separate compilation unit, so that
+ * GCC does not inline them incorrectly. ]
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -893,14 +893,7 @@ static bool can_stop_idle_tick(int cpu,
+ return false;
+
+ if (unlikely(local_softirq_pending())) {
+- static int ratelimit;
+-
+- if (ratelimit < 10 &&
+- (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
+- pr_warn("NOHZ: local_softirq_pending %02x\n",
+- (unsigned int) local_softirq_pending());
+- ratelimit++;
+- }
++ softirq_check_pending_idle();
+ return false;
+ }
+
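
The accounting described in this patch can be modelled compactly: the
BH-disable depth moves out of the preempt counter into the task, and a
per-CPU counter decides when the outermost local_bh_enable() must run
pending softirqs. A hedged userspace sketch (the per-CPU bh_lock and the
in_atomic() special cases are elided):

    #include <stdio.h>

    #define SOFTIRQ_DISABLE_OFFSET 2      /* model constant, not the kernel value */

    struct task {
            long softirq_count;
    };

    static long cpu_softirq_counter;      /* this_cpu counter stand-in */
    static struct task current_task;      /* "current" stand-in */

    static void local_bh_disable_model(void)
    {
            /* the patch also takes the per-CPU bh_lock here */
            cpu_softirq_counter++;
            current_task.softirq_count += SOFTIRQ_DISABLE_OFFSET;
    }

    static void local_bh_enable_model(void)
    {
            if (cpu_softirq_counter == 1)
                    puts("outermost enable: run pending softirqs");
            cpu_softirq_counter--;
            current_task.softirq_count -= SOFTIRQ_DISABLE_OFFSET;
            /* the patch drops the bh_lock and checks for preemption here */
    }

    int main(void)
    {
            local_bh_disable_model();
            local_bh_disable_model();     /* nested disable */
            local_bh_enable_model();      /* inner enable: no processing yet */
            local_bh_enable_model();      /* outermost enable: softirqs run */
            return 0;
    }
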
diff --git a/debian/patches-rt/softirq-Avoid-a-cancel-dead-lock-in-tasklet-handling.patch b/debian/patches-rt/softirq-Avoid-a-cancel-dead-lock-in-tasklet-handling.patch
new file mode 100644
index 000000000..aa132bcfa
--- /dev/null
+++ b/debian/patches-rt/softirq-Avoid-a-cancel-dead-lock-in-tasklet-handling.patch
@@ -0,0 +1,49 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Sat, 22 Jun 2019 00:09:22 +0200
+Subject: [PATCH] softirq: Avoid a cancel dead-lock in tasklet handling due to
+ preemptible-softirq
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
+
+A pending / active tasklet which is preempted by a task on the same CPU
+makes no progress, so a task waiting for it will spin indefinitely.
+To avoid this deadlock we can disable BH, which acquires the softirq
+lock and thereby forces the completion of the softirq and so of the
+tasklet.
+The BH off/on in tasklet_kill() forces completion of tasklets which are
+scheduled but not yet running (because ksoftirqd was preempted before it
+could start the tasklet).
+The BH off/on in tasklet_unlock_wait() forces completion of tasklets
+which got preempted while running.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/interrupt.h | 5 ++++-
+ kernel/softirq.c | 3 ++-
+ 2 files changed, 6 insertions(+), 2 deletions(-)
+
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -625,7 +625,10 @@ static inline void tasklet_unlock(struct
+
+ static inline void tasklet_unlock_wait(struct tasklet_struct *t)
+ {
+- while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
++ while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
++ local_bh_disable();
++ local_bh_enable();
++ }
+ }
+ #else
+ #define tasklet_trylock(t) 1
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -705,7 +705,8 @@ void tasklet_kill(struct tasklet_struct
+
+ while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
+ do {
+- yield();
++ local_bh_disable();
++ local_bh_enable();
+ } while (test_bit(TASKLET_STATE_SCHED, &t->state));
+ }
+ tasklet_unlock_wait(t);
diff --git a/debian/patches-rt/softirq-disable-softirq-stacks-for-rt.patch b/debian/patches-rt/softirq-disable-softirq-stacks-for-rt.patch
index e8cdde176..05fc60f50 100644
--- a/debian/patches-rt/softirq-disable-softirq-stacks-for-rt.patch
+++ b/debian/patches-rt/softirq-disable-softirq-stacks-for-rt.patch
@@ -1,7 +1,7 @@
Subject: softirq: Disable softirq stacks for RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 18 Jul 2011 13:59:17 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Disable extra stacks for softirqs. We want to preempt softirqs and
having them on special IRQ-stack does not make this easier.
@@ -20,17 +20,14 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
-@@ -766,6 +766,7 @@ void irq_ctx_init(void)
- }
- }
+@@ -679,10 +679,12 @@ void *mcheckirq_ctx[NR_CPUS] __read_most
+ void *softirq_ctx[NR_CPUS] __read_mostly;
+ void *hardirq_ctx[NR_CPUS] __read_mostly;
+#ifndef CONFIG_PREEMPT_RT_FULL
void do_softirq_own_stack(void)
{
- struct thread_info *curtp, *irqtp;
-@@ -783,6 +784,7 @@ void do_softirq_own_stack(void)
- if (irqtp->flags)
- set_bits(irqtp->flags, &curtp->flags);
+ call_do_softirq(softirq_ctx[smp_processor_id()]);
}
+#endif
@@ -38,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
-@@ -42,6 +42,7 @@
+@@ -37,6 +37,7 @@
* We store the saved ksp_limit in the unused part
* of the STACK_FRAME_OVERHEAD
*/
@@ -46,17 +43,17 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
_GLOBAL(call_do_softirq)
mflr r0
stw r0,4(r1)
-@@ -58,6 +59,7 @@
+@@ -52,6 +53,7 @@
stw r10,THREAD+KSP_LIMIT(r2)
mtlr r0
blr
+#endif
/*
- * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
+ * void call_do_irq(struct pt_regs *regs, void *sp);
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
-@@ -32,6 +32,7 @@
+@@ -27,6 +27,7 @@
.text
@@ -64,7 +61,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
_GLOBAL(call_do_softirq)
mflr r0
std r0,16(r1)
-@@ -42,6 +43,7 @@
+@@ -37,6 +38,7 @@
ld r0,16(r1)
mtlr r0
blr
@@ -110,7 +107,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void fixup_irqs(void)
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
-@@ -1019,6 +1019,7 @@ EXPORT_SYMBOL(native_load_gs_index)
+@@ -1032,6 +1032,7 @@ EXPORT_SYMBOL(native_load_gs_index)
jmp 2b
.previous
@@ -118,7 +115,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(do_softirq_own_stack)
pushq %rbp
-@@ -1029,6 +1030,7 @@ ENTRY(do_softirq_own_stack)
+@@ -1042,6 +1043,7 @@ ENTRY(do_softirq_own_stack)
leaveq
ret
ENDPROC(do_softirq_own_stack)
@@ -128,15 +125,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
-@@ -130,6 +130,7 @@ void irq_ctx_init(int cpu)
- cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
+@@ -131,6 +131,7 @@ int irq_init_percpu_irqstack(unsigned in
+ return 0;
}
+#ifndef CONFIG_PREEMPT_RT_FULL
void do_softirq_own_stack(void)
{
struct irq_stack *irqstk;
-@@ -146,6 +147,7 @@ void do_softirq_own_stack(void)
+@@ -147,6 +148,7 @@ void do_softirq_own_stack(void)
call_on_stack(__do_softirq, isp);
}
@@ -146,7 +143,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -521,7 +521,7 @@ struct softirq_action
+@@ -545,7 +545,7 @@ struct softirq_action
asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
diff --git a/debian/patches-rt/softirq-preempt-fix-3-re.patch b/debian/patches-rt/softirq-preempt-fix-3-re.patch
index 8473da66d..23e1a746a 100644
--- a/debian/patches-rt/softirq-preempt-fix-3-re.patch
+++ b/debian/patches-rt/softirq-preempt-fix-3-re.patch
@@ -1,7 +1,7 @@
Subject: softirq: Check preemption after reenabling interrupts
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 13 Nov 2011 17:17:09 +0100 (CET)
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
raise_softirq_irqoff() disables interrupts and wakes the softirq
daemon, but after reenabling interrupts there is no preemption check,
@@ -49,7 +49,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static __init int blk_softirq_init(void)
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
-@@ -184,8 +184,10 @@ do { \
+@@ -193,8 +193,10 @@ do { \
#ifdef CONFIG_PREEMPT_RT_BASE
# define preempt_enable_no_resched() sched_preempt_enable_no_resched()
@@ -60,7 +60,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
#define preemptible() (preempt_count() == 0 && !irqs_disabled())
-@@ -272,6 +274,7 @@ do { \
+@@ -281,6 +283,7 @@ do { \
#define preempt_disable_notrace() barrier()
#define preempt_enable_no_resched_notrace() barrier()
#define preempt_enable_notrace() barrier()
@@ -72,7 +72,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+++ b/lib/irq_poll.c
@@ -37,6 +37,7 @@ void irq_poll_sched(struct irq_poll *iop
list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
- __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
+ raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
local_irq_restore(flags);
+ preempt_check_resched_rt();
}
@@ -112,7 +112,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -2754,6 +2754,7 @@ static void __netif_reschedule(struct Qd
+@@ -2750,6 +2750,7 @@ static void __netif_reschedule(struct Qd
sd->output_queue_tailp = &q->next_sched;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
@@ -120,7 +120,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
void __netif_schedule(struct Qdisc *q)
-@@ -2816,6 +2817,7 @@ void __dev_kfree_skb_irq(struct sk_buff
+@@ -2812,6 +2813,7 @@ void __dev_kfree_skb_irq(struct sk_buff
__this_cpu_write(softnet_data.completion_queue, skb);
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
@@ -128,7 +128,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);
-@@ -4295,6 +4297,7 @@ static int enqueue_to_backlog(struct sk_
+@@ -4294,6 +4296,7 @@ static int enqueue_to_backlog(struct sk_
rps_unlock(sd);
local_irq_restore(flags);
@@ -136,7 +136,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
-@@ -5899,12 +5902,14 @@ static void net_rps_action_and_irq_enabl
+@@ -5894,12 +5897,14 @@ static void net_rps_action_and_irq_enabl
sd->rps_ipi_list = NULL;
local_irq_enable();
@@ -151,7 +151,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
-@@ -5982,6 +5987,7 @@ void __napi_schedule(struct napi_struct
+@@ -5977,6 +5982,7 @@ void __napi_schedule(struct napi_struct
local_irq_save(flags);
____napi_schedule(this_cpu_ptr(&softnet_data), n);
local_irq_restore(flags);
@@ -159,7 +159,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__napi_schedule);
-@@ -9418,6 +9424,7 @@ static int dev_cpu_dead(unsigned int old
+@@ -9506,6 +9512,7 @@ static int dev_cpu_dead(unsigned int old
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
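
The pattern this patch adds at each wakeup site is small enough to model
directly; preempt_check_resched_rt() is the RT helper the patch
introduces, everything else below is a userspace stand-in:

    #include <stdbool.h>
    #include <stdio.h>

    static bool daemon_woken;

    static void raise_softirq_irqoff(int nr)
    {
            /* wakes the softirq daemon; interrupts are "off", so it
             * cannot run yet */
            daemon_woken = true;
            printf("softirq %d raised, daemon woken\n", nr);
    }

    static void preempt_check_resched_rt(void)
    {
            /* the check the patch adds: without it the freshly woken
             * daemon may be delayed until the next scheduling point */
            if (daemon_woken) {
                    daemon_woken = false;
                    puts("schedule(): daemon runs now");
            }
    }

    int main(void)
    {
            /* local_irq_save(flags); */
            raise_softirq_irqoff(2 /* e.g. NET_TX_SOFTIRQ */);
            /* local_irq_restore(flags); */
            preempt_check_resched_rt();
            return 0;
    }
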
diff --git a/debian/patches-rt/softirq-split-locks.patch b/debian/patches-rt/softirq-split-locks.patch
deleted file mode 100644
index 1a1b86405..000000000
--- a/debian/patches-rt/softirq-split-locks.patch
+++ /dev/null
@@ -1,844 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Thu, 04 Oct 2012 14:20:47 +0100
-Subject: softirq: Split softirq locks
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-The 3.x RT series removed the split softirq implementation in favour
-of pushing softirq processing into the context of the thread which
-raised it. Though this prevents us from handling the various softirqs
-at different priorities. Now instead of reintroducing the split
-softirq threads we split the locks which serialize the softirq
-processing.
-
-If a softirq is raised in context of a thread, then the softirq is
-noted on a per thread field, if the thread is in a bh disabled
-region. If the softirq is raised from hard interrupt context, then the
-bit is set in the flag field of ksoftirqd and ksoftirqd is invoked.
-When a thread leaves a bh disabled region, then it tries to execute
-the softirqs which have been raised in its own context. It acquires
-the per softirq / per cpu lock for the softirq and then checks,
-whether the softirq is still pending in the per cpu
-local_softirq_pending() field. If yes, it runs the softirq. If no,
-then some other task executed it already. This allows for zero config
-softirq elevation in the context of user space tasks or interrupt
-threads.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- include/linux/bottom_half.h | 34 ++
- include/linux/interrupt.h | 15 +
- include/linux/preempt.h | 15 +
- include/linux/sched.h | 3
- init/main.c | 1
- kernel/softirq.c | 521 +++++++++++++++++++++++++++++++++++++-------
- kernel/time/tick-sched.c | 9
- 7 files changed, 507 insertions(+), 91 deletions(-)
-
---- a/include/linux/bottom_half.h
-+++ b/include/linux/bottom_half.h
-@@ -4,6 +4,39 @@
-
- #include <linux/preempt.h>
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+
-+extern void __local_bh_disable(void);
-+extern void _local_bh_enable(void);
-+extern void __local_bh_enable(void);
-+
-+static inline void local_bh_disable(void)
-+{
-+ __local_bh_disable();
-+}
-+
-+static inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
-+{
-+ __local_bh_disable();
-+}
-+
-+static inline void local_bh_enable(void)
-+{
-+ __local_bh_enable();
-+}
-+
-+static inline void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
-+{
-+ __local_bh_enable();
-+}
-+
-+static inline void local_bh_enable_ip(unsigned long ip)
-+{
-+ __local_bh_enable();
-+}
-+
-+#else
-+
- #ifdef CONFIG_TRACE_IRQFLAGS
- extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
- #else
-@@ -31,5 +64,6 @@ static inline void local_bh_enable(void)
- {
- __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
- }
-+#endif
-
- #endif /* _LINUX_BH_H */
---- a/include/linux/interrupt.h
-+++ b/include/linux/interrupt.h
-@@ -518,10 +518,11 @@ struct softirq_action
- void (*action)(struct softirq_action *);
- };
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- asmlinkage void do_softirq(void);
- asmlinkage void __do_softirq(void);
--
--#if defined(__ARCH_HAS_DO_SOFTIRQ) && !defined(CONFIG_PREEMPT_RT_FULL)
-+static inline void thread_do_softirq(void) { do_softirq(); }
-+#ifdef __ARCH_HAS_DO_SOFTIRQ
- void do_softirq_own_stack(void);
- #else
- static inline void do_softirq_own_stack(void)
-@@ -529,6 +530,9 @@ static inline void do_softirq_own_stack(
- __do_softirq();
- }
- #endif
-+#else
-+extern void thread_do_softirq(void);
-+#endif
-
- extern void open_softirq(int nr, void (*action)(struct softirq_action *));
- extern void softirq_init(void);
-@@ -536,6 +540,7 @@ extern void __raise_softirq_irqoff(unsig
-
- extern void raise_softirq_irqoff(unsigned int nr);
- extern void raise_softirq(unsigned int nr);
-+extern void softirq_check_pending_idle(void);
-
- DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
-
-@@ -653,6 +658,12 @@ extern void tasklet_kill_immediate(struc
- extern void tasklet_init(struct tasklet_struct *t,
- void (*func)(unsigned long), unsigned long data);
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+extern void softirq_early_init(void);
-+#else
-+static inline void softirq_early_init(void) { }
-+#endif
-+
- struct tasklet_hrtimer {
- struct hrtimer timer;
- struct tasklet_struct tasklet;
---- a/include/linux/preempt.h
-+++ b/include/linux/preempt.h
-@@ -51,7 +51,11 @@
- #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
- #define NMI_OFFSET (1UL << NMI_SHIFT)
-
--#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+# define SOFTIRQ_DISABLE_OFFSET (0)
-+#else
-+# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
-+#endif
-
- #define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
-
-@@ -78,9 +82,15 @@
- #include <asm/preempt.h>
-
- #define hardirq_count() (preempt_count() & HARDIRQ_MASK)
--#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
- #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
- | NMI_MASK))
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
-+# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
-+#else
-+# define softirq_count() ((unsigned long)current->softirq_nestcnt)
-+extern int in_serving_softirq(void);
-+#endif
-
- /*
- * Are we doing bottom half or hardware interrupt processing?
-@@ -98,7 +108,6 @@
- #define in_irq() (hardirq_count())
- #define in_softirq() (softirq_count())
- #define in_interrupt() (irq_count())
--#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
- #define in_nmi() (preempt_count() & NMI_MASK)
- #define in_task() (!(preempt_count() & \
- (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -1194,6 +1194,8 @@ struct task_struct {
- #endif
- #ifdef CONFIG_PREEMPT_RT_BASE
- struct rcu_head put_rcu;
-+ int softirq_nestcnt;
-+ unsigned int softirqs_raised;
- #endif
- #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
- unsigned long task_state_change;
-@@ -1396,6 +1398,7 @@ extern struct pid *cad_pid;
- /*
- * Per process flags
- */
-+#define PF_IN_SOFTIRQ 0x00000001 /* Task is serving softirq */
- #define PF_IDLE 0x00000002 /* I am an IDLE thread */
- #define PF_EXITING 0x00000004 /* Getting shut down */
- #define PF_EXITPIDONE 0x00000008 /* PI exit done on shut down */
---- a/init/main.c
-+++ b/init/main.c
-@@ -567,6 +567,7 @@ asmlinkage __visible void __init start_k
- setup_command_line(command_line);
- setup_nr_cpu_ids();
- setup_per_cpu_areas();
-+ softirq_early_init();
- smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
- boot_cpu_hotplug_init();
-
---- a/kernel/softirq.c
-+++ b/kernel/softirq.c
-@@ -26,7 +26,9 @@
- #include <linux/smp.h>
- #include <linux/smpboot.h>
- #include <linux/tick.h>
-+#include <linux/locallock.h>
- #include <linux/irq.h>
-+#include <linux/sched/types.h>
-
- #define CREATE_TRACE_POINTS
- #include <trace/events/irq.h>
-@@ -63,6 +65,126 @@ const char * const softirq_to_name[NR_SO
- "TASKLET", "SCHED", "HRTIMER", "RCU"
- };
-
-+#ifdef CONFIG_NO_HZ_COMMON
-+# ifdef CONFIG_PREEMPT_RT_FULL
-+
-+struct softirq_runner {
-+ struct task_struct *runner[NR_SOFTIRQS];
-+};
-+
-+static DEFINE_PER_CPU(struct softirq_runner, softirq_runners);
-+
-+static inline void softirq_set_runner(unsigned int sirq)
-+{
-+ struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
-+
-+ sr->runner[sirq] = current;
-+}
-+
-+static inline void softirq_clr_runner(unsigned int sirq)
-+{
-+ struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
-+
-+ sr->runner[sirq] = NULL;
-+}
-+
-+static bool softirq_check_runner_tsk(struct task_struct *tsk,
-+ unsigned int *pending)
-+{
-+ bool ret = false;
-+
-+ if (!tsk)
-+ return ret;
-+
-+ /*
-+ * The wakeup code in rtmutex.c wakes up the task
-+ * _before_ it sets pi_blocked_on to NULL under
-+ * tsk->pi_lock. So we need to check for both: state
-+ * and pi_blocked_on.
-+ * The test against UNINTERRUPTIBLE + ->sleeping_lock is in case the
-+ * task does cpu_chill().
-+ */
-+ raw_spin_lock(&tsk->pi_lock);
-+ if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING ||
-+ (tsk->state == TASK_UNINTERRUPTIBLE && tsk->sleeping_lock)) {
-+ /* Clear all bits pending in that task */
-+ *pending &= ~(tsk->softirqs_raised);
-+ ret = true;
-+ }
-+ raw_spin_unlock(&tsk->pi_lock);
-+
-+ return ret;
-+}
-+
-+/*
-+ * On preempt-rt a softirq running context might be blocked on a
-+ * lock. There might be no other runnable task on this CPU because the
-+ * lock owner runs on some other CPU. So we have to go into idle with
-+ * the pending bit set. Therefor we need to check this otherwise we
-+ * warn about false positives which confuses users and defeats the
-+ * whole purpose of this test.
-+ *
-+ * This code is called with interrupts disabled.
-+ */
-+void softirq_check_pending_idle(void)
-+{
-+ struct task_struct *tsk;
-+ static int rate_limit;
-+ struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
-+ u32 warnpending;
-+ int i;
-+
-+ if (rate_limit >= 10)
-+ return;
-+
-+ warnpending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK;
-+ if (!warnpending)
-+ return;
-+ for (i = 0; i < NR_SOFTIRQS; i++) {
-+ tsk = sr->runner[i];
-+
-+ if (softirq_check_runner_tsk(tsk, &warnpending))
-+ warnpending &= ~(1 << i);
-+ }
-+
-+ if (warnpending) {
-+ tsk = __this_cpu_read(ksoftirqd);
-+ softirq_check_runner_tsk(tsk, &warnpending);
-+ }
-+
-+ if (warnpending) {
-+ tsk = __this_cpu_read(ktimer_softirqd);
-+ softirq_check_runner_tsk(tsk, &warnpending);
-+ }
-+
-+ if (warnpending) {
-+ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
-+ warnpending);
-+ rate_limit++;
-+ }
-+}
-+# else
-+/*
-+ * On !PREEMPT_RT we just printk rate limited:
-+ */
-+void softirq_check_pending_idle(void)
-+{
-+ static int ratelimit;
-+
-+ if (ratelimit < 10 &&
-+ (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
-+ pr_warn("NOHZ: local_softirq_pending %02x\n",
-+ (unsigned int) local_softirq_pending());
-+ ratelimit++;
-+ }
-+}
-+# endif
-+
-+#else /* !CONFIG_NO_HZ_COMMON */
-+static inline void softirq_set_runner(unsigned int sirq) { }
-+static inline void softirq_clr_runner(unsigned int sirq) { }
-+#endif
-+
- /*
- * we cannot loop indefinitely here to avoid userspace starvation,
- * but we also don't want to introduce a worst case 1/HZ latency
-@@ -78,6 +200,27 @@ static void wakeup_softirqd(void)
- wake_up_process(tsk);
- }
-
-+static void handle_softirq(unsigned int vec_nr)
-+{
-+ struct softirq_action *h = softirq_vec + vec_nr;
-+ int prev_count;
-+
-+ prev_count = preempt_count();
-+
-+ kstat_incr_softirqs_this_cpu(vec_nr);
-+
-+ trace_softirq_entry(vec_nr);
-+ h->action(h);
-+ trace_softirq_exit(vec_nr);
-+ if (unlikely(prev_count != preempt_count())) {
-+ pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
-+ vec_nr, softirq_to_name[vec_nr], h->action,
-+ prev_count, preempt_count());
-+ preempt_count_set(prev_count);
-+ }
-+}
-+
-+#ifndef CONFIG_PREEMPT_RT_FULL
- /*
- * If ksoftirqd is scheduled, we do not want to process pending softirqs
- * right now. Let ksoftirqd handle this at its own rate, to get fairness,
-@@ -93,6 +236,48 @@ static bool ksoftirqd_running(unsigned l
- return tsk && (tsk->state == TASK_RUNNING);
- }
-
-+static inline int ksoftirqd_softirq_pending(void)
-+{
-+ return local_softirq_pending();
-+}
-+
-+static void handle_pending_softirqs(u32 pending)
-+{
-+ struct softirq_action *h = softirq_vec;
-+ int softirq_bit;
-+
-+ local_irq_enable();
-+
-+ h = softirq_vec;
-+
-+ while ((softirq_bit = ffs(pending))) {
-+ unsigned int vec_nr;
-+
-+ h += softirq_bit - 1;
-+ vec_nr = h - softirq_vec;
-+ handle_softirq(vec_nr);
-+
-+ h++;
-+ pending >>= softirq_bit;
-+ }
-+
-+ if (__this_cpu_read(ksoftirqd) == current)
-+ rcu_softirq_qs();
-+ local_irq_disable();
-+}
-+
-+static void run_ksoftirqd(unsigned int cpu)
-+{
-+ local_irq_disable();
-+ if (ksoftirqd_softirq_pending()) {
-+ __do_softirq();
-+ local_irq_enable();
-+ cond_resched();
-+ return;
-+ }
-+ local_irq_enable();
-+}
-+
- /*
- * preempt_count and SOFTIRQ_OFFSET usage:
- * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
-@@ -252,10 +437,8 @@ asmlinkage __visible void __softirq_entr
- unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
- unsigned long old_flags = current->flags;
- int max_restart = MAX_SOFTIRQ_RESTART;
-- struct softirq_action *h;
- bool in_hardirq;
- __u32 pending;
-- int softirq_bit;
-
- /*
- * Mask out PF_MEMALLOC as the current task context is borrowed for the
-@@ -274,37 +457,7 @@ asmlinkage __visible void __softirq_entr
- /* Reset the pending bitmask before enabling irqs */
- set_softirq_pending(0);
-
-- local_irq_enable();
--
-- h = softirq_vec;
--
-- while ((softirq_bit = ffs(pending))) {
-- unsigned int vec_nr;
-- int prev_count;
--
-- h += softirq_bit - 1;
--
-- vec_nr = h - softirq_vec;
-- prev_count = preempt_count();
--
-- kstat_incr_softirqs_this_cpu(vec_nr);
--
-- trace_softirq_entry(vec_nr);
-- h->action(h);
-- trace_softirq_exit(vec_nr);
-- if (unlikely(prev_count != preempt_count())) {
-- pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
-- vec_nr, softirq_to_name[vec_nr], h->action,
-- prev_count, preempt_count());
-- preempt_count_set(prev_count);
-- }
-- h++;
-- pending >>= softirq_bit;
-- }
--
-- if (__this_cpu_read(ksoftirqd) == current)
-- rcu_softirq_qs();
-- local_irq_disable();
-+ handle_pending_softirqs(pending);
-
- pending = local_softirq_pending();
- if (pending) {
-@@ -341,6 +494,248 @@ asmlinkage __visible void do_softirq(voi
- }
-
- /*
-+ * This function must run with irqs disabled!
-+ */
-+void raise_softirq_irqoff(unsigned int nr)
-+{
-+ __raise_softirq_irqoff(nr);
-+
-+ /*
-+ * If we're in an interrupt or softirq, we're done
-+ * (this also catches softirq-disabled code). We will
-+ * actually run the softirq once we return from
-+ * the irq or softirq.
-+ *
-+ * Otherwise we wake up ksoftirqd to make sure we
-+ * schedule the softirq soon.
-+ */
-+ if (!in_interrupt())
-+ wakeup_softirqd();
-+}
-+
-+void __raise_softirq_irqoff(unsigned int nr)
-+{
-+ trace_softirq_raise(nr);
-+ or_softirq_pending(1UL << nr);
-+}
-+
-+static inline void local_bh_disable_nort(void) { local_bh_disable(); }
-+static inline void _local_bh_enable_nort(void) { _local_bh_enable(); }
-+static void ksoftirqd_set_sched_params(unsigned int cpu) { }
-+static void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) { }
-+
-+#else /* !PREEMPT_RT_FULL */
-+
-+/*
-+ * On RT we serialize softirq execution with a cpu local lock per softirq
-+ */
-+static DEFINE_PER_CPU(struct local_irq_lock [NR_SOFTIRQS], local_softirq_locks);
-+
-+void __init softirq_early_init(void)
-+{
-+ int i;
-+
-+ for (i = 0; i < NR_SOFTIRQS; i++)
-+ local_irq_lock_init(local_softirq_locks[i]);
-+}
-+
-+static void lock_softirq(int which)
-+{
-+ local_lock(local_softirq_locks[which]);
-+}
-+
-+static void unlock_softirq(int which)
-+{
-+ local_unlock(local_softirq_locks[which]);
-+}
-+
-+static void do_single_softirq(int which)
-+{
-+ unsigned long old_flags = current->flags;
-+
-+ current->flags &= ~PF_MEMALLOC;
-+ vtime_account_irq_enter(current);
-+ current->flags |= PF_IN_SOFTIRQ;
-+ lockdep_softirq_enter();
-+ local_irq_enable();
-+ handle_softirq(which);
-+ local_irq_disable();
-+ lockdep_softirq_exit();
-+ current->flags &= ~PF_IN_SOFTIRQ;
-+ vtime_account_irq_enter(current);
-+ current_restore_flags(old_flags, PF_MEMALLOC);
-+}
-+
-+/*
-+ * Called with interrupts disabled. Process softirqs which were raised
-+ * in current context (or on behalf of ksoftirqd).
-+ */
-+static void do_current_softirqs(void)
-+{
-+ while (current->softirqs_raised) {
-+ int i = __ffs(current->softirqs_raised);
-+ unsigned int pending, mask = (1U << i);
-+
-+ current->softirqs_raised &= ~mask;
-+ local_irq_enable();
-+
-+ /*
-+ * If the lock is contended, we boost the owner to
-+ * process the softirq or leave the critical section
-+ * now.
-+ */
-+ lock_softirq(i);
-+ local_irq_disable();
-+ softirq_set_runner(i);
-+ /*
-+	 * Check the local_softirq_pending() bits to see whether we
-+	 * still need to process this or whether someone else already
-+	 * took care of it.
-+ */
-+ pending = local_softirq_pending();
-+ if (pending & mask) {
-+ set_softirq_pending(pending & ~mask);
-+ do_single_softirq(i);
-+ }
-+ softirq_clr_runner(i);
-+ WARN_ON(current->softirq_nestcnt != 1);
-+ local_irq_enable();
-+ unlock_softirq(i);
-+ local_irq_disable();
-+ }
-+}
-+
-+void __local_bh_disable(void)
-+{
-+ if (++current->softirq_nestcnt == 1)
-+ migrate_disable();
-+}
-+EXPORT_SYMBOL(__local_bh_disable);
-+
-+void __local_bh_enable(void)
-+{
-+ if (WARN_ON(current->softirq_nestcnt == 0))
-+ return;
-+
-+ local_irq_disable();
-+ if (current->softirq_nestcnt == 1 && current->softirqs_raised)
-+ do_current_softirqs();
-+ local_irq_enable();
-+
-+ if (--current->softirq_nestcnt == 0)
-+ migrate_enable();
-+}
-+EXPORT_SYMBOL(__local_bh_enable);
-+
-+int in_serving_softirq(void)
-+{
-+ return current->flags & PF_IN_SOFTIRQ;
-+}
-+EXPORT_SYMBOL(in_serving_softirq);
-+
-+/* Called with preemption disabled */
-+static void run_ksoftirqd(unsigned int cpu)
-+{
-+ local_irq_disable();
-+ current->softirq_nestcnt++;
-+
-+ do_current_softirqs();
-+ current->softirq_nestcnt--;
-+ local_irq_enable();
-+ cond_resched();
-+}
-+
-+/*
-+ * Called from netif_rx_ni(). Preemption enabled, but migration
-+ * disabled. So the cpu can't go away under us.
-+ */
-+void thread_do_softirq(void)
-+{
-+ if (!in_serving_softirq() && current->softirqs_raised) {
-+ current->softirq_nestcnt++;
-+ do_current_softirqs();
-+ current->softirq_nestcnt--;
-+ }
-+}
-+
-+static void do_raise_softirq_irqoff(unsigned int nr)
-+{
-+ trace_softirq_raise(nr);
-+ or_softirq_pending(1UL << nr);
-+
-+ /*
-+ * If we are not in a hard interrupt and inside a bh disabled
-+ * region, we simply raise the flag on current. local_bh_enable()
-+ * will make sure that the softirq is executed. Otherwise we
-+ * delegate it to ksoftirqd.
-+ */
-+ if (!in_irq() && current->softirq_nestcnt)
-+ current->softirqs_raised |= (1U << nr);
-+ else if (__this_cpu_read(ksoftirqd))
-+ __this_cpu_read(ksoftirqd)->softirqs_raised |= (1U << nr);
-+}
-+
-+void __raise_softirq_irqoff(unsigned int nr)
-+{
-+ do_raise_softirq_irqoff(nr);
-+ if (!in_irq() && !current->softirq_nestcnt)
-+ wakeup_softirqd();
-+}
-+
-+/*
-+ * This function must run with irqs disabled!
-+ */
-+void raise_softirq_irqoff(unsigned int nr)
-+{
-+ do_raise_softirq_irqoff(nr);
-+
-+ /*
-+	 * If we're in a hard interrupt we let the irq return code deal
-+ * with the wakeup of ksoftirqd.
-+ */
-+ if (in_irq())
-+ return;
-+ /*
-+ * If we are in thread context but outside of a bh disabled
-+ * region, we need to wake ksoftirqd as well.
-+ *
-+ * CHECKME: Some of the places which do that could be wrapped
-+ * into local_bh_disable/enable pairs. Though it's unclear
-+ * whether this is worth the effort. To find those places just
-+ * raise a WARN() if the condition is met.
-+ */
-+ if (!current->softirq_nestcnt)
-+ wakeup_softirqd();
-+}
-+
-+static inline int ksoftirqd_softirq_pending(void)
-+{
-+ return current->softirqs_raised;
-+}
-+
-+static inline void local_bh_disable_nort(void) { }
-+static inline void _local_bh_enable_nort(void) { }
-+
-+static inline void ksoftirqd_set_sched_params(unsigned int cpu)
-+{
-+ struct sched_param param = { .sched_priority = 1 };
-+
-+ sched_setscheduler(current, SCHED_FIFO, &param);
-+ /* Take over all pending softirqs when starting */
-+ local_irq_disable();
-+ current->softirqs_raised = local_softirq_pending();
-+ local_irq_enable();
-+}
-+
-+static inline void ksoftirqd_clr_sched_params(unsigned int cpu, bool online)
-+{
-+ struct sched_param param = { .sched_priority = 0 };
-+
-+ sched_setscheduler(current, SCHED_NORMAL, &param);
-+}
-+
-+#endif /* PREEMPT_RT_FULL */
-+/*
- * Enter an interrupt context.
- */
- void irq_enter(void)
-@@ -351,9 +746,9 @@ void irq_enter(void)
- * Prevent raise_softirq from needlessly waking up ksoftirqd
- * here, as softirq will be serviced on return from interrupt.
- */
-- local_bh_disable();
-+ local_bh_disable_nort();
- tick_irq_enter();
-- _local_bh_enable();
-+ _local_bh_enable_nort();
- }
-
- __irq_enter();
-@@ -361,6 +756,7 @@ void irq_enter(void)
-
- static inline void invoke_softirq(void)
- {
-+#ifndef CONFIG_PREEMPT_RT_FULL
- if (ksoftirqd_running(local_softirq_pending()))
- return;
-
-@@ -383,6 +779,15 @@ static inline void invoke_softirq(void)
- } else {
- wakeup_softirqd();
- }
-+#else /* PREEMPT_RT_FULL */
-+ unsigned long flags;
-+
-+ local_irq_save(flags);
-+ if (__this_cpu_read(ksoftirqd) &&
-+ __this_cpu_read(ksoftirqd)->softirqs_raised)
-+ wakeup_softirqd();
-+ local_irq_restore(flags);
-+#endif
- }
-
- static inline void tick_irq_exit(void)
-@@ -418,26 +823,6 @@ void irq_exit(void)
- trace_hardirq_exit(); /* must be last! */
- }
-
--/*
-- * This function must run with irqs disabled!
-- */
--inline void raise_softirq_irqoff(unsigned int nr)
--{
-- __raise_softirq_irqoff(nr);
--
-- /*
-- * If we're in an interrupt or softirq, we're done
-- * (this also catches softirq-disabled code). We will
-- * actually run the softirq once we return from
-- * the irq or softirq.
-- *
-- * Otherwise we wake up ksoftirqd to make sure we
-- * schedule the softirq soon.
-- */
-- if (!in_interrupt())
-- wakeup_softirqd();
--}
--
- void raise_softirq(unsigned int nr)
- {
- unsigned long flags;
-@@ -447,12 +832,6 @@ void raise_softirq(unsigned int nr)
- local_irq_restore(flags);
- }
-
--void __raise_softirq_irqoff(unsigned int nr)
--{
-- trace_softirq_raise(nr);
-- or_softirq_pending(1UL << nr);
--}
--
- void open_softirq(int nr, void (*action)(struct softirq_action *))
- {
- softirq_vec[nr].action = action;
-@@ -726,23 +1105,7 @@ EXPORT_SYMBOL(tasklet_unlock_wait);
-
- static int ksoftirqd_should_run(unsigned int cpu)
- {
-- return local_softirq_pending();
--}
--
--static void run_ksoftirqd(unsigned int cpu)
--{
-- local_irq_disable();
-- if (local_softirq_pending()) {
-- /*
-- * We can safely run softirq on inline stack, as we are not deep
-- * in the task stack here.
-- */
-- __do_softirq();
-- local_irq_enable();
-- cond_resched();
-- return;
-- }
-- local_irq_enable();
-+ return ksoftirqd_softirq_pending();
- }
-
- #ifdef CONFIG_HOTPLUG_CPU
-@@ -809,6 +1172,8 @@ static int takeover_tasklets(unsigned in
-
- static struct smp_hotplug_thread softirq_threads = {
- .store = &ksoftirqd,
-+ .setup = ksoftirqd_set_sched_params,
-+ .cleanup = ksoftirqd_clr_sched_params,
- .thread_should_run = ksoftirqd_should_run,
- .thread_fn = run_ksoftirqd,
- .thread_comm = "ksoftirqd/%u",
---- a/kernel/time/tick-sched.c
-+++ b/kernel/time/tick-sched.c
-@@ -888,14 +888,7 @@ static bool can_stop_idle_tick(int cpu,
- return false;
-
- if (unlikely(local_softirq_pending())) {
-- static int ratelimit;
--
-- if (ratelimit < 10 &&
-- (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
-- pr_warn("NOHZ: local_softirq_pending %02x\n",
-- (unsigned int) local_softirq_pending());
-- ratelimit++;
-- }
-+ softirq_check_pending_idle();
- return false;
- }
-
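A stand-alone illustration of the nesting scheme in the hunks above: only the outermost __local_bh_enable() processes the raised softirqs. This is a minimal user-space C model, not kernel code — the names, the counter, and the output are simplified stand-ins, and the real code additionally disables interrupts and migration:

#include <stdio.h>

static int nestcnt;		/* models current->softirq_nestcnt */
static unsigned int raised;	/* models current->softirqs_raised */

static void do_current_softirqs(void)
{
	while (raised) {
		int i = __builtin_ctz(raised);	/* lowest pending bit */

		raised &= ~(1u << i);
		printf("running softirq %d\n", i);
	}
}

static void model_local_bh_disable(void) { nestcnt++; }

static void model_local_bh_enable(void)
{
	/* Run pending softirqs only when leaving the outermost section */
	if (nestcnt == 1 && raised)
		do_current_softirqs();
	nestcnt--;
}

int main(void)
{
	model_local_bh_disable();
	model_local_bh_disable();	/* nested bh-disabled section */
	raised |= 1u << 3;		/* a softirq is raised meanwhile */
	model_local_bh_enable();	/* inner: does not run it */
	model_local_bh_enable();	/* outermost: runs softirq 3 */
	return 0;
}
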
diff --git a/debian/patches-rt/softirq-split-timer-softirqs-out-of-ksoftirqd.patch b/debian/patches-rt/softirq-split-timer-softirqs-out-of-ksoftirqd.patch
deleted file mode 100644
index d18d6e9ff..000000000
--- a/debian/patches-rt/softirq-split-timer-softirqs-out-of-ksoftirqd.patch
+++ /dev/null
@@ -1,240 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 20 Jan 2016 16:34:17 +0100
-Subject: softirq: split timer softirqs out of ksoftirqd
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-The softirqd runs in -RT with SCHED_FIFO (prio 1) and deals mostly with
-timer wakeups, which cannot happen in hardirq context. The priority has
-been raised from the normal SCHED_OTHER so that the timer wakeup does
-not happen too late.
-With enough networking load it is possible that the system never goes
-idle and schedules ksoftirqd and everything else with a higher priority.
-One of the tasks left behind is one of RCU's threads and so we see stalls
-and eventually run out of memory.
-This patch moves the TIMER and HRTIMER softirqs out of the `ksoftirqd`
-thread into its own `ktimersoftd` thread. The former can now run at
-SCHED_OTHER (same as mainline) and the latter at SCHED_FIFO due to the
-wakeups.
-
-From a networking point of view: the NAPI callback runs after the network
-interrupt thread completes. If its run time takes too long, the NAPI code
-itself schedules `ksoftirqd`. There it can run at SCHED_OTHER priority
-and it won't defer RCU anymore.
-
-Cc: stable-rt@vger.kernel.org
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/interrupt.h | 15 ++++++++
- kernel/softirq.c | 86 +++++++++++++++++++++++++++++++++++++++-------
- 2 files changed, 89 insertions(+), 12 deletions(-)
-
---- a/include/linux/interrupt.h
-+++ b/include/linux/interrupt.h
-@@ -545,12 +545,27 @@ extern void raise_softirq(unsigned int n
- extern void softirq_check_pending_idle(void);
-
- DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
-+DECLARE_PER_CPU(struct task_struct *, ktimer_softirqd);
-
- static inline struct task_struct *this_cpu_ksoftirqd(void)
- {
- return this_cpu_read(ksoftirqd);
- }
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static inline bool task_is_ktimer_softirqd(struct task_struct *tsk)
-+{
-+ return tsk == this_cpu_read(ktimer_softirqd);
-+}
-+
-+#else
-+static inline bool task_is_ktimer_softirqd(struct task_struct *tsk)
-+{
-+ return false;
-+}
-+
-+#endif
-+
- /* Tasklets --- multithreaded analogue of BHs.
-
- Main feature differing them of generic softirqs: tasklet
---- a/kernel/softirq.c
-+++ b/kernel/softirq.c
-@@ -59,6 +59,10 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
- static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
-
- DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+#define TIMER_SOFTIRQS ((1 << TIMER_SOFTIRQ) | (1 << HRTIMER_SOFTIRQ))
-+DEFINE_PER_CPU(struct task_struct *, ktimer_softirqd);
-+#endif
-
- const char * const softirq_to_name[NR_SOFTIRQS] = {
- "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
-@@ -200,6 +204,18 @@ static void wakeup_softirqd(void)
- wake_up_process(tsk);
- }
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static void wakeup_timer_softirqd(void)
-+{
-+ /* Interrupts are disabled: no need to stop preemption */
-+ struct task_struct *tsk = __this_cpu_read(ktimer_softirqd);
-+
-+ if (tsk && tsk->state != TASK_RUNNING)
-+ wake_up_process(tsk);
-+}
-+
-+#endif
-+
- static void handle_softirq(unsigned int vec_nr)
- {
- struct softirq_action *h = softirq_vec + vec_nr;
-@@ -522,7 +538,6 @@ void __raise_softirq_irqoff(unsigned int
- static inline void local_bh_disable_nort(void) { local_bh_disable(); }
- static inline void _local_bh_enable_nort(void) { _local_bh_enable(); }
- static void ksoftirqd_set_sched_params(unsigned int cpu) { }
--static void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) { }
-
- #else /* !PREEMPT_RT_FULL */
-
-@@ -669,8 +684,12 @@ void thread_do_softirq(void)
-
- static void do_raise_softirq_irqoff(unsigned int nr)
- {
-+ unsigned int mask;
-+
-+ mask = 1UL << nr;
-+
- trace_softirq_raise(nr);
-- or_softirq_pending(1UL << nr);
-+ or_softirq_pending(mask);
-
- /*
- * If we are not in a hard interrupt and inside a bh disabled
-@@ -679,16 +698,29 @@ static void do_raise_softirq_irqoff(unsi
- * delegate it to ksoftirqd.
- */
- if (!in_irq() && current->softirq_nestcnt)
-- current->softirqs_raised |= (1U << nr);
-- else if (__this_cpu_read(ksoftirqd))
-- __this_cpu_read(ksoftirqd)->softirqs_raised |= (1U << nr);
-+ current->softirqs_raised |= mask;
-+ else if (!__this_cpu_read(ksoftirqd) || !__this_cpu_read(ktimer_softirqd))
-+ return;
-+
-+ if (mask & TIMER_SOFTIRQS)
-+ __this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask;
-+ else
-+ __this_cpu_read(ksoftirqd)->softirqs_raised |= mask;
-+}
-+
-+static void wakeup_proper_softirq(unsigned int nr)
-+{
-+ if ((1UL << nr) & TIMER_SOFTIRQS)
-+ wakeup_timer_softirqd();
-+ else
-+ wakeup_softirqd();
- }
-
- void __raise_softirq_irqoff(unsigned int nr)
- {
- do_raise_softirq_irqoff(nr);
- if (!in_irq() && !current->softirq_nestcnt)
-- wakeup_softirqd();
-+ wakeup_proper_softirq(nr);
- }
-
- /*
-@@ -714,7 +746,7 @@ void raise_softirq_irqoff(unsigned int n
- * raise a WARN() if the condition is met.
- */
- if (!current->softirq_nestcnt)
-- wakeup_softirqd();
-+ wakeup_proper_softirq(nr);
- }
-
- static inline int ksoftirqd_softirq_pending(void)
-@@ -727,22 +759,37 @@ static inline void _local_bh_enable_nort
-
- static inline void ksoftirqd_set_sched_params(unsigned int cpu)
- {
-+ /* Take over all but timer pending softirqs when starting */
-+ local_irq_disable();
-+ current->softirqs_raised = local_softirq_pending() & ~TIMER_SOFTIRQS;
-+ local_irq_enable();
-+}
-+
-+static inline void ktimer_softirqd_set_sched_params(unsigned int cpu)
-+{
- struct sched_param param = { .sched_priority = 1 };
-
- sched_setscheduler(current, SCHED_FIFO, &param);
-- /* Take over all pending softirqs when starting */
-+
-+ /* Take over timer pending softirqs when starting */
- local_irq_disable();
-- current->softirqs_raised = local_softirq_pending();
-+ current->softirqs_raised = local_softirq_pending() & TIMER_SOFTIRQS;
- local_irq_enable();
- }
-
--static inline void ksoftirqd_clr_sched_params(unsigned int cpu, bool online)
-+static inline void ktimer_softirqd_clr_sched_params(unsigned int cpu,
-+ bool online)
- {
- struct sched_param param = { .sched_priority = 0 };
-
- sched_setscheduler(current, SCHED_NORMAL, &param);
- }
-
-+static int ktimer_softirqd_should_run(unsigned int cpu)
-+{
-+ return current->softirqs_raised;
-+}
-+
- #endif /* PREEMPT_RT_FULL */
- /*
- * Enter an interrupt context.
-@@ -795,6 +842,9 @@ static inline void invoke_softirq(void)
- if (__this_cpu_read(ksoftirqd) &&
- __this_cpu_read(ksoftirqd)->softirqs_raised)
- wakeup_softirqd();
-+ if (__this_cpu_read(ktimer_softirqd) &&
-+ __this_cpu_read(ktimer_softirqd)->softirqs_raised)
-+ wakeup_timer_softirqd();
- local_irq_restore(flags);
- #endif
- }
-@@ -1182,18 +1232,30 @@ static int takeover_tasklets(unsigned in
- static struct smp_hotplug_thread softirq_threads = {
- .store = &ksoftirqd,
- .setup = ksoftirqd_set_sched_params,
-- .cleanup = ksoftirqd_clr_sched_params,
- .thread_should_run = ksoftirqd_should_run,
- .thread_fn = run_ksoftirqd,
- .thread_comm = "ksoftirqd/%u",
- };
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static struct smp_hotplug_thread softirq_timer_threads = {
-+ .store = &ktimer_softirqd,
-+ .setup = ktimer_softirqd_set_sched_params,
-+ .cleanup = ktimer_softirqd_clr_sched_params,
-+ .thread_should_run = ktimer_softirqd_should_run,
-+ .thread_fn = run_ksoftirqd,
-+ .thread_comm = "ktimersoftd/%u",
-+};
-+#endif
-+
- static __init int spawn_ksoftirqd(void)
- {
- cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
- takeover_tasklets);
- BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
--
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ BUG_ON(smpboot_register_percpu_thread(&softirq_timer_threads));
-+#endif
- return 0;
- }
- early_initcall(spawn_ksoftirqd);
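The core of the split above is the TIMER_SOFTIRQS mask that routes a raised softirq to one of two per-CPU threads. A minimal stand-alone C sketch of that routing — the worker structs are simplified stand-ins and only the softirq numbers mirror the kernel's enum:

#include <stdio.h>

enum { TIMER_SOFTIRQ = 1, NET_RX_SOFTIRQ = 3, HRTIMER_SOFTIRQ = 8 };

#define TIMER_SOFTIRQS	((1u << TIMER_SOFTIRQ) | (1u << HRTIMER_SOFTIRQ))

struct worker {
	const char *name;
	unsigned int softirqs_raised;
};

static struct worker ksoftirqd = { "ksoftirqd/0", 0 };
static struct worker ktimer_softirqd = { "ktimersoftd/0", 0 };

/* Route a raised softirq to the thread responsible for it */
static void model_raise_softirq(unsigned int nr)
{
	unsigned int mask = 1u << nr;

	if (mask & TIMER_SOFTIRQS)
		ktimer_softirqd.softirqs_raised |= mask;
	else
		ksoftirqd.softirqs_raised |= mask;
}

int main(void)
{
	model_raise_softirq(TIMER_SOFTIRQ);
	model_raise_softirq(NET_RX_SOFTIRQ);
	printf("%s raised: %#x\n", ksoftirqd.name, ksoftirqd.softirqs_raised);
	printf("%s raised: %#x\n", ktimer_softirqd.name,
	       ktimer_softirqd.softirqs_raised);
	return 0;
}
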
diff --git a/debian/patches-rt/spinlock-types-separate-raw.patch b/debian/patches-rt/spinlock-types-separate-raw.patch
index 241af7c05..bf0500d5e 100644
--- a/debian/patches-rt/spinlock-types-separate-raw.patch
+++ b/debian/patches-rt/spinlock-types-separate-raw.patch
@@ -1,7 +1,7 @@
Subject: spinlock: Split the lock types header
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 29 Jun 2011 19:34:01 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Split raw_spinlock into its own file and the remaining spinlock_t into
its own non-RT header. The non-RT header will be replaced later by sleeping
diff --git a/debian/patches-rt/squashfs-make-use-of-local-lock-in-multi_cpu-decompr.patch b/debian/patches-rt/squashfs-make-use-of-local-lock-in-multi_cpu-decompr.patch
index df233f165..a1d6bc088 100644
--- a/debian/patches-rt/squashfs-make-use-of-local-lock-in-multi_cpu-decompr.patch
+++ b/debian/patches-rt/squashfs-make-use-of-local-lock-in-multi_cpu-decompr.patch
@@ -2,7 +2,7 @@ From: Julia Cartwright <julia@ni.com>
Date: Mon, 7 May 2018 08:58:57 -0500
Subject: [PATCH] squashfs: make use of local lock in multi_cpu
decompressor
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Currently, the squashfs multi_cpu decompressor makes use of
get_cpu_ptr()/put_cpu_ptr(), which unconditionally disable preemption
@@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/fs/squashfs/decompressor_multi_percpu.c
+++ b/fs/squashfs/decompressor_multi_percpu.c
-@@ -10,6 +10,7 @@
+@@ -8,6 +8,7 @@
#include <linux/slab.h>
#include <linux/percpu.h>
#include <linux/buffer_head.h>
@@ -35,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
-@@ -25,6 +26,8 @@ struct squashfs_stream {
+@@ -23,6 +24,8 @@ struct squashfs_stream {
void *stream;
};
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void *squashfs_decompressor_create(struct squashfs_sb_info *msblk,
void *comp_opts)
{
-@@ -79,10 +82,15 @@ int squashfs_decompress(struct squashfs_
+@@ -77,10 +80,15 @@ int squashfs_decompress(struct squashfs_
{
struct squashfs_stream __percpu *percpu =
(struct squashfs_stream __percpu *) msblk->stream;
diff --git a/debian/patches-rt/srcu-Remove-srcu_queue_delayed_work_on.patch b/debian/patches-rt/srcu-Remove-srcu_queue_delayed_work_on.patch
deleted file mode 100644
index 129a41ad0..000000000
--- a/debian/patches-rt/srcu-Remove-srcu_queue_delayed_work_on.patch
+++ /dev/null
@@ -1,188 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 11 Dec 2018 12:12:38 +0100
-Subject: [PATCH] srcu: Remove srcu_queue_delayed_work_on()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-srcu_queue_delayed_work_on() disables preemption (and therefore CPU
-hotplug in RCU's case) and then checks based on its own accounting if a
-CPU is online. If the CPU is online it uses queue_delayed_work_on(),
-otherwise it falls back to queue_delayed_work().
-The problem here is that queue_work() on -RT does not work with disabled
-preemption.
-
-queue_work_on() also works on an offlined CPU. queue_delayed_work_on()
-has the problem that it is possible to program a timer on an offlined
-CPU. This timer will fire once the CPU is online again. But until then,
-the timer remains programmed and nothing will happen.
-
-Add a local timer which will fire (as requested per delay) on the local
-CPU and then enqueue the work on the specific CPU.
-
-RCUtorture testing with SRCU-P for 24h showed no problems.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
----
- include/linux/srcutree.h | 3 +-
- kernel/rcu/srcutree.c | 55 ++++++++++++++++++++---------------------------
- kernel/rcu/tree.c | 4 ---
- kernel/rcu/tree.h | 8 ------
- 4 files changed, 26 insertions(+), 44 deletions(-)
-
---- a/include/linux/srcutree.h
-+++ b/include/linux/srcutree.h
-@@ -45,7 +45,8 @@ struct srcu_data {
- unsigned long srcu_gp_seq_needed; /* Furthest future GP needed. */
- unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */
- bool srcu_cblist_invoking; /* Invoking these CBs? */
-- struct delayed_work work; /* Context for CB invoking. */
-+ struct timer_list delay_work; /* Delay for CB invoking */
-+ struct work_struct work; /* Context for CB invoking. */
- struct rcu_head srcu_barrier_head; /* For srcu_barrier() use. */
- struct srcu_node *mynode; /* Leaf srcu_node. */
- unsigned long grpmask; /* Mask for leaf srcu_node */
---- a/kernel/rcu/srcutree.c
-+++ b/kernel/rcu/srcutree.c
-@@ -58,6 +58,7 @@ static bool __read_mostly srcu_init_done
- static void srcu_invoke_callbacks(struct work_struct *work);
- static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
- static void process_srcu(struct work_struct *work);
-+static void srcu_delay_timer(struct timer_list *t);
-
- /* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
- #define spin_lock_rcu_node(p) \
-@@ -156,7 +157,8 @@ static void init_srcu_struct_nodes(struc
- snp->grphi = cpu;
- }
- sdp->cpu = cpu;
-- INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks);
-+ INIT_WORK(&sdp->work, srcu_invoke_callbacks);
-+ timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
- sdp->ssp = ssp;
- sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
- if (is_static)
-@@ -386,13 +388,19 @@ void _cleanup_srcu_struct(struct srcu_st
- } else {
- flush_delayed_work(&ssp->work);
- }
-- for_each_possible_cpu(cpu)
-+ for_each_possible_cpu(cpu) {
-+ struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);
-+
- if (quiesced) {
-- if (WARN_ON(delayed_work_pending(&per_cpu_ptr(ssp->sda, cpu)->work)))
-+ if (WARN_ON(timer_pending(&sdp->delay_work)))
-+ return; /* Just leak it! */
-+ if (WARN_ON(work_pending(&sdp->work)))
- return; /* Just leak it! */
- } else {
-- flush_delayed_work(&per_cpu_ptr(ssp->sda, cpu)->work);
-+ del_timer_sync(&sdp->delay_work);
-+ flush_work(&sdp->work);
- }
-+ }
- if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
- WARN_ON(srcu_readers_active(ssp))) {
- pr_info("%s: Active srcu_struct %p state: %d\n",
-@@ -463,39 +471,23 @@ static void srcu_gp_start(struct srcu_st
- WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
- }
-
--/*
-- * Track online CPUs to guide callback workqueue placement.
-- */
--DEFINE_PER_CPU(bool, srcu_online);
-
--void srcu_online_cpu(unsigned int cpu)
-+static void srcu_delay_timer(struct timer_list *t)
- {
-- WRITE_ONCE(per_cpu(srcu_online, cpu), true);
--}
-+ struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);
-
--void srcu_offline_cpu(unsigned int cpu)
--{
-- WRITE_ONCE(per_cpu(srcu_online, cpu), false);
-+ queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
- }
-
--/*
-- * Place the workqueue handler on the specified CPU if online, otherwise
-- * just run it whereever. This is useful for placing workqueue handlers
-- * that are to invoke the specified CPU's callbacks.
-- */
--static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
-- struct delayed_work *dwork,
-+static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
- unsigned long delay)
- {
-- bool ret;
-+ if (!delay) {
-+ queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
-+ return;
-+ }
-
-- preempt_disable();
-- if (READ_ONCE(per_cpu(srcu_online, cpu)))
-- ret = queue_delayed_work_on(cpu, wq, dwork, delay);
-- else
-- ret = queue_delayed_work(wq, dwork, delay);
-- preempt_enable();
-- return ret;
-+ timer_reduce(&sdp->delay_work, jiffies + delay);
- }
-
- /*
-@@ -504,7 +496,7 @@ static bool srcu_queue_delayed_work_on(i
- */
- static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
- {
-- srcu_queue_delayed_work_on(sdp->cpu, rcu_gp_wq, &sdp->work, delay);
-+ srcu_queue_delayed_work_on(sdp, delay);
- }
-
- /*
-@@ -1186,7 +1178,8 @@ static void srcu_invoke_callbacks(struct
- struct srcu_data *sdp;
- struct srcu_struct *ssp;
-
-- sdp = container_of(work, struct srcu_data, work.work);
-+ sdp = container_of(work, struct srcu_data, work);
-+
- ssp = sdp->ssp;
- rcu_cblist_init(&ready_cbs);
- spin_lock_irq_rcu_node(sdp);
---- a/kernel/rcu/tree.c
-+++ b/kernel/rcu/tree.c
-@@ -3433,8 +3433,6 @@ int rcutree_online_cpu(unsigned int cpu)
- raw_spin_lock_irqsave_rcu_node(rnp, flags);
- rnp->ffmask |= rdp->grpmask;
- raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-- if (IS_ENABLED(CONFIG_TREE_SRCU))
-- srcu_online_cpu(cpu);
- if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
- return 0; /* Too early in boot for scheduler work. */
- sync_sched_exp_online_cleanup(cpu);
-@@ -3459,8 +3457,6 @@ int rcutree_offline_cpu(unsigned int cpu
- raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-
- rcutree_affinity_setting(cpu, cpu);
-- if (IS_ENABLED(CONFIG_TREE_SRCU))
-- srcu_offline_cpu(cpu);
- return 0;
- }
-
---- a/kernel/rcu/tree.h
-+++ b/kernel/rcu/tree.h
-@@ -460,11 +460,3 @@ static void rcu_bind_gp_kthread(void);
- static bool rcu_nohz_full_cpu(void);
- static void rcu_dynticks_task_enter(void);
- static void rcu_dynticks_task_exit(void);
--
--#ifdef CONFIG_SRCU
--void srcu_online_cpu(unsigned int cpu);
--void srcu_offline_cpu(unsigned int cpu);
--#else /* #ifdef CONFIG_SRCU */
--void srcu_online_cpu(unsigned int cpu) { }
--void srcu_offline_cpu(unsigned int cpu) { }
--#endif /* #else #ifdef CONFIG_SRCU */
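The mechanism that replaces srcu_queue_delayed_work_on() is a timer whose only job is to enqueue the real work on the desired CPU once the delay has elapsed. A user-space model of this two-stage deferral — a pthread and usleep() stand in for the kernel timer, a direct call stands in for queue_work_on(), and all names are illustrative:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct srcu_data_model {
	int cpu;			/* CPU the work should run on */
	void (*work_fn)(int cpu);	/* models the work_struct handler */
};

static void invoke_callbacks(int cpu)
{
	printf("invoking callbacks for CPU %d\n", cpu);
}

/* Models srcu_delay_timer(): fires after the delay on whatever CPU,
 * then hands the work to the CPU it belongs to. */
static void *delay_timer(void *arg)
{
	struct srcu_data_model *sdp = arg;

	usleep(10000);			/* the requested delay */
	sdp->work_fn(sdp->cpu);		/* queue_work_on(sdp->cpu, ...) */
	return NULL;
}

int main(void)
{
	struct srcu_data_model sdp = { .cpu = 2, .work_fn = invoke_callbacks };
	pthread_t t;

	pthread_create(&t, NULL, delay_timer, &sdp);
	pthread_join(t, NULL);
	return 0;
}
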
diff --git a/debian/patches-rt/srcu-replace-local_irqsave-with-a-locallock.patch b/debian/patches-rt/srcu-replace-local_irqsave-with-a-locallock.patch
index 10f82a27a..b7ed5ae77 100644
--- a/debian/patches-rt/srcu-replace-local_irqsave-with-a-locallock.patch
+++ b/debian/patches-rt/srcu-replace-local_irqsave-with-a-locallock.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 12 Oct 2017 18:37:12 +0200
Subject: [PATCH] srcu: replace local_irqsave() with a locallock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
 There are two instances which disable interrupts in order to get a
 stable this_cpu_ptr() pointer. The restore part is coupled with
@@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
-@@ -38,6 +38,7 @@
+@@ -25,6 +25,7 @@
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/srcu.h>
@@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include "rcu.h"
#include "rcu_segcblist.h"
-@@ -752,6 +753,7 @@ static void srcu_flip(struct srcu_struct
+@@ -735,6 +736,7 @@ static void srcu_flip(struct srcu_struct
smp_mb(); /* D */ /* Pairs with C. */
}
@@ -32,7 +32,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* If SRCU is likely idle, return true, otherwise return false.
*
-@@ -781,13 +783,13 @@ static bool srcu_might_be_idle(struct sr
+@@ -764,13 +766,13 @@ static bool srcu_might_be_idle(struct sr
unsigned long t;
/* If the local srcu_data structure has callbacks, not idle. */
@@ -49,7 +49,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* No local callbacks, so probabalistically probe global state.
-@@ -867,7 +869,7 @@ void __call_srcu(struct srcu_struct *ssp
+@@ -850,7 +852,7 @@ void __call_srcu(struct srcu_struct *ssp
}
rhp->func = func;
idx = srcu_read_lock(ssp);
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
sdp = this_cpu_ptr(ssp->sda);
spin_lock_rcu_node(sdp);
rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
-@@ -883,7 +885,8 @@ void __call_srcu(struct srcu_struct *ssp
+@@ -866,7 +868,8 @@ void __call_srcu(struct srcu_struct *ssp
sdp->srcu_gp_seq_needed_exp = s;
needexp = true;
}
diff --git a/debian/patches-rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch b/debian/patches-rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
index 494c8baa2..6f68688b5 100644
--- a/debian/patches-rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
+++ b/debian/patches-rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
@@ -1,7 +1,7 @@
From: Mike Galbraith <umgwanakikbuti@gmail.com>
Date: Wed, 18 Feb 2015 16:05:28 +0100
Subject: sunrpc: Make svc_xprt_do_enqueue() use get_cpu_light()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:915
|in_atomic(): 1, irqs_disabled(): 0, pid: 3194, name: rpc.nfsd
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
-@@ -392,7 +392,7 @@ void svc_xprt_do_enqueue(struct svc_xprt
+@@ -411,7 +411,7 @@ void svc_xprt_do_enqueue(struct svc_xprt
if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
return;
@@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
atomic_long_inc(&pool->sp_stats.packets);
-@@ -416,7 +416,7 @@ void svc_xprt_do_enqueue(struct svc_xprt
+@@ -435,7 +435,7 @@ void svc_xprt_do_enqueue(struct svc_xprt
rqstp = NULL;
out_unlock:
rcu_read_unlock();
diff --git a/debian/patches-rt/sysfs-realtime-entry.patch b/debian/patches-rt/sysfs-realtime-entry.patch
index d06baaf6d..5d02bca74 100644
--- a/debian/patches-rt/sysfs-realtime-entry.patch
+++ b/debian/patches-rt/sysfs-realtime-entry.patch
@@ -1,7 +1,7 @@
Subject: sysfs: Add /sys/kernel/realtime entry
From: Clark Williams <williams@redhat.com>
Date: Sat Jul 30 21:55:53 2011 -0500
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Add a /sys/kernel entry to indicate that the kernel is a
realtime kernel.
@@ -20,7 +20,7 @@ Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
-@@ -140,6 +140,15 @@ KERNEL_ATTR_RO(vmcoreinfo);
+@@ -138,6 +138,15 @@ KERNEL_ATTR_RO(vmcoreinfo);
#endif /* CONFIG_CRASH_CORE */
@@ -36,7 +36,7 @@ Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
/* whether file capabilities are enabled */
static ssize_t fscaps_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
-@@ -231,6 +240,9 @@ static struct attribute * kernel_attrs[]
+@@ -229,6 +238,9 @@ static struct attribute * kernel_attrs[]
&rcu_expedited_attr.attr,
&rcu_normal_attr.attr,
#endif
diff --git a/debian/patches-rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch b/debian/patches-rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
deleted file mode 100644
index 8335a96c4..000000000
--- a/debian/patches-rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
+++ /dev/null
@@ -1,295 +0,0 @@
-Subject: tasklet: Prevent tasklets from going into infinite spin in RT
-From: Ingo Molnar <mingo@elte.hu>
-Date: Tue Nov 29 20:18:22 2011 -0500
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-When CONFIG_PREEMPT_RT_FULL is enabled, tasklets run as threads,
-and spinlocks turn into mutexes. But this can cause issues with
-tasks disabling tasklets. A tasklet runs under ksoftirqd, and
-if a tasklet is disabled with tasklet_disable(), the tasklet
-count is increased. When a tasklet runs, it checks this counter
-and if it is set, it adds itself back on the softirq queue and
-returns.
-
-The problem arises in RT because ksoftirqd will see that a softirq
-is ready to run (the tasklet softirq just re-armed itself), and will
-not sleep, but instead run the softirqs again. The tasklet softirq
-will still see that the count is non-zero, will not execute the
-tasklet, and will requeue it on the softirq again, which will
-cause ksoftirqd to run it again and again and again.
-
-It gets worse because ksoftirqd runs as a real-time thread.
-If it preempted the task that disabled tasklets, and that task
-has migration disabled, or can't run for other reasons, the tasklet
-softirq will never run because the count will never be zero, and
-ksoftirqd will go into an infinite loop. As an RT task, this
-becomes a big problem.
-
-This is a hack solution to have tasklet_disable stop tasklets, and
-when a tasklet runs, instead of requeueing the tasklet on the softirq,
-it delays it. When tasklet_enable() is called, and tasklets are
-waiting, then tasklet_enable() will kick the tasklets to continue.
-This prevents the lockup caused by ksoftirqd going into an infinite loop.
-
-[ rostedt@goodmis.org: ported to 3.0-rt ]
-
-Signed-off-by: Ingo Molnar <mingo@elte.hu>
-Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
----
- include/linux/interrupt.h | 33 ++++++------
- kernel/softirq.c | 126 ++++++++++++++++++++++++++++++++++++++--------
- 2 files changed, 125 insertions(+), 34 deletions(-)
-
---- a/include/linux/interrupt.h
-+++ b/include/linux/interrupt.h
-@@ -557,8 +557,9 @@ static inline struct task_struct *this_c
- to be executed on some cpu at least once after this.
- * If the tasklet is already scheduled, but its execution is still not
- started, it will be executed only once.
-- * If this tasklet is already running on another CPU (or schedule is called
-- from tasklet itself), it is rescheduled for later.
-+ * If this tasklet is already running on another CPU, it is rescheduled
-+ for later.
-+ * Schedule must not be called from the tasklet itself (a lockup occurs)
- * Tasklet is strictly serialized wrt itself, but not
- wrt another tasklets. If client needs some intertask synchronization,
- he makes it with spinlocks.
-@@ -583,27 +584,36 @@ struct tasklet_struct name = { NULL, 0,
- enum
- {
- TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
-- TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
-+ TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */
-+ TASKLET_STATE_PENDING /* Tasklet is pending */
- };
-
--#ifdef CONFIG_SMP
-+#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED)
-+#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN)
-+#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING)
-+
-+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
- static inline int tasklet_trylock(struct tasklet_struct *t)
- {
- return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
- }
-
-+static inline int tasklet_tryunlock(struct tasklet_struct *t)
-+{
-+ return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN;
-+}
-+
- static inline void tasklet_unlock(struct tasklet_struct *t)
- {
- smp_mb__before_atomic();
- clear_bit(TASKLET_STATE_RUN, &(t)->state);
- }
-
--static inline void tasklet_unlock_wait(struct tasklet_struct *t)
--{
-- while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
--}
-+extern void tasklet_unlock_wait(struct tasklet_struct *t);
-+
- #else
- #define tasklet_trylock(t) 1
-+#define tasklet_tryunlock(t) 1
- #define tasklet_unlock_wait(t) do { } while (0)
- #define tasklet_unlock(t) do { } while (0)
- #endif
-@@ -637,12 +647,7 @@ static inline void tasklet_disable(struc
- smp_mb();
- }
-
--static inline void tasklet_enable(struct tasklet_struct *t)
--{
-- smp_mb__before_atomic();
-- atomic_dec(&t->count);
--}
--
-+extern void tasklet_enable(struct tasklet_struct *t);
- extern void tasklet_kill(struct tasklet_struct *t);
- extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
- extern void tasklet_init(struct tasklet_struct *t,
---- a/kernel/softirq.c
-+++ b/kernel/softirq.c
-@@ -21,6 +21,7 @@
- #include <linux/freezer.h>
- #include <linux/kthread.h>
- #include <linux/rcupdate.h>
-+#include <linux/delay.h>
- #include <linux/ftrace.h>
- #include <linux/smp.h>
- #include <linux/smpboot.h>
-@@ -476,11 +477,38 @@ static void __tasklet_schedule_common(st
- unsigned long flags;
-
- local_irq_save(flags);
-+ if (!tasklet_trylock(t)) {
-+ local_irq_restore(flags);
-+ return;
-+ }
-+
- head = this_cpu_ptr(headp);
-- t->next = NULL;
-- *head->tail = t;
-- head->tail = &(t->next);
-- raise_softirq_irqoff(softirq_nr);
-+again:
-+ /* We may have been preempted before tasklet_trylock
-+ * and __tasklet_action may have already run.
-+	 * So double check the sched bit while the tasklet
-+ * is locked before adding it to the list.
-+ */
-+ if (test_bit(TASKLET_STATE_SCHED, &t->state)) {
-+ t->next = NULL;
-+ *head->tail = t;
-+ head->tail = &(t->next);
-+ raise_softirq_irqoff(softirq_nr);
-+ tasklet_unlock(t);
-+ } else {
-+		/* This is subtle. If we hit the corner case above,
-+		 * it is possible that we get preempted right here,
-+ * and another task has successfully called
-+ * tasklet_schedule(), then this function, and
-+ * failed on the trylock. Thus we must be sure
-+ * before releasing the tasklet lock, that the
-+ * SCHED_BIT is clear. Otherwise the tasklet
-+ * may get its SCHED_BIT set, but not added to the
-+ * list
-+ */
-+ if (!tasklet_tryunlock(t))
-+ goto again;
-+ }
- local_irq_restore(flags);
- }
-
-@@ -498,11 +526,21 @@ void __tasklet_hi_schedule(struct taskle
- }
- EXPORT_SYMBOL(__tasklet_hi_schedule);
-
-+void tasklet_enable(struct tasklet_struct *t)
-+{
-+ if (!atomic_dec_and_test(&t->count))
-+ return;
-+ if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
-+ tasklet_schedule(t);
-+}
-+EXPORT_SYMBOL(tasklet_enable);
-+
- static void tasklet_action_common(struct softirq_action *a,
- struct tasklet_head *tl_head,
- unsigned int softirq_nr)
- {
- struct tasklet_struct *list;
-+ int loops = 1000000;
-
- local_irq_disable();
- list = tl_head->head;
-@@ -514,25 +552,56 @@ static void tasklet_action_common(struct
- struct tasklet_struct *t = list;
-
- list = list->next;
-+ /*
-+		 * Should always succeed - after a tasklet got on the
-+ * list (after getting the SCHED bit set from 0 to 1),
-+ * nothing but the tasklet softirq it got queued to can
-+ * lock it:
-+ */
-+ if (!tasklet_trylock(t)) {
-+ WARN_ON(1);
-+ continue;
-+ }
-
-- if (tasklet_trylock(t)) {
-- if (!atomic_read(&t->count)) {
-- if (!test_and_clear_bit(TASKLET_STATE_SCHED,
-- &t->state))
-- BUG();
-- t->func(t->data);
-+ t->next = NULL;
-+
-+ if (unlikely(atomic_read(&t->count))) {
-+out_disabled:
-+ /* implicit unlock: */
-+ wmb();
-+ t->state = TASKLET_STATEF_PENDING;
-+ continue;
-+ }
-+ /*
-+ * After this point on the tasklet might be rescheduled
-+ * on another CPU, but it can only be added to another
-+ * CPU's tasklet list if we unlock the tasklet (which we
-+ * dont do yet).
-+ */
-+ if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
-+ WARN_ON(1);
-+again:
-+ t->func(t->data);
-+
-+ while (!tasklet_tryunlock(t)) {
-+ /*
-+ * If it got disabled meanwhile, bail out:
-+ */
-+ if (atomic_read(&t->count))
-+ goto out_disabled;
-+ /*
-+ * If it got scheduled meanwhile, re-execute
-+ * the tasklet function:
-+ */
-+ if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
-+ goto again;
-+ if (!--loops) {
-+ printk("hm, tasklet state: %08lx\n", t->state);
-+ WARN_ON(1);
- tasklet_unlock(t);
-- continue;
-+ break;
- }
-- tasklet_unlock(t);
- }
--
-- local_irq_disable();
-- t->next = NULL;
-- *tl_head->tail = t;
-- tl_head->tail = &t->next;
-- __raise_softirq_irqoff(softirq_nr);
-- local_irq_enable();
- }
- }
-
-@@ -564,7 +633,7 @@ void tasklet_kill(struct tasklet_struct
-
- while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
- do {
-- yield();
-+ msleep(1);
- } while (test_bit(TASKLET_STATE_SCHED, &t->state));
- }
- tasklet_unlock_wait(t);
-@@ -638,6 +707,23 @@ void __init softirq_init(void)
- open_softirq(HI_SOFTIRQ, tasklet_hi_action);
- }
-
-+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
-+void tasklet_unlock_wait(struct tasklet_struct *t)
-+{
-+ while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
-+ /*
-+ * Hack for now to avoid this busy-loop:
-+ */
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ msleep(1);
-+#else
-+ barrier();
-+#endif
-+ }
-+}
-+EXPORT_SYMBOL(tasklet_unlock_wait);
-+#endif
-+
- static int ksoftirqd_should_run(unsigned int cpu)
- {
- return local_softirq_pending();
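The interesting primitive in the deleted patch above is tasklet_tryunlock(): it atomically drops the RUN bit only if no other state bit was set in the meantime, so a concurrent tasklet_schedule() forces the runner to execute the function again. A minimal C11 model of that state machine — the bit names mirror the patch, everything else is illustrative:

#include <stdatomic.h>
#include <stdio.h>

#define STATEF_SCHED	(1u << 0)	/* mirrors TASKLET_STATEF_SCHED */
#define STATEF_RUN	(1u << 1)	/* mirrors TASKLET_STATEF_RUN */

static _Atomic unsigned int state = STATEF_RUN;	/* tasklet is running */

/* Succeeds only if RUN is the sole bit set, like the cmpxchg() in the
 * patch; otherwise the caller must handle the new state first. */
static int tasklet_tryunlock_model(void)
{
	unsigned int expected = STATEF_RUN;

	return atomic_compare_exchange_strong(&state, &expected, 0u);
}

int main(void)
{
	/* Another context reschedules the tasklet while it runs: */
	atomic_fetch_or(&state, STATEF_SCHED);

	if (!tasklet_tryunlock_model())
		printf("SCHED was set meanwhile: run the function again\n");

	atomic_fetch_and(&state, ~STATEF_SCHED);	/* handled it */
	if (tasklet_tryunlock_model())
		printf("unlocked cleanly\n");
	return 0;
}
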
diff --git a/debian/patches-rt/thermal-Defer-thermal-wakups-to-threads.patch b/debian/patches-rt/thermal-Defer-thermal-wakups-to-threads.patch
deleted file mode 100644
index c6499cb76..000000000
--- a/debian/patches-rt/thermal-Defer-thermal-wakups-to-threads.patch
+++ /dev/null
@@ -1,89 +0,0 @@
-From: Daniel Wagner <wagi@monom.org>
-Date: Tue, 17 Feb 2015 09:37:44 +0100
-Subject: thermal: Defer thermal wakups to threads
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-On RT the spin lock in pkg_temp_thermal_platform_thermal_notify will
-call schedule while we run in irq context.
-
-[<ffffffff816850ac>] dump_stack+0x4e/0x8f
-[<ffffffff81680f7d>] __schedule_bug+0xa6/0xb4
-[<ffffffff816896b4>] __schedule+0x5b4/0x700
-[<ffffffff8168982a>] schedule+0x2a/0x90
-[<ffffffff8168a8b5>] rt_spin_lock_slowlock+0xe5/0x2d0
-[<ffffffff8168afd5>] rt_spin_lock+0x25/0x30
-[<ffffffffa03a7b75>] pkg_temp_thermal_platform_thermal_notify+0x45/0x134 [x86_pkg_temp_thermal]
-[<ffffffff8103d4db>] ? therm_throt_process+0x1b/0x160
-[<ffffffff8103d831>] intel_thermal_interrupt+0x211/0x250
-[<ffffffff8103d8c1>] smp_thermal_interrupt+0x21/0x40
-[<ffffffff8169415d>] thermal_interrupt+0x6d/0x80
-
-Let's defer the work to a kthread.
-
-Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
-[bigeasy: reorder init/deinit position. TODO: flush swork on exit]
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/thermal/intel/x86_pkg_temp_thermal.c | 28 ++++++++++++++++++++++++++-
- 1 file changed, 27 insertions(+), 1 deletion(-)
-
---- a/drivers/thermal/intel/x86_pkg_temp_thermal.c
-+++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c
-@@ -29,6 +29,7 @@
- #include <linux/pm.h>
- #include <linux/thermal.h>
- #include <linux/debugfs.h>
-+#include <linux/kthread.h>
- #include <asm/cpu_device_id.h>
- #include <asm/mce.h>
-
-@@ -329,7 +330,7 @@ static void pkg_thermal_schedule_work(in
- schedule_delayed_work_on(cpu, work, ms);
- }
-
--static int pkg_thermal_notify(u64 msr_val)
-+static void pkg_thermal_notify_work(struct kthread_work *work)
- {
- int cpu = smp_processor_id();
- struct pkg_device *pkgdev;
-@@ -348,8 +349,32 @@ static int pkg_thermal_notify(u64 msr_va
- }
-
- spin_unlock_irqrestore(&pkg_temp_lock, flags);
-+}
-+
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static DEFINE_KTHREAD_WORK(notify_work, pkg_thermal_notify_work);
-+
-+static int pkg_thermal_notify(u64 msr_val)
-+{
-+ kthread_schedule_work(&notify_work);
-+ return 0;
-+}
-+
-+static void pkg_thermal_notify_flush(void)
-+{
-+ kthread_flush_work(&notify_work);
-+}
-+
-+#else /* !CONFIG_PREEMPT_RT_FULL */
-+
-+static void pkg_thermal_notify_flush(void) { }
-+
-+static int pkg_thermal_notify(u64 msr_val)
-+{
-+ pkg_thermal_notify_work(NULL);
- return 0;
- }
-+#endif /* CONFIG_PREEMPT_RT_FULL */
-
- static int pkg_temp_thermal_device_add(unsigned int cpu)
- {
-@@ -548,6 +573,7 @@ static void __exit pkg_temp_thermal_exit
- platform_thermal_package_rate_control = NULL;
-
- cpuhp_remove_state(pkg_thermal_hp_state);
-+ pkg_thermal_notify_flush();
- debugfs_remove_recursive(debugfs);
- kfree(packages);
- }
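The pattern used by the deleted patch above — an interrupt handler that only kicks a thread, which then does the work that may sleep — can be modelled in user space with a condition variable standing in for kthread_schedule_work(). A sketch under those assumptions (all names are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int pending;

static void thermal_interrupt_model(void)	/* must not sleep */
{
	pthread_mutex_lock(&lock);
	pending = 1;			/* kthread_schedule_work() */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

static void *notify_worker(void *arg)		/* may sleep freely */
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!pending)
		pthread_cond_wait(&cond, &lock);
	pending = 0;
	pthread_mutex_unlock(&lock);
	printf("processing thermal notification in thread context\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, notify_worker, NULL);
	thermal_interrupt_model();
	pthread_join(t, NULL);
	return 0;
}
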
diff --git a/debian/patches-rt/time-hrtimer-avoid-schedule_work-with-interrupts-dis.patch b/debian/patches-rt/time-hrtimer-avoid-schedule_work-with-interrupts-dis.patch
deleted file mode 100644
index 3444bb494..000000000
--- a/debian/patches-rt/time-hrtimer-avoid-schedule_work-with-interrupts-dis.patch
+++ /dev/null
@@ -1,46 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 15 Nov 2017 17:29:51 +0100
-Subject: [PATCH] time/hrtimer: avoid schedule_work() with interrupts disabled
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-The NOHZ code tries to schedule a workqueue with interrupts disabled.
-Since this does not work on -RT, I am switching it to swork instead.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/time/timer.c | 8 +++-----
- 1 file changed, 3 insertions(+), 5 deletions(-)
-
---- a/kernel/time/timer.c
-+++ b/kernel/time/timer.c
-@@ -216,9 +216,6 @@ static DEFINE_PER_CPU(struct timer_base,
- static DEFINE_STATIC_KEY_FALSE(timers_nohz_active);
- static DEFINE_MUTEX(timer_keys_mutex);
-
--static void timer_update_keys(struct work_struct *work);
--static DECLARE_WORK(timer_update_work, timer_update_keys);
--
- #ifdef CONFIG_SMP
- unsigned int sysctl_timer_migration = 1;
-
-@@ -235,17 +232,18 @@ static void timers_update_migration(void
- static inline void timers_update_migration(void) { }
- #endif /* !CONFIG_SMP */
-
--static void timer_update_keys(struct work_struct *work)
-+static void timer_update_keys(struct kthread_work *work)
- {
- mutex_lock(&timer_keys_mutex);
- timers_update_migration();
- static_branch_enable(&timers_nohz_active);
- mutex_unlock(&timer_keys_mutex);
- }
-+static DEFINE_KTHREAD_WORK(timer_update_swork, timer_update_keys);
-
- void timers_update_nohz(void)
- {
-- schedule_work(&timer_update_work);
-+ kthread_schedule_work(&timer_update_swork);
- }
-
- int timer_migration_handler(struct ctl_table *table, int write,
diff --git a/debian/patches-rt/timekeeping-split-jiffies-lock.patch b/debian/patches-rt/timekeeping-split-jiffies-lock.patch
index d3d90c586..62174a1d1 100644
--- a/debian/patches-rt/timekeeping-split-jiffies-lock.patch
+++ b/debian/patches-rt/timekeeping-split-jiffies-lock.patch
@@ -1,7 +1,7 @@
Subject: timekeeping: Split jiffies seqlock
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 14 Feb 2013 22:36:59 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Replace jiffies_lock seqlock with a simple seqcounter and a rawlock so
it can be taken in atomic context on RT.
@@ -41,7 +41,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
EXPORT_SYMBOL(get_jiffies_64);
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
-@@ -75,13 +75,15 @@ int tick_is_oneshot_available(void)
+@@ -83,13 +83,15 @@ int tick_is_oneshot_available(void)
static void tick_periodic(int cpu)
{
if (tick_do_timer_cpu == cpu) {
@@ -59,7 +59,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
update_wall_time();
}
-@@ -153,9 +155,9 @@ void tick_setup_periodic(struct clock_ev
+@@ -161,9 +163,9 @@ void tick_setup_periodic(struct clock_ev
ktime_t next;
do {
@@ -115,7 +115,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return period;
}
-@@ -649,10 +654,10 @@ static ktime_t tick_nohz_next_event(stru
+@@ -659,10 +664,10 @@ static ktime_t tick_nohz_next_event(stru
/* Read jiffies and the time when jiffies were updated last */
do {
@@ -130,7 +130,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
-@@ -2380,8 +2380,10 @@ EXPORT_SYMBOL(hardpps);
+@@ -2392,8 +2392,10 @@ EXPORT_SYMBOL(hardpps);
*/
void xtime_update(unsigned long ticks)
{
diff --git a/debian/patches-rt/timers-Drop-expiry-lock-after-each-timer-invocation.patch b/debian/patches-rt/timers-Drop-expiry-lock-after-each-timer-invocation.patch
new file mode 100644
index 000000000..e62590a2b
--- /dev/null
+++ b/debian/patches-rt/timers-Drop-expiry-lock-after-each-timer-invocation.patch
@@ -0,0 +1,50 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Thu, 10 Jan 2019 13:00:07 +0100
+Subject: [PATCH] timers: Drop expiry lock after each timer invocation
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
+
+The ->expiry_lock is held until every timer is expired. So if a
+__del_timer_sync() caller blocks on the lock, it has to wait until
+every timer callback has completed.
+
+Therefore drop the lock and reacquire it after expiring each timer. To
+be able to remove the timer once it has expired, the running_timer
+struct member has to be reset to NULL as well. This happens after the
+timer has expired. It is ok to do this lockless, because the only
+problem could be that a check done too early still sees the old,
+already expired timer stored in there.
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+[bigeasy: Patch description reworded]
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/time/timer.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -1384,10 +1384,16 @@ static void expire_timers(struct timer_b
+ if (timer->flags & TIMER_IRQSAFE) {
+ raw_spin_unlock(&base->lock);
+ call_timer_fn(timer, fn, baseclk);
++ base->running_timer = NULL;
++ spin_unlock(&base->expiry_lock);
++ spin_lock(&base->expiry_lock);
+ raw_spin_lock(&base->lock);
+ } else {
+ raw_spin_unlock_irq(&base->lock);
+ call_timer_fn(timer, fn, baseclk);
++ base->running_timer = NULL;
++ spin_unlock(&base->expiry_lock);
++ spin_lock(&base->expiry_lock);
+ raw_spin_lock_irq(&base->lock);
+ }
+ }
+@@ -1709,7 +1715,6 @@ static inline void __run_timers(struct t
+ while (levels--)
+ expire_timers(base, heads + levels);
+ }
+- base->running_timer = NULL;
+ raw_spin_unlock_irq(&base->lock);
+ spin_unlock(&base->expiry_lock);
+ }
diff --git a/debian/patches-rt/timers-Introduce-expiry-spin-lock.patch b/debian/patches-rt/timers-Introduce-expiry-spin-lock.patch
new file mode 100644
index 000000000..69dc03c5f
--- /dev/null
+++ b/debian/patches-rt/timers-Introduce-expiry-spin-lock.patch
@@ -0,0 +1,153 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Thu, 10 Jan 2019 13:00:06 +0100
+Subject: [PATCH] timers: Introduce expiry spin lock
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
+
+When del_timer_sync() is called, it is possible that the CPU has to
+spin because the timer is marked as running. The function will
+repeatedly try to delete the timer until the timer callback completes
+and the deletion succeeds.
+On a virtual machine this spinning can waste CPU cycles if the vCPU
+invoking the timer callback is not scheduled by the host (and is
+therefore making no progress).
+
+The spinning and the wasted time can be prevented by using
+PARAVIRT_SPINLOCKS and introducing a per timer-base spin lock for
+expiry. The lock is held while the timers of a base are expired. When
+the deletion of a timer was not successful because the timer is
+currently running, the expiry lock is acquired instead of calling
+cpu_relax(). The lock is already held by the CPU expiring the timers,
+so thanks to the PARAVIRT_SPINLOCKS code the waiting CPU can be
+scheduled out instead of spinning until the lock is released. Thereby
+the time wasted by spinning is avoided.
+
+The code is not made conditional on PARAVIRT_SPINLOCKS. The lock is
+taken in only two places, and in one of them it is dropped directly
+after being acquired, so the probability of hitting the slowpath when
+taking the lock is very low. This keeps the code cleaner than
+introducing several CONFIG_PARAVIRT_SPINLOCKS-dependent code paths and
+struct members.
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+[bigeasy: Patch description reworded]
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/time/timer.c | 57 ++++++++++++++++++++++++++++++++++++++--------------
+ 1 file changed, 42 insertions(+), 15 deletions(-)
+
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -196,6 +196,7 @@ EXPORT_SYMBOL(jiffies_64);
+ struct timer_base {
+ raw_spinlock_t lock;
+ struct timer_list *running_timer;
++ spinlock_t expiry_lock;
+ unsigned long clk;
+ unsigned long next_expiry;
+ unsigned int cpu;
+@@ -1201,14 +1202,8 @@ int del_timer(struct timer_list *timer)
+ }
+ EXPORT_SYMBOL(del_timer);
+
+-/**
+- * try_to_del_timer_sync - Try to deactivate a timer
+- * @timer: timer to delete
+- *
+- * This function tries to deactivate a timer. Upon successful (ret >= 0)
+- * exit the timer is not queued and the handler is not running on any CPU.
+- */
+-int try_to_del_timer_sync(struct timer_list *timer)
++static int __try_to_del_timer_sync(struct timer_list *timer,
++ struct timer_base **basep)
+ {
+ struct timer_base *base;
+ unsigned long flags;
+@@ -1216,7 +1211,7 @@ int try_to_del_timer_sync(struct timer_l
+
+ debug_assert_init(timer);
+
+- base = lock_timer_base(timer, &flags);
++ *basep = base = lock_timer_base(timer, &flags);
+
+ if (base->running_timer != timer)
+ ret = detach_if_pending(timer, base, true);
+@@ -1225,9 +1220,42 @@ int try_to_del_timer_sync(struct timer_l
+
+ return ret;
+ }
++
++/**
++ * try_to_del_timer_sync - Try to deactivate a timer
++ * @timer: timer to delete
++ *
++ * This function tries to deactivate a timer. Upon successful (ret >= 0)
++ * exit the timer is not queued and the handler is not running on any CPU.
++ */
++int try_to_del_timer_sync(struct timer_list *timer)
++{
++ struct timer_base *base;
++
++ return __try_to_del_timer_sync(timer, &base);
++}
+ EXPORT_SYMBOL(try_to_del_timer_sync);
+
+ #ifdef CONFIG_SMP
++static int __del_timer_sync(struct timer_list *timer)
++{
++ struct timer_base *base;
++ int ret;
++
++ for (;;) {
++ ret = __try_to_del_timer_sync(timer, &base);
++ if (ret >= 0)
++ return ret;
++
++ /*
++	 * Once the expiry lock can be acquired, the timers of the base are
++	 * no longer being expired, so this timer is no longer running.
++ */
++ spin_lock(&base->expiry_lock);
++ spin_unlock(&base->expiry_lock);
++ }
++}
++
+ /**
+ * del_timer_sync - deactivate a timer and wait for the handler to finish.
+ * @timer: the timer to be deactivated
+@@ -1283,12 +1311,8 @@ int del_timer_sync(struct timer_list *ti
+ * could lead to deadlock.
+ */
+ WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));
+- for (;;) {
+- int ret = try_to_del_timer_sync(timer);
+- if (ret >= 0)
+- return ret;
+- cpu_relax();
+- }
++
++ return __del_timer_sync(timer);
+ }
+ EXPORT_SYMBOL(del_timer_sync);
+ #endif
+@@ -1658,6 +1682,7 @@ static inline void __run_timers(struct t
+ if (!time_after_eq(jiffies, base->clk))
+ return;
+
++ spin_lock(&base->expiry_lock);
+ raw_spin_lock_irq(&base->lock);
+
+ /*
+@@ -1686,6 +1711,7 @@ static inline void __run_timers(struct t
+ }
+ base->running_timer = NULL;
+ raw_spin_unlock_irq(&base->lock);
++ spin_unlock(&base->expiry_lock);
+ }
+
+ /*
+@@ -1930,6 +1956,7 @@ static void __init init_timer_cpu(int cp
+ base->cpu = cpu;
+ raw_spin_lock_init(&base->lock);
+ base->clk = jiffies;
++ spin_lock_init(&base->expiry_lock);
+ }
+ }
+
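The waiter side is the mirror image: instead of looping on cpu_relax(),
__del_timer_sync() takes and immediately drops the expiry lock, which blocks
until the expiry code reaches a hand-over point. The same idea in a
self-contained userspace toy (a pthread mutex stands in for the kernel
spinlock; names are invented for illustration):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t expiry_lock = PTHREAD_MUTEX_INITIALIZER;
    static int callback_done;

    /* Stands in for __run_timers(): holds the lock over the callback. */
    static void *expiry_thread(void *unused)
    {
            pthread_mutex_lock(&expiry_lock);
            sleep(1);                       /* a long-running timer callback */
            callback_done = 1;
            pthread_mutex_unlock(&expiry_lock);     /* hand-over point */
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, expiry_thread, NULL);
            usleep(100 * 1000);     /* crude ordering hack: let the expiry
                                       side grab the lock first */
            /* The "canceller": waiting is just lock + unlock. */
            pthread_mutex_lock(&expiry_lock);
            pthread_mutex_unlock(&expiry_lock);
            printf("callback_done = %d\n", callback_done);  /* prints 1 */
            pthread_join(t, NULL);
            return 0;
    }

The mutex gives the required ordering: the write to callback_done happens
before the unlock, and the canceller's lock happens after it, so no separate
flag synchronization is needed.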
diff --git a/debian/patches-rt/timers-prepare-for-full-preemption.patch b/debian/patches-rt/timers-prepare-for-full-preemption.patch
index 3efb2a698..c03a2ffa1 100644
--- a/debian/patches-rt/timers-prepare-for-full-preemption.patch
+++ b/debian/patches-rt/timers-prepare-for-full-preemption.patch
@@ -1,7 +1,7 @@
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:34 -0500
Subject: timers: Prepare for full preemption
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
When softirqs can be preempted we need to make sure that cancelling
the timer from the active thread can not deadlock vs. a running timer
@@ -9,12 +9,10 @@ callback. Add a waitqueue to resolve that.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
---
include/linux/timer.h | 2 +-
- kernel/sched/core.c | 9 +++++++--
- kernel/time/timer.c | 45 +++++++++++++++++++++++++++++++++++++++++----
- 3 files changed, 49 insertions(+), 7 deletions(-)
+ kernel/time/timer.c | 5 +++--
+ 2 files changed, 4 insertions(+), 3 deletions(-)
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -27,107 +25,18 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern int del_timer_sync(struct timer_list *timer);
#else
# define del_timer_sync(t) del_timer(t)
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -510,11 +510,14 @@ void resched_cpu(int cpu)
- */
- int get_nohz_timer_target(void)
- {
-- int i, cpu = smp_processor_id();
-+ int i, cpu;
- struct sched_domain *sd;
-
-+ preempt_disable_rt();
-+ cpu = smp_processor_id();
-+
- if (!idle_cpu(cpu) && housekeeping_cpu(cpu, HK_FLAG_TIMER))
-- return cpu;
-+ goto preempt_en_rt;
-
- rcu_read_lock();
- for_each_domain(cpu, sd) {
-@@ -533,6 +536,8 @@ int get_nohz_timer_target(void)
- cpu = housekeeping_any_cpu(HK_FLAG_TIMER);
- unlock:
- rcu_read_unlock();
-+preempt_en_rt:
-+ preempt_enable_rt();
- return cpu;
- }
-
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
-@@ -43,6 +43,7 @@
- #include <linux/sched/debug.h>
- #include <linux/slab.h>
- #include <linux/compat.h>
-+#include <linux/swait.h>
-
- #include <linux/uaccess.h>
- #include <asm/unistd.h>
-@@ -196,6 +197,9 @@ EXPORT_SYMBOL(jiffies_64);
- struct timer_base {
- raw_spinlock_t lock;
- struct timer_list *running_timer;
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ struct swait_queue_head wait_for_running_timer;
-+#endif
- unsigned long clk;
- unsigned long next_expiry;
- unsigned int cpu;
-@@ -1177,6 +1181,33 @@ void add_timer_on(struct timer_list *tim
- }
- EXPORT_SYMBOL_GPL(add_timer_on);
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+/*
-+ * Wait for a running timer
-+ */
-+static void wait_for_running_timer(struct timer_list *timer)
-+{
-+ struct timer_base *base;
-+ u32 tf = timer->flags;
-+
-+ if (tf & TIMER_MIGRATING)
-+ return;
-+
-+ base = get_timer_base(tf);
-+ swait_event_exclusive(base->wait_for_running_timer,
-+ base->running_timer != timer);
-+}
-+
-+# define wakeup_timer_waiters(b) swake_up_all(&(b)->wait_for_running_timer)
-+#else
-+static inline void wait_for_running_timer(struct timer_list *timer)
-+{
-+ cpu_relax();
-+}
-+
-+# define wakeup_timer_waiters(b) do { } while (0)
-+#endif
-+
- /**
- * del_timer - deactivate a timer.
- * @timer: the timer to be deactivated
-@@ -1232,7 +1263,7 @@ int try_to_del_timer_sync(struct timer_l
+@@ -1236,7 +1236,7 @@ int try_to_del_timer_sync(struct timer_l
}
EXPORT_SYMBOL(try_to_del_timer_sync);
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
- /**
- * del_timer_sync - deactivate a timer and wait for the handler to finish.
- * @timer: the timer to be deactivated
-@@ -1292,7 +1323,7 @@ int del_timer_sync(struct timer_list *ti
- int ret = try_to_del_timer_sync(timer);
- if (ret >= 0)
- return ret;
-- cpu_relax();
-+ wait_for_running_timer(timer);
- }
- }
- EXPORT_SYMBOL(del_timer_sync);
-@@ -1353,13 +1384,16 @@ static void expire_timers(struct timer_b
+ static int __del_timer_sync(struct timer_list *timer)
+ {
+ struct timer_base *base;
+@@ -1381,7 +1381,8 @@ static void expire_timers(struct timer_b
fn = timer->function;
@@ -135,33 +44,5 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL) &&
+ timer->flags & TIMER_IRQSAFE) {
raw_spin_unlock(&base->lock);
- call_timer_fn(timer, fn);
-+ base->running_timer = NULL;
- raw_spin_lock(&base->lock);
- } else {
- raw_spin_unlock_irq(&base->lock);
- call_timer_fn(timer, fn);
-+ base->running_timer = NULL;
- raw_spin_lock_irq(&base->lock);
- }
- }
-@@ -1680,8 +1714,8 @@ static inline void __run_timers(struct t
- while (levels--)
- expire_timers(base, heads + levels);
- }
-- base->running_timer = NULL;
- raw_spin_unlock_irq(&base->lock);
-+ wakeup_timer_waiters(base);
- }
-
- /*
-@@ -1926,6 +1960,9 @@ static void __init init_timer_cpu(int cp
- base->cpu = cpu;
- raw_spin_lock_init(&base->lock);
- base->clk = jiffies;
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ init_swait_queue_head(&base->wait_for_running_timer);
-+#endif
- }
- }
-
+ call_timer_fn(timer, fn, baseclk);
+ base->running_timer = NULL;
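The surviving core of this patch is the IS_ENABLED() check above: on RT even
TIMER_IRQSAFE timers must not run with interrupts disabled, so the irqsafe
fast path is disabled at compile time without an #ifdef. A minimal sketch of
the idiom (illustrative, not the applied hunk):

    /*
     * IS_ENABLED(CONFIG_FOO) expands to a compile-time 0 or 1, so the
     * dead branch is dropped by the compiler while both branches are
     * still parsed and type-checked, unlike #ifdef'ed code.
     */
    if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL) &&
        (timer->flags & TIMER_IRQSAFE)) {
            /* non-RT: run the irqsafe callback with interrupts off */
    } else {
            /* RT: interrupts stay enabled around the callback */
    }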
diff --git a/debian/patches-rt/tpm-remove-tpm_dev_wq_lock.patch b/debian/patches-rt/tpm-remove-tpm_dev_wq_lock.patch
index 78fef73a7..746459e0d 100644
--- a/debian/patches-rt/tpm-remove-tpm_dev_wq_lock.patch
+++ b/debian/patches-rt/tpm-remove-tpm_dev_wq_lock.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 11 Feb 2019 11:33:11 +0100
Subject: [PATCH] tpm: remove tpm_dev_wq_lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Added in commit
@@ -19,11 +19,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/char/tpm/tpm-dev-common.c
+++ b/drivers/char/tpm/tpm-dev-common.c
-@@ -25,7 +25,6 @@
+@@ -20,7 +20,6 @@
#include "tpm-dev.h"
static struct workqueue_struct *tpm_dev_wq;
-static DEFINE_MUTEX(tpm_dev_wq_lock);
- static void tpm_async_work(struct work_struct *work)
- {
+ static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space,
+ u8 *buf, size_t bufsiz)
diff --git a/debian/patches-rt/tpm_tis-fix-stall-after-iowrite-s.patch b/debian/patches-rt/tpm_tis-fix-stall-after-iowrite-s.patch
index 58fcd2539..584aceb79 100644
--- a/debian/patches-rt/tpm_tis-fix-stall-after-iowrite-s.patch
+++ b/debian/patches-rt/tpm_tis-fix-stall-after-iowrite-s.patch
@@ -1,7 +1,7 @@
From: Haris Okanovic <haris.okanovic@ni.com>
Date: Tue, 15 Aug 2017 15:13:08 -0500
Subject: [PATCH] tpm_tis: fix stall after iowrite*()s
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
ioread8() operations to TPM MMIO addresses can stall the cpu when
immediately following a sequence of iowrite*()'s to the same region.
@@ -26,7 +26,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
-@@ -53,6 +53,31 @@ static inline struct tpm_tis_tcg_phy *to
+@@ -49,6 +49,31 @@ static inline struct tpm_tis_tcg_phy *to
return container_of(data, struct tpm_tis_tcg_phy, priv);
}
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static bool interrupts = true;
module_param(interrupts, bool, 0444);
MODULE_PARM_DESC(interrupts, "Enable interrupts");
-@@ -150,7 +175,7 @@ static int tpm_tcg_write_bytes(struct tp
+@@ -146,7 +171,7 @@ static int tpm_tcg_write_bytes(struct tp
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
while (len--)
@@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
}
-@@ -177,7 +202,7 @@ static int tpm_tcg_write32(struct tpm_ti
+@@ -173,7 +198,7 @@ static int tpm_tcg_write32(struct tpm_ti
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
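The fix flushes posted writes explicitly: every iowrite*() is followed by a
read-back from the same MMIO region, so the CPU never accumulates a long burst
of buffered writes that a later ioread8() would have to drain in one stall.
The added helpers look roughly like this (reconstructed from the description;
the exact register used for the read-back is an assumption):

    static inline void tpm_tis_flush(void __iomem *iobase)
    {
            /* a dummy read forces earlier posted writes out */
            ioread8(iobase + TPM_ACCESS(0));
    }

    static inline void tpm_tis_iowrite8(u8 b, void __iomem *iobase, u32 addr)
    {
            iowrite8(b, iobase + addr);
            tpm_tis_flush(iobase);
    }

    static inline void tpm_tis_iowrite32(u32 b, void __iomem *iobase, u32 addr)
    {
            iowrite32(b, iobase + addr);
            tpm_tis_flush(iobase);
    }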
diff --git a/debian/patches-rt/tty-serial-pl011-warning-about-uninitialized.patch b/debian/patches-rt/tty-serial-pl011-warning-about-uninitialized.patch
index 764b09153..d5a64d21c 100644
--- a/debian/patches-rt/tty-serial-pl011-warning-about-uninitialized.patch
+++ b/debian/patches-rt/tty-serial-pl011-warning-about-uninitialized.patch
@@ -4,7 +4,7 @@ Subject: [PATCH] tty: serial: pl011: explicitly initialize the flags variable
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Silence the following gcc warning:
diff --git a/debian/patches-rt/tty-sysrq-Convert-show_lock-to-raw_spinlock_t.patch b/debian/patches-rt/tty-sysrq-Convert-show_lock-to-raw_spinlock_t.patch
deleted file mode 100644
index 9ff71621a..000000000
--- a/debian/patches-rt/tty-sysrq-Convert-show_lock-to-raw_spinlock_t.patch
+++ /dev/null
@@ -1,56 +0,0 @@
-From: Julien Grall <julien.grall@arm.com>
-Date: Wed, 13 Mar 2019 11:40:34 +0000
-Subject: [PATCH] tty/sysrq: Convert show_lock to raw_spinlock_t
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-Systems which don't provide arch_trigger_cpumask_backtrace() will
-invoke showacpu() from a smp_call_function() function which is invoked
-with disabled interrupts even on -RT systems.
-
-The function acquires the show_lock lock which only purpose is to
-ensure that the CPUs don't print simultaneously. Otherwise the
-output would clash and it would be hard to tell the output from CPUx
-apart from CPUy.
-
-On -RT the spin_lock() can not be acquired from this context. A
-raw_spin_lock() is required. It will introduce the system's latency
-by performing the sysrq request and other CPUs will block on the lock
-until the request is done. This is okay because the user asked for a
-backtrace of all active CPUs and under "normal circumstances in
-production" this path should not be triggered.
-
-Signed-off-by: Julien Grall <julien.grall@arm.com>
-[bigeasy@linuxtronix.de: commit description]
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Acked-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-Cc: stable-rt@vger.kernel.org
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/tty/sysrq.c | 6 +++---
- 1 file changed, 3 insertions(+), 3 deletions(-)
-
---- a/drivers/tty/sysrq.c
-+++ b/drivers/tty/sysrq.c
-@@ -208,7 +208,7 @@ static struct sysrq_key_op sysrq_showloc
- #endif
-
- #ifdef CONFIG_SMP
--static DEFINE_SPINLOCK(show_lock);
-+static DEFINE_RAW_SPINLOCK(show_lock);
-
- static void showacpu(void *dummy)
- {
-@@ -218,10 +218,10 @@ static void showacpu(void *dummy)
- if (idle_cpu(smp_processor_id()))
- return;
-
-- spin_lock_irqsave(&show_lock, flags);
-+ raw_spin_lock_irqsave(&show_lock, flags);
- pr_info("CPU%d:\n", smp_processor_id());
- show_stack(NULL, NULL);
-- spin_unlock_irqrestore(&show_lock, flags);
-+ raw_spin_unlock_irqrestore(&show_lock, flags);
- }
-
- static void sysrq_showregs_othercpus(struct work_struct *dummy)
diff --git a/debian/patches-rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch b/debian/patches-rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
index 1172edb25..3bbc836f7 100644
--- a/debian/patches-rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
+++ b/debian/patches-rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
@@ -1,7 +1,7 @@
Subject: net: Remove preemption disabling in netif_rx()
From: Priyanka Jain <Priyanka.Jain@freescale.com>
Date: Thu, 17 May 2012 09:35:11 +0530
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
1)enqueue_to_backlog() (called from netif_rx) should be
 bound to a particular CPU. This can be achieved by
@@ -38,7 +38,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -4533,7 +4533,7 @@ static int netif_rx_internal(struct sk_b
+@@ -4510,7 +4510,7 @@ static int netif_rx_internal(struct sk_b
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
@@ -47,7 +47,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rcu_read_lock();
cpu = get_rps_cpu(skb->dev, skb, &rflow);
-@@ -4543,14 +4543,14 @@ static int netif_rx_internal(struct sk_b
+@@ -4520,14 +4520,14 @@ static int netif_rx_internal(struct sk_b
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();
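The replacement is a bottom-half-disabled section: local_bh_disable() keeps
the task on one CPU for the enqueue just as preempt_disable() did, but on RT
it maps onto a preemptible local lock instead of hard preemption disabling.
Sketch of the resulting non-RPS path (assumed from the patch description; the
function body is abbreviated):

    static int netif_rx_internal(struct sk_buff *skb)
    {
            unsigned int qtail;
            int ret;

            /* ... */
            local_bh_disable();             /* was: preempt_disable() */
            ret = enqueue_to_backlog(skb, smp_processor_id(), &qtail);
            local_bh_enable();              /* was: preempt_enable() */

            return ret;
    }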
diff --git a/debian/patches-rt/wait.h-include-atomic.h.patch b/debian/patches-rt/wait.h-include-atomic.h.patch
index 0ac6ae033..43245c61b 100644
--- a/debian/patches-rt/wait.h-include-atomic.h.patch
+++ b/debian/patches-rt/wait.h-include-atomic.h.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 28 Oct 2013 12:19:57 +0100
Subject: wait.h: include atomic.h
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
| CC init/main.o
|In file included from include/linux/mmzone.h:9:0,
diff --git a/debian/patches-rt/watchdog-prevent-deferral-of-watchdogd-wakeup-on-RT.patch b/debian/patches-rt/watchdog-prevent-deferral-of-watchdogd-wakeup-on-RT.patch
index 6d6149630..3de28f925 100644
--- a/debian/patches-rt/watchdog-prevent-deferral-of-watchdogd-wakeup-on-RT.patch
+++ b/debian/patches-rt/watchdog-prevent-deferral-of-watchdogd-wakeup-on-RT.patch
@@ -1,7 +1,7 @@
From: Julia Cartwright <julia@ni.com>
Date: Fri, 28 Sep 2018 21:03:51 +0000
Subject: [PATCH] watchdog: prevent deferral of watchdogd wakeup on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
When PREEMPT_RT_FULL is enabled, all hrtimer expiry functions are
deferred for execution into the context of ktimersoftd unless otherwise
diff --git a/debian/patches-rt/work-queue-work-around-irqsafe-timer-optimization.patch b/debian/patches-rt/work-queue-work-around-irqsafe-timer-optimization.patch
deleted file mode 100644
index 4f92a264b..000000000
--- a/debian/patches-rt/work-queue-work-around-irqsafe-timer-optimization.patch
+++ /dev/null
@@ -1,133 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Mon, 01 Jul 2013 11:02:42 +0200
-Subject: workqueue: Prevent workqueue versus ata-piix livelock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-An Intel i7 system regularly detected rcu_preempt stalls after the kernel
-was upgraded from 3.6-rt to 3.8-rt. When the stall happened, disk I/O was no
-longer possible, unless the system was restarted.
-
-The kernel message was:
-INFO: rcu_preempt self-detected stall on CPU { 6}
-[..]
-NMI backtrace for cpu 6
-CPU 6
-Pid: 119, comm: irq/19-ata_piix Not tainted 3.8.13-rt13 #11 Shuttle Inc. SX58/SX58
-RIP: 0010:[<ffffffff8124ca60>] [<ffffffff8124ca60>] ip_compute_csum+0x30/0x30
-RSP: 0018:ffff880333303cb0 EFLAGS: 00000002
-RAX: 0000000000000006 RBX: 00000000000003e9 RCX: 0000000000000034
-RDX: 0000000000000000 RSI: ffffffff81aa16d0 RDI: 0000000000000001
-RBP: ffff880333303ce8 R08: ffffffff81aa16d0 R09: ffffffff81c1b8cc
-R10: 0000000000000000 R11: 0000000000000000 R12: 000000000005161f
-R13: 0000000000000006 R14: ffffffff81aa16d0 R15: 0000000000000002
-FS: 0000000000000000(0000) GS:ffff880333300000(0000) knlGS:0000000000000000
-CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
-CR2: 0000003c1b2bb420 CR3: 0000000001a0f000 CR4: 00000000000007e0
-DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
-DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
-Process irq/19-ata_piix (pid: 119, threadinfo ffff88032d88a000, task ffff88032df80000)
-Stack:
-ffffffff8124cb32 000000000005161e 00000000000003e9 0000000000001000
-0000000000009022 ffffffff81aa16d0 0000000000000002 ffff880333303cf8
-ffffffff8124caa9 ffff880333303d08 ffffffff8124cad2 ffff880333303d28
-Call Trace:
-<IRQ>
-[<ffffffff8124cb32>] ? delay_tsc+0x33/0xe3
-[<ffffffff8124caa9>] __delay+0xf/0x11
-[<ffffffff8124cad2>] __const_udelay+0x27/0x29
-[<ffffffff8102d1fa>] native_safe_apic_wait_icr_idle+0x39/0x45
-[<ffffffff8102dc9b>] __default_send_IPI_dest_field.constprop.0+0x1e/0x58
-[<ffffffff8102dd1e>] default_send_IPI_mask_sequence_phys+0x49/0x7d
-[<ffffffff81030326>] physflat_send_IPI_all+0x17/0x19
-[<ffffffff8102de53>] arch_trigger_all_cpu_backtrace+0x50/0x79
-[<ffffffff810b21d0>] rcu_check_callbacks+0x1cb/0x568
-[<ffffffff81048c9c>] ? raise_softirq+0x2e/0x35
-[<ffffffff81086be0>] ? tick_sched_do_timer+0x38/0x38
-[<ffffffff8104f653>] update_process_times+0x44/0x55
-[<ffffffff81086866>] tick_sched_handle+0x4a/0x59
-[<ffffffff81086c1c>] tick_sched_timer+0x3c/0x5b
-[<ffffffff81062845>] __run_hrtimer+0x9b/0x158
-[<ffffffff810631d8>] hrtimer_interrupt+0x172/0x2aa
-[<ffffffff8102d498>] smp_apic_timer_interrupt+0x76/0x89
-[<ffffffff814d881d>] apic_timer_interrupt+0x6d/0x80
-<EOI>
-[<ffffffff81057cd2>] ? __local_lock_irqsave+0x17/0x4a
-[<ffffffff81059336>] try_to_grab_pending+0x42/0x17e
-[<ffffffff8105a699>] mod_delayed_work_on+0x32/0x88
-[<ffffffff8105a70b>] mod_delayed_work+0x1c/0x1e
-[<ffffffff8122ae84>] blk_run_queue_async+0x37/0x39
-[<ffffffff81230985>] flush_end_io+0xf1/0x107
-[<ffffffff8122e0da>] blk_finish_request+0x21e/0x264
-[<ffffffff8122e162>] blk_end_bidi_request+0x42/0x60
-[<ffffffff8122e1ba>] blk_end_request+0x10/0x12
-[<ffffffff8132de46>] scsi_io_completion+0x1bf/0x492
-[<ffffffff81335cec>] ? sd_done+0x298/0x2ef
-[<ffffffff81325a02>] scsi_finish_command+0xe9/0xf2
-[<ffffffff8132dbcb>] scsi_softirq_done+0x106/0x10f
-[<ffffffff812333d3>] blk_done_softirq+0x77/0x87
-[<ffffffff8104826f>] do_current_softirqs+0x172/0x2e1
-[<ffffffff810aa820>] ? irq_thread_fn+0x3a/0x3a
-[<ffffffff81048466>] local_bh_enable+0x43/0x72
-[<ffffffff810aa866>] irq_forced_thread_fn+0x46/0x52
-[<ffffffff810ab089>] irq_thread+0x8c/0x17c
-[<ffffffff810ab179>] ? irq_thread+0x17c/0x17c
-[<ffffffff810aaffd>] ? wake_threads_waitq+0x44/0x44
-[<ffffffff8105eb18>] kthread+0x8d/0x95
-[<ffffffff8105ea8b>] ? __kthread_parkme+0x65/0x65
-[<ffffffff814d7b7c>] ret_from_fork+0x7c/0xb0
-[<ffffffff8105ea8b>] ? __kthread_parkme+0x65/0x65
-
-The state of softirqd of this CPU at the time of the crash was:
-ksoftirqd/6 R running task 0 53 2 0x00000000
-ffff88032fc39d18 0000000000000046 ffff88033330c4c0 ffff8803303f4710
-ffff88032fc39fd8 ffff88032fc39fd8 0000000000000000 0000000000062500
-ffff88032df88000 ffff8803303f4710 0000000000000000 ffff88032fc38000
-Call Trace:
-[<ffffffff8105a3ae>] ? __queue_work+0x27c/0x27c
-[<ffffffff814d178c>] preempt_schedule+0x61/0x76
-[<ffffffff8106cccf>] migrate_enable+0xe5/0x1df
-[<ffffffff8105a3ae>] ? __queue_work+0x27c/0x27c
-[<ffffffff8104ef52>] run_timer_softirq+0x161/0x1d6
-[<ffffffff8104826f>] do_current_softirqs+0x172/0x2e1
-[<ffffffff8104840b>] run_ksoftirqd+0x2d/0x45
-[<ffffffff8106658a>] smpboot_thread_fn+0x2ea/0x308
-[<ffffffff810662a0>] ? test_ti_thread_flag+0xc/0xc
-[<ffffffff810662a0>] ? test_ti_thread_flag+0xc/0xc
-[<ffffffff8105eb18>] kthread+0x8d/0x95
-[<ffffffff8105ea8b>] ? __kthread_parkme+0x65/0x65
-[<ffffffff814d7afc>] ret_from_fork+0x7c/0xb0
-[<ffffffff8105ea8b>] ? __kthread_parkme+0x65/0x65
-
-Apparently, the softirq demon and the ata_piix IRQ handler were waiting
-for each other to finish ending up in a livelock. After the below patch
-was applied, the system no longer crashes.
-
-Reported-by: Carsten Emde <C.Emde@osadl.org>
-Proposed-by: Thomas Gleixner <tglx@linutronix.de>
-Tested by: Carsten Emde <C.Emde@osadl.org>
-Signed-off-by: Carsten Emde <C.Emde@osadl.org>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/workqueue.c | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
---- a/kernel/workqueue.c
-+++ b/kernel/workqueue.c
-@@ -50,6 +50,7 @@
- #include <linux/sched/isolation.h>
- #include <linux/nmi.h>
- #include <linux/locallock.h>
-+#include <linux/delay.h>
-
- #include "workqueue_internal.h"
-
-@@ -1301,7 +1302,7 @@ static int try_to_grab_pending(struct wo
- local_unlock_irqrestore(pendingb_lock, *flags);
- if (work_is_canceling(work))
- return -ENOENT;
-- cpu_relax();
-+ cpu_chill();
- return -EAGAIN;
- }
-
diff --git a/debian/patches-rt/workqueue-Convert-the-locks-to-raw-type.patch b/debian/patches-rt/workqueue-Convert-the-locks-to-raw-type.patch
new file mode 100644
index 000000000..2aad94b11
--- /dev/null
+++ b/debian/patches-rt/workqueue-Convert-the-locks-to-raw-type.patch
@@ -0,0 +1,697 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 22 May 2019 12:43:56 +0200
+Subject: [PATCH] workqueue: Convert the locks to raw type
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
+
+After all the workqueue and timer rework, the worker_pool lock can
+finally be made a raw lock. The lock is not held for an unbounded
+period of time or an unbounded number of iterations.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/workqueue.c | 175 ++++++++++++++++++++++++++---------------------------
+ 1 file changed, 89 insertions(+), 86 deletions(-)
+
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -50,6 +50,7 @@
+ #include <linux/uaccess.h>
+ #include <linux/sched/isolation.h>
+ #include <linux/nmi.h>
++#include <linux/swait.h>
+
+ #include "workqueue_internal.h"
+
+@@ -145,7 +146,7 @@ enum {
+ /* struct worker is defined in workqueue_internal.h */
+
+ struct worker_pool {
+- spinlock_t lock; /* the pool lock */
++ raw_spinlock_t lock; /* the pool lock */
+ int cpu; /* I: the associated cpu */
+ int node; /* I: the associated node ID */
+ int id; /* I: pool ID */
+@@ -300,8 +301,8 @@ static struct workqueue_attrs *wq_update
+
+ static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
+ static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
+-static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
+-static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
++static DEFINE_RAW_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
++static DECLARE_SWAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
+
+ static LIST_HEAD(workqueues); /* PR: list of all workqueues */
+ static bool workqueue_freezing; /* PL: have wqs started freezing? */
+@@ -831,7 +832,7 @@ static struct worker *first_idle_worker(
+ * Wake up the first idle worker of @pool.
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock).
++ * raw_spin_lock_irq(pool->lock).
+ */
+ static void wake_up_worker(struct worker_pool *pool)
+ {
+@@ -884,7 +885,7 @@ void wq_worker_sleeping(struct task_stru
+ return;
+
+ worker->sleeping = 1;
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+
+ /*
+ * The counterpart of the following dec_and_test, implied mb,
+@@ -903,7 +904,7 @@ void wq_worker_sleeping(struct task_stru
+ if (next)
+ wake_up_process(next->task);
+ }
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+ }
+
+ /**
+@@ -914,7 +915,7 @@ void wq_worker_sleeping(struct task_stru
+ * the scheduler to get a worker's last known identity.
+ *
+ * CONTEXT:
+- * spin_lock_irq(rq->lock)
++ * raw_spin_lock_irq(rq->lock)
+ *
+ * This function is called during schedule() when a kworker is going
+ * to sleep. It's used by psi to identify aggregation workers during
+@@ -945,7 +946,7 @@ work_func_t wq_worker_last_func(struct t
+ * Set @flags in @worker->flags and adjust nr_running accordingly.
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock)
++ * raw_spin_lock_irq(pool->lock)
+ */
+ static inline void worker_set_flags(struct worker *worker, unsigned int flags)
+ {
+@@ -970,7 +971,7 @@ static inline void worker_set_flags(stru
+ * Clear @flags in @worker->flags and adjust nr_running accordingly.
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock)
++ * raw_spin_lock_irq(pool->lock)
+ */
+ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
+ {
+@@ -1018,7 +1019,7 @@ static inline void worker_clr_flags(stru
+ * actually occurs, it should be easy to locate the culprit work function.
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock).
++ * raw_spin_lock_irq(pool->lock).
+ *
+ * Return:
+ * Pointer to worker which is executing @work if found, %NULL
+@@ -1053,7 +1054,7 @@ static struct worker *find_worker_execut
+ * nested inside outer list_for_each_entry_safe().
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock).
++ * raw_spin_lock_irq(pool->lock).
+ */
+ static void move_linked_works(struct work_struct *work, struct list_head *head,
+ struct work_struct **nextp)
+@@ -1131,9 +1132,9 @@ static void put_pwq_unlocked(struct pool
+ * As both pwqs and pools are RCU protected, the
+ * following lock operations are safe.
+ */
+- spin_lock_irq(&pwq->pool->lock);
++ raw_spin_lock_irq(&pwq->pool->lock);
+ put_pwq(pwq);
+- spin_unlock_irq(&pwq->pool->lock);
++ raw_spin_unlock_irq(&pwq->pool->lock);
+ }
+ }
+
+@@ -1166,7 +1167,7 @@ static void pwq_activate_first_delayed(s
+ * decrement nr_in_flight of its pwq and handle workqueue flushing.
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock).
++ * raw_spin_lock_irq(pool->lock).
+ */
+ static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
+ {
+@@ -1265,7 +1266,7 @@ static int try_to_grab_pending(struct wo
+ if (!pool)
+ goto fail;
+
+- spin_lock(&pool->lock);
++ raw_spin_lock(&pool->lock);
+ /*
+ * work->data is guaranteed to point to pwq only while the work
+ * item is queued on pwq->wq, and both updating work->data to point
+@@ -1294,11 +1295,11 @@ static int try_to_grab_pending(struct wo
+ /* work->data points to pwq iff queued, point to pool */
+ set_work_pool_and_keep_pending(work, pool->id);
+
+- spin_unlock(&pool->lock);
++ raw_spin_unlock(&pool->lock);
+ rcu_read_unlock();
+ return 1;
+ }
+- spin_unlock(&pool->lock);
++ raw_spin_unlock(&pool->lock);
+ fail:
+ rcu_read_unlock();
+ local_irq_restore(*flags);
+@@ -1319,7 +1320,7 @@ static int try_to_grab_pending(struct wo
+ * work_struct flags.
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock).
++ * raw_spin_lock_irq(pool->lock).
+ */
+ static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
+ struct list_head *head, unsigned int extra_flags)
+@@ -1434,7 +1435,7 @@ static void __queue_work(int cpu, struct
+ if (last_pool && last_pool != pwq->pool) {
+ struct worker *worker;
+
+- spin_lock(&last_pool->lock);
++ raw_spin_lock(&last_pool->lock);
+
+ worker = find_worker_executing_work(last_pool, work);
+
+@@ -1442,11 +1443,11 @@ static void __queue_work(int cpu, struct
+ pwq = worker->current_pwq;
+ } else {
+ /* meh... not running there, queue here */
+- spin_unlock(&last_pool->lock);
+- spin_lock(&pwq->pool->lock);
++ raw_spin_unlock(&last_pool->lock);
++ raw_spin_lock(&pwq->pool->lock);
+ }
+ } else {
+- spin_lock(&pwq->pool->lock);
++ raw_spin_lock(&pwq->pool->lock);
+ }
+
+ /*
+@@ -1459,7 +1460,7 @@ static void __queue_work(int cpu, struct
+ */
+ if (unlikely(!pwq->refcnt)) {
+ if (wq->flags & WQ_UNBOUND) {
+- spin_unlock(&pwq->pool->lock);
++ raw_spin_unlock(&pwq->pool->lock);
+ cpu_relax();
+ goto retry;
+ }
+@@ -1491,7 +1492,7 @@ static void __queue_work(int cpu, struct
+ insert_work(pwq, work, worklist, work_flags);
+
+ out:
+- spin_unlock(&pwq->pool->lock);
++ raw_spin_unlock(&pwq->pool->lock);
+ rcu_read_unlock();
+ }
+
+@@ -1611,9 +1612,11 @@ EXPORT_SYMBOL_GPL(queue_work_node);
+ void delayed_work_timer_fn(struct timer_list *t)
+ {
+ struct delayed_work *dwork = from_timer(dwork, t, timer);
++ unsigned long flags;
+
+- /* should have been called from irqsafe timer with irq already off */
++ local_irq_save(flags);
+ __queue_work(dwork->cpu, dwork->wq, &dwork->work);
++ local_irq_restore(flags);
+ }
+ EXPORT_SYMBOL(delayed_work_timer_fn);
+
+@@ -1760,7 +1763,7 @@ EXPORT_SYMBOL(queue_rcu_work);
+ * necessary.
+ *
+ * LOCKING:
+- * spin_lock_irq(pool->lock).
++ * raw_spin_lock_irq(pool->lock).
+ */
+ static void worker_enter_idle(struct worker *worker)
+ {
+@@ -1800,7 +1803,7 @@ static void worker_enter_idle(struct wor
+ * @worker is leaving idle state. Update stats.
+ *
+ * LOCKING:
+- * spin_lock_irq(pool->lock).
++ * raw_spin_lock_irq(pool->lock).
+ */
+ static void worker_leave_idle(struct worker *worker)
+ {
+@@ -1938,11 +1941,11 @@ static struct worker *create_worker(stru
+ worker_attach_to_pool(worker, pool);
+
+ /* start the newly created worker */
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+ worker->pool->nr_workers++;
+ worker_enter_idle(worker);
+ wake_up_process(worker->task);
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+
+ return worker;
+
+@@ -1961,7 +1964,7 @@ static struct worker *create_worker(stru
+ * be idle.
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock).
++ * raw_spin_lock_irq(pool->lock).
+ */
+ static void destroy_worker(struct worker *worker)
+ {
+@@ -1987,7 +1990,7 @@ static void idle_worker_timeout(struct t
+ {
+ struct worker_pool *pool = from_timer(pool, t, idle_timer);
+
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+
+ while (too_many_workers(pool)) {
+ struct worker *worker;
+@@ -2005,7 +2008,7 @@ static void idle_worker_timeout(struct t
+ destroy_worker(worker);
+ }
+
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+ }
+
+ static void send_mayday(struct work_struct *work)
+@@ -2036,8 +2039,8 @@ static void pool_mayday_timeout(struct t
+ struct worker_pool *pool = from_timer(pool, t, mayday_timer);
+ struct work_struct *work;
+
+- spin_lock_irq(&pool->lock);
+- spin_lock(&wq_mayday_lock); /* for wq->maydays */
++ raw_spin_lock_irq(&pool->lock);
++ raw_spin_lock(&wq_mayday_lock); /* for wq->maydays */
+
+ if (need_to_create_worker(pool)) {
+ /*
+@@ -2050,8 +2053,8 @@ static void pool_mayday_timeout(struct t
+ send_mayday(work);
+ }
+
+- spin_unlock(&wq_mayday_lock);
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock(&wq_mayday_lock);
++ raw_spin_unlock_irq(&pool->lock);
+
+ mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
+ }
+@@ -2070,7 +2073,7 @@ static void pool_mayday_timeout(struct t
+ * may_start_working() %true.
+ *
+ * LOCKING:
+- * spin_lock_irq(pool->lock) which may be released and regrabbed
++ * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
+ * multiple times. Does GFP_KERNEL allocations. Called only from
+ * manager.
+ */
+@@ -2079,7 +2082,7 @@ static void maybe_create_worker(struct w
+ __acquires(&pool->lock)
+ {
+ restart:
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+
+ /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
+ mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
+@@ -2095,7 +2098,7 @@ static void maybe_create_worker(struct w
+ }
+
+ del_timer_sync(&pool->mayday_timer);
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+ /*
+ * This is necessary even after a new worker was just successfully
+ * created as @pool->lock was dropped and the new worker might have
+@@ -2118,7 +2121,7 @@ static void maybe_create_worker(struct w
+ * and may_start_working() is true.
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock) which may be released and regrabbed
++ * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
+ * multiple times. Does GFP_KERNEL allocations.
+ *
+ * Return:
+@@ -2141,7 +2144,7 @@ static bool manage_workers(struct worker
+
+ pool->manager = NULL;
+ pool->flags &= ~POOL_MANAGER_ACTIVE;
+- wake_up(&wq_manager_wait);
++ swake_up_one(&wq_manager_wait);
+ return true;
+ }
+
+@@ -2157,7 +2160,7 @@ static bool manage_workers(struct worker
+ * call this function to process a work.
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock) which is released and regrabbed.
++ * raw_spin_lock_irq(pool->lock) which is released and regrabbed.
+ */
+ static void process_one_work(struct worker *worker, struct work_struct *work)
+ __releases(&pool->lock)
+@@ -2239,7 +2242,7 @@ static void process_one_work(struct work
+ */
+ set_work_pool_and_clear_pending(work, pool->id);
+
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+
+ lock_map_acquire(&pwq->wq->lockdep_map);
+ lock_map_acquire(&lockdep_map);
+@@ -2294,7 +2297,7 @@ static void process_one_work(struct work
+ */
+ cond_resched();
+
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+
+ /* clear cpu intensive status */
+ if (unlikely(cpu_intensive))
+@@ -2320,7 +2323,7 @@ static void process_one_work(struct work
+ * fetches a work from the top and executes it.
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock) which may be released and regrabbed
++ * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
+ * multiple times.
+ */
+ static void process_scheduled_works(struct worker *worker)
+@@ -2362,11 +2365,11 @@ static int worker_thread(void *__worker)
+ /* tell the scheduler that this is a workqueue worker */
+ set_pf_worker(true);
+ woke_up:
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+
+ /* am I supposed to die? */
+ if (unlikely(worker->flags & WORKER_DIE)) {
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+ WARN_ON_ONCE(!list_empty(&worker->entry));
+ set_pf_worker(false);
+
+@@ -2432,7 +2435,7 @@ static int worker_thread(void *__worker)
+ */
+ worker_enter_idle(worker);
+ __set_current_state(TASK_IDLE);
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+ schedule();
+ goto woke_up;
+ }
+@@ -2486,7 +2489,7 @@ static int rescuer_thread(void *__rescue
+ should_stop = kthread_should_stop();
+
+ /* see whether any pwq is asking for help */
+- spin_lock_irq(&wq_mayday_lock);
++ raw_spin_lock_irq(&wq_mayday_lock);
+
+ while (!list_empty(&wq->maydays)) {
+ struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
+@@ -2498,11 +2501,11 @@ static int rescuer_thread(void *__rescue
+ __set_current_state(TASK_RUNNING);
+ list_del_init(&pwq->mayday_node);
+
+- spin_unlock_irq(&wq_mayday_lock);
++ raw_spin_unlock_irq(&wq_mayday_lock);
+
+ worker_attach_to_pool(rescuer, pool);
+
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+
+ /*
+ * Slurp in all works issued via this workqueue and
+@@ -2531,10 +2534,10 @@ static int rescuer_thread(void *__rescue
+ * incur MAYDAY_INTERVAL delay inbetween.
+ */
+ if (need_to_create_worker(pool)) {
+- spin_lock(&wq_mayday_lock);
++ raw_spin_lock(&wq_mayday_lock);
+ get_pwq(pwq);
+ list_move_tail(&pwq->mayday_node, &wq->maydays);
+- spin_unlock(&wq_mayday_lock);
++ raw_spin_unlock(&wq_mayday_lock);
+ }
+ }
+
+@@ -2552,14 +2555,14 @@ static int rescuer_thread(void *__rescue
+ if (need_more_worker(pool))
+ wake_up_worker(pool);
+
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+
+ worker_detach_from_pool(rescuer);
+
+- spin_lock_irq(&wq_mayday_lock);
++ raw_spin_lock_irq(&wq_mayday_lock);
+ }
+
+- spin_unlock_irq(&wq_mayday_lock);
++ raw_spin_unlock_irq(&wq_mayday_lock);
+
+ if (should_stop) {
+ __set_current_state(TASK_RUNNING);
+@@ -2639,7 +2642,7 @@ static void wq_barrier_func(struct work_
+ * underneath us, so we can't reliably determine pwq from @target.
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock).
++ * raw_spin_lock_irq(pool->lock).
+ */
+ static void insert_wq_barrier(struct pool_workqueue *pwq,
+ struct wq_barrier *barr,
+@@ -2726,7 +2729,7 @@ static bool flush_workqueue_prep_pwqs(st
+ for_each_pwq(pwq, wq) {
+ struct worker_pool *pool = pwq->pool;
+
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+
+ if (flush_color >= 0) {
+ WARN_ON_ONCE(pwq->flush_color != -1);
+@@ -2743,7 +2746,7 @@ static bool flush_workqueue_prep_pwqs(st
+ pwq->work_color = work_color;
+ }
+
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+ }
+
+ if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
+@@ -2943,9 +2946,9 @@ void drain_workqueue(struct workqueue_st
+ for_each_pwq(pwq, wq) {
+ bool drained;
+
+- spin_lock_irq(&pwq->pool->lock);
++ raw_spin_lock_irq(&pwq->pool->lock);
+ drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
+- spin_unlock_irq(&pwq->pool->lock);
++ raw_spin_unlock_irq(&pwq->pool->lock);
+
+ if (drained)
+ continue;
+@@ -2981,7 +2984,7 @@ static bool start_flush_work(struct work
+ return false;
+ }
+
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+ /* see the comment in try_to_grab_pending() with the same code */
+ pwq = get_work_pwq(work);
+ if (pwq) {
+@@ -2997,7 +3000,7 @@ static bool start_flush_work(struct work
+ check_flush_dependency(pwq->wq, work);
+
+ insert_wq_barrier(pwq, barr, work, worker);
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+
+ /*
+ * Force a lock recursion deadlock when using flush_work() inside a
+@@ -3016,7 +3019,7 @@ static bool start_flush_work(struct work
+ rcu_read_unlock();
+ return true;
+ already_gone:
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+ rcu_read_unlock();
+ return false;
+ }
+@@ -3409,7 +3412,7 @@ static bool wqattrs_equal(const struct w
+ */
+ static int init_worker_pool(struct worker_pool *pool)
+ {
+- spin_lock_init(&pool->lock);
++ raw_spin_lock_init(&pool->lock);
+ pool->id = -1;
+ pool->cpu = -1;
+ pool->node = NUMA_NO_NODE;
+@@ -3535,15 +3538,15 @@ static void put_unbound_pool(struct work
+ * @pool's workers from blocking on attach_mutex. We're the last
+ * manager and @pool gets freed with the flag set.
+ */
+- spin_lock_irq(&pool->lock);
+- wait_event_lock_irq(wq_manager_wait,
++ raw_spin_lock_irq(&pool->lock);
++ swait_event_lock_irq(wq_manager_wait,
+ !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
+ pool->flags |= POOL_MANAGER_ACTIVE;
+
+ while ((worker = first_idle_worker(pool)))
+ destroy_worker(worker);
+ WARN_ON(pool->nr_workers || pool->nr_idle);
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+
+ mutex_lock(&wq_pool_attach_mutex);
+ if (!list_empty(&pool->workers))
+@@ -3699,7 +3702,7 @@ static void pwq_adjust_max_active(struct
+ return;
+
+ /* this function can be called during early boot w/ irq disabled */
+- spin_lock_irqsave(&pwq->pool->lock, flags);
++ raw_spin_lock_irqsave(&pwq->pool->lock, flags);
+
+ /*
+ * During [un]freezing, the caller is responsible for ensuring that
+@@ -3722,7 +3725,7 @@ static void pwq_adjust_max_active(struct
+ pwq->max_active = 0;
+ }
+
+- spin_unlock_irqrestore(&pwq->pool->lock, flags);
++ raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
+ }
+
+ /* initialize newly alloced @pwq which is associated with @wq and @pool */
+@@ -4120,9 +4123,9 @@ static void wq_update_unbound_numa(struc
+
+ use_dfl_pwq:
+ mutex_lock(&wq->mutex);
+- spin_lock_irq(&wq->dfl_pwq->pool->lock);
++ raw_spin_lock_irq(&wq->dfl_pwq->pool->lock);
+ get_pwq(wq->dfl_pwq);
+- spin_unlock_irq(&wq->dfl_pwq->pool->lock);
++ raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock);
+ old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
+ out_unlock:
+ mutex_unlock(&wq->mutex);
+@@ -4513,10 +4516,10 @@ unsigned int work_busy(struct work_struc
+ rcu_read_lock();
+ pool = get_work_pool(work);
+ if (pool) {
+- spin_lock_irqsave(&pool->lock, flags);
++ raw_spin_lock_irqsave(&pool->lock, flags);
+ if (find_worker_executing_work(pool, work))
+ ret |= WORK_BUSY_RUNNING;
+- spin_unlock_irqrestore(&pool->lock, flags);
++ raw_spin_unlock_irqrestore(&pool->lock, flags);
+ }
+ rcu_read_unlock();
+
+@@ -4722,10 +4725,10 @@ void show_workqueue_state(void)
+ pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
+
+ for_each_pwq(pwq, wq) {
+- spin_lock_irqsave(&pwq->pool->lock, flags);
++ raw_spin_lock_irqsave(&pwq->pool->lock, flags);
+ if (pwq->nr_active || !list_empty(&pwq->delayed_works))
+ show_pwq(pwq);
+- spin_unlock_irqrestore(&pwq->pool->lock, flags);
++ raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
+ /*
+ * We could be printing a lot from atomic context, e.g.
+ * sysrq-t -> show_workqueue_state(). Avoid triggering
+@@ -4739,7 +4742,7 @@ void show_workqueue_state(void)
+ struct worker *worker;
+ bool first = true;
+
+- spin_lock_irqsave(&pool->lock, flags);
++ raw_spin_lock_irqsave(&pool->lock, flags);
+ if (pool->nr_workers == pool->nr_idle)
+ goto next_pool;
+
+@@ -4758,7 +4761,7 @@ void show_workqueue_state(void)
+ }
+ pr_cont("\n");
+ next_pool:
+- spin_unlock_irqrestore(&pool->lock, flags);
++ raw_spin_unlock_irqrestore(&pool->lock, flags);
+ /*
+ * We could be printing a lot from atomic context, e.g.
+ * sysrq-t -> show_workqueue_state(). Avoid triggering
+@@ -4788,7 +4791,7 @@ void wq_worker_comm(char *buf, size_t si
+ struct worker_pool *pool = worker->pool;
+
+ if (pool) {
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+ /*
+ * ->desc tracks information (wq name or
+ * set_worker_desc()) for the latest execution. If
+@@ -4802,7 +4805,7 @@ void wq_worker_comm(char *buf, size_t si
+ scnprintf(buf + off, size - off, "-%s",
+ worker->desc);
+ }
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+ }
+ }
+
+@@ -4833,7 +4836,7 @@ static void unbind_workers(int cpu)
+
+ for_each_cpu_worker_pool(pool, cpu) {
+ mutex_lock(&wq_pool_attach_mutex);
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+
+ /*
+ * We've blocked all attach/detach operations. Make all workers
+@@ -4847,7 +4850,7 @@ static void unbind_workers(int cpu)
+
+ pool->flags |= POOL_DISASSOCIATED;
+
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+ mutex_unlock(&wq_pool_attach_mutex);
+
+ /*
+@@ -4873,9 +4876,9 @@ static void unbind_workers(int cpu)
+ * worker blocking could lead to lengthy stalls. Kick off
+ * unbound chain execution of currently pending work items.
+ */
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+ wake_up_worker(pool);
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+ }
+ }
+
+@@ -4902,7 +4905,7 @@ static void rebind_workers(struct worker
+ WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
+ pool->attrs->cpumask) < 0);
+
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+
+ pool->flags &= ~POOL_DISASSOCIATED;
+
+@@ -4941,7 +4944,7 @@ static void rebind_workers(struct worker
+ WRITE_ONCE(worker->flags, worker_flags);
+ }
+
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+ }
+
+ /**
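The mechanical conversion matters because on PREEMPT_RT a plain spinlock_t
becomes a sleeping lock, while raw_spinlock_t remains a genuine spinning lock
that is safe to take with interrupts disabled; it is only legitimate here
because every pool->lock section is short and bounded, as the description
notes. A minimal sketch of the distinction (illustrative, not from the patch):

    static DEFINE_RAW_SPINLOCK(bounded_lock); /* spins on RT too; usable
                                                 from hard-irq context */
    static DEFINE_SPINLOCK(sleepy_lock);      /* sleeps on RT; must not be
                                                 taken from hard irqs there */

    static void example(void)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&bounded_lock, flags);
            /* only short, bounded work: on RT this delays everyone */
            raw_spin_unlock_irqrestore(&bounded_lock, flags);

            spin_lock(&sleepy_lock);
            /* on RT a "spinning" waiter on this lock actually sleeps */
            spin_unlock(&sleepy_lock);
    }

The same constraint drives the switch from wait_event_lock_irq() to
swait_event_lock_irq() in the hunks above: the manager wait must not use a
waitqueue whose internal lock sleeps on RT while pool->lock is held raw.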
diff --git a/debian/patches-rt/workqueue-Make-alloc-apply-free_workqueue_attrs-stat.patch b/debian/patches-rt/workqueue-Make-alloc-apply-free_workqueue_attrs-stat.patch
new file mode 100644
index 000000000..8d674f120
--- /dev/null
+++ b/debian/patches-rt/workqueue-Make-alloc-apply-free_workqueue_attrs-stat.patch
@@ -0,0 +1,65 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 21 May 2019 16:35:12 +0200
+Subject: [PATCH] workqueue: Make alloc/apply/free_workqueue_attrs() static
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
+
+None of those functions have any users outside of workqueue.c. Confine
+them.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/workqueue.h | 4 ----
+ kernel/workqueue.c | 7 +++----
+ 2 files changed, 3 insertions(+), 8 deletions(-)
+
+--- a/include/linux/workqueue.h
++++ b/include/linux/workqueue.h
+@@ -435,10 +435,6 @@ struct workqueue_struct *alloc_workqueue
+
+ extern void destroy_workqueue(struct workqueue_struct *wq);
+
+-struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
+-void free_workqueue_attrs(struct workqueue_attrs *attrs);
+-int apply_workqueue_attrs(struct workqueue_struct *wq,
+- const struct workqueue_attrs *attrs);
+ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);
+
+ extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -3329,7 +3329,7 @@ EXPORT_SYMBOL_GPL(execute_in_process_con
+ *
+ * Undo alloc_workqueue_attrs().
+ */
+-void free_workqueue_attrs(struct workqueue_attrs *attrs)
++static void free_workqueue_attrs(struct workqueue_attrs *attrs)
+ {
+ if (attrs) {
+ free_cpumask_var(attrs->cpumask);
+@@ -3346,7 +3346,7 @@ void free_workqueue_attrs(struct workque
+ *
+ * Return: The allocated new workqueue_attr on success. %NULL on failure.
+ */
+-struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask)
++static struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask)
+ {
+ struct workqueue_attrs *attrs;
+
+@@ -4033,7 +4033,7 @@ static int apply_workqueue_attrs_locked(
+ *
+ * Return: 0 on success and -errno on failure.
+ */
+-int apply_workqueue_attrs(struct workqueue_struct *wq,
++static int apply_workqueue_attrs(struct workqueue_struct *wq,
+ const struct workqueue_attrs *attrs)
+ {
+ int ret;
+@@ -4044,7 +4044,6 @@ int apply_workqueue_attrs(struct workque
+
+ return ret;
+ }
+-EXPORT_SYMBOL_GPL(apply_workqueue_attrs);
+
+ /**
+ * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
diff --git a/debian/patches-rt/workqueue-Remove-GPF-argument-from-alloc_workqueue_a.patch b/debian/patches-rt/workqueue-Remove-GPF-argument-from-alloc_workqueue_a.patch
new file mode 100644
index 000000000..9a8f23ba6
--- /dev/null
+++ b/debian/patches-rt/workqueue-Remove-GPF-argument-from-alloc_workqueue_a.patch
@@ -0,0 +1,106 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 21 May 2019 16:39:56 +0200
+Subject: [PATCH] workqueue: Remove GPF argument from
+ alloc_workqueue_attrs()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
+
+All callers use GFP_KERNEL. No point in having that argument.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/workqueue.c | 23 +++++++++++------------
+ 1 file changed, 11 insertions(+), 12 deletions(-)
+
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -3339,21 +3339,20 @@ static void free_workqueue_attrs(struct
+
+ /**
+ * alloc_workqueue_attrs - allocate a workqueue_attrs
+- * @gfp_mask: allocation mask to use
+ *
+ * Allocate a new workqueue_attrs, initialize with default settings and
+ * return it.
+ *
+ * Return: The allocated new workqueue_attr on success. %NULL on failure.
+ */
+-static struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask)
++static struct workqueue_attrs *alloc_workqueue_attrs(void)
+ {
+ struct workqueue_attrs *attrs;
+
+- attrs = kzalloc(sizeof(*attrs), gfp_mask);
++ attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
+ if (!attrs)
+ goto fail;
+- if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask))
++ if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL))
+ goto fail;
+
+ cpumask_copy(attrs->cpumask, cpu_possible_mask);
+@@ -3431,7 +3430,7 @@ static int init_worker_pool(struct worke
+ pool->refcnt = 1;
+
+ /* shouldn't fail above this point */
+- pool->attrs = alloc_workqueue_attrs(GFP_KERNEL);
++ pool->attrs = alloc_workqueue_attrs();
+ if (!pool->attrs)
+ return -ENOMEM;
+ return 0;
+@@ -3896,8 +3895,8 @@ apply_wqattrs_prepare(struct workqueue_s
+
+ ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL);
+
+- new_attrs = alloc_workqueue_attrs(GFP_KERNEL);
+- tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL);
++ new_attrs = alloc_workqueue_attrs();
++ tmp_attrs = alloc_workqueue_attrs();
+ if (!ctx || !new_attrs || !tmp_attrs)
+ goto out_free;
+
+@@ -4241,7 +4240,7 @@ struct workqueue_struct *alloc_workqueue
+ return NULL;
+
+ if (flags & WQ_UNBOUND) {
+- wq->unbound_attrs = alloc_workqueue_attrs(GFP_KERNEL);
++ wq->unbound_attrs = alloc_workqueue_attrs();
+ if (!wq->unbound_attrs)
+ goto err_free_wq;
+ }
+@@ -5394,7 +5393,7 @@ static struct workqueue_attrs *wq_sysfs_
+
+ lockdep_assert_held(&wq_pool_mutex);
+
+- attrs = alloc_workqueue_attrs(GFP_KERNEL);
++ attrs = alloc_workqueue_attrs();
+ if (!attrs)
+ return NULL;
+
+@@ -5816,7 +5815,7 @@ static void __init wq_numa_init(void)
+ return;
+ }
+
+- wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs(GFP_KERNEL);
++ wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs();
+ BUG_ON(!wq_update_unbound_numa_attrs_buf);
+
+ /*
+@@ -5891,7 +5890,7 @@ int __init workqueue_init_early(void)
+ for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
+ struct workqueue_attrs *attrs;
+
+- BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
++ BUG_ON(!(attrs = alloc_workqueue_attrs()));
+ attrs->nice = std_nice[i];
+ unbound_std_wq_attrs[i] = attrs;
+
+@@ -5900,7 +5899,7 @@ int __init workqueue_init_early(void)
+ * guaranteed by max_active which is enforced by pwqs.
+ * Turn off NUMA so that dfl_pwq is used for all nodes.
+ */
+- BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
++ BUG_ON(!(attrs = alloc_workqueue_attrs()));
+ attrs->nice = std_nice[i];
+ attrs->no_numa = true;
+ ordered_wq_attrs[i] = attrs;
diff --git a/debian/patches-rt/workqueue-distangle-from-rq-lock.patch b/debian/patches-rt/workqueue-distangle-from-rq-lock.patch
deleted file mode 100644
index 68803f543..000000000
--- a/debian/patches-rt/workqueue-distangle-from-rq-lock.patch
+++ /dev/null
@@ -1,283 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Wed Jun 22 19:47:03 2011 +0200
-Subject: sched: Distangle worker accounting from rqlock
-
-The worker accounting for cpu bound workers is plugged into the core
-scheduler code and the wakeup code. This is not a hard requirement and
-can be avoided by keeping track of the state in the workqueue code
-itself.
-
-Keep track of the sleeping state in the worker itself and call the
-notifier before entering the core scheduler. There might be false
-positives when the task is woken between that call and actually
-scheduling, but that's not really different from scheduling and being
-woken immediately after switching away. There is also no harm from
-updating nr_running when the task returns from scheduling instead of
-accounting it in the wakeup code.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Tejun Heo <tj@kernel.org>
-Cc: Jens Axboe <axboe@kernel.dk>
-Cc: Linus Torvalds <torvalds@linux-foundation.org>
-Link: http://lkml.kernel.org/r/20110622174919.135236139@linutronix.de
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-[bigeasy: preempt_disable() around wq_worker_sleeping() by Daniel Bristot de
- Oliveira]
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/sched/core.c | 90 ++++++++++----------------------------------
- kernel/workqueue.c | 52 +++++++++++--------------
- kernel/workqueue_internal.h | 5 +-
- 3 files changed, 47 insertions(+), 100 deletions(-)
-
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -1722,10 +1722,6 @@ static inline void ttwu_activate(struct
- {
- activate_task(rq, p, en_flags);
- p->on_rq = TASK_ON_RQ_QUEUED;
--
-- /* If a worker is waking up, notify the workqueue: */
-- if (p->flags & PF_WQ_WORKER)
-- wq_worker_waking_up(p, cpu_of(rq));
- }
-
- /*
-@@ -2163,56 +2159,6 @@ try_to_wake_up(struct task_struct *p, un
- }
-
- /**
-- * try_to_wake_up_local - try to wake up a local task with rq lock held
-- * @p: the thread to be awakened
-- * @rf: request-queue flags for pinning
-- *
-- * Put @p on the run-queue if it's not already there. The caller must
-- * ensure that this_rq() is locked, @p is bound to this_rq() and not
-- * the current task.
-- */
--static void try_to_wake_up_local(struct task_struct *p, struct rq_flags *rf)
--{
-- struct rq *rq = task_rq(p);
--
-- if (WARN_ON_ONCE(rq != this_rq()) ||
-- WARN_ON_ONCE(p == current))
-- return;
--
-- lockdep_assert_held(&rq->lock);
--
-- if (!raw_spin_trylock(&p->pi_lock)) {
-- /*
-- * This is OK, because current is on_cpu, which avoids it being
-- * picked for load-balance and preemption/IRQs are still
-- * disabled avoiding further scheduler activity on it and we've
-- * not yet picked a replacement task.
-- */
-- rq_unlock(rq, rf);
-- raw_spin_lock(&p->pi_lock);
-- rq_relock(rq, rf);
-- }
--
-- if (!(p->state & TASK_NORMAL))
-- goto out;
--
-- trace_sched_waking(p);
--
-- if (!task_on_rq_queued(p)) {
-- if (p->in_iowait) {
-- delayacct_blkio_end(p);
-- atomic_dec(&rq->nr_iowait);
-- }
-- ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK);
-- }
--
-- ttwu_do_wakeup(rq, p, 0, rf);
-- ttwu_stat(p, smp_processor_id(), 0);
--out:
-- raw_spin_unlock(&p->pi_lock);
--}
--
--/**
- * wake_up_process - Wake up a specific process
- * @p: The process to be woken up.
- *
-@@ -3532,21 +3478,6 @@ static void __sched notrace __schedule(b
- atomic_inc(&rq->nr_iowait);
- delayacct_blkio_start();
- }
--
-- /*
-- * If a worker went to sleep, notify and ask workqueue
-- * whether it wants to wake up a task to maintain
-- * concurrency.
-- * Only call wake up if prev isn't blocked on a sleeping
-- * spin lock.
-- */
-- if (prev->flags & PF_WQ_WORKER && !prev->saved_state) {
-- struct task_struct *to_wakeup;
--
-- to_wakeup = wq_worker_sleeping(prev);
-- if (to_wakeup)
-- try_to_wake_up_local(to_wakeup, &rf);
-- }
- }
- switch_count = &prev->nvcsw;
- }
-@@ -3606,6 +3537,20 @@ static inline void sched_submit_work(str
- {
- if (!tsk->state || tsk_is_pi_blocked(tsk))
- return;
-+
-+ /*
-+ * If a worker went to sleep, notify and ask workqueue whether
-+ * it wants to wake up a task to maintain concurrency.
-+ * As this function is called inside the schedule() context,
-+ * we disable preemption to avoid it calling schedule() again
-+ * in the possible wakeup of a kworker.
-+ */
-+ if (tsk->flags & PF_WQ_WORKER) {
-+ preempt_disable();
-+ wq_worker_sleeping(tsk);
-+ preempt_enable_no_resched();
-+ }
-+
- /*
- * If we are going to sleep and we have plugged IO queued,
- * make sure to submit it to avoid deadlocks.
-@@ -3614,6 +3559,12 @@ static inline void sched_submit_work(str
- blk_schedule_flush_plug(tsk);
- }
-
-+static void sched_update_worker(struct task_struct *tsk)
-+{
-+ if (tsk->flags & PF_WQ_WORKER)
-+ wq_worker_running(tsk);
-+}
-+
- asmlinkage __visible void __sched schedule(void)
- {
- struct task_struct *tsk = current;
-@@ -3624,6 +3575,7 @@ asmlinkage __visible void __sched schedu
- __schedule(false);
- sched_preempt_enable_no_resched();
- } while (need_resched());
-+ sched_update_worker(tsk);
- }
- EXPORT_SYMBOL(schedule);
-
---- a/kernel/workqueue.c
-+++ b/kernel/workqueue.c
-@@ -843,43 +843,32 @@ static void wake_up_worker(struct worker
- }
-
- /**
-- * wq_worker_waking_up - a worker is waking up
-+ * wq_worker_running - a worker is running again
- * @task: task waking up
-- * @cpu: CPU @task is waking up to
- *
-- * This function is called during try_to_wake_up() when a worker is
-- * being awoken.
-- *
-- * CONTEXT:
-- * spin_lock_irq(rq->lock)
-+ * This function is called when a worker returns from schedule()
- */
--void wq_worker_waking_up(struct task_struct *task, int cpu)
-+void wq_worker_running(struct task_struct *task)
- {
- struct worker *worker = kthread_data(task);
-
-- if (!(worker->flags & WORKER_NOT_RUNNING)) {
-- WARN_ON_ONCE(worker->pool->cpu != cpu);
-+ if (!worker->sleeping)
-+ return;
-+ if (!(worker->flags & WORKER_NOT_RUNNING))
- atomic_inc(&worker->pool->nr_running);
-- }
-+ worker->sleeping = 0;
- }
-
- /**
- * wq_worker_sleeping - a worker is going to sleep
- * @task: task going to sleep
- *
-- * This function is called during schedule() when a busy worker is
-- * going to sleep. Worker on the same cpu can be woken up by
-- * returning pointer to its task.
-- *
-- * CONTEXT:
-- * spin_lock_irq(rq->lock)
-- *
-- * Return:
-- * Worker task on @cpu to wake up, %NULL if none.
-+ * This function is called from schedule() when a busy worker is
-+ * going to sleep.
- */
--struct task_struct *wq_worker_sleeping(struct task_struct *task)
-+void wq_worker_sleeping(struct task_struct *task)
- {
-- struct worker *worker = kthread_data(task), *to_wakeup = NULL;
-+ struct worker *next, *worker = kthread_data(task);
- struct worker_pool *pool;
-
- /*
-@@ -888,13 +877,15 @@ struct task_struct *wq_worker_sleeping(s
- * checking NOT_RUNNING.
- */
- if (worker->flags & WORKER_NOT_RUNNING)
-- return NULL;
-+ return;
-
- pool = worker->pool;
-
-- /* this can only happen on the local cpu */
-- if (WARN_ON_ONCE(pool->cpu != raw_smp_processor_id()))
-- return NULL;
-+ if (WARN_ON_ONCE(worker->sleeping))
-+ return;
-+
-+ worker->sleeping = 1;
-+ spin_lock_irq(&pool->lock);
-
- /*
- * The counterpart of the following dec_and_test, implied mb,
-@@ -908,9 +899,12 @@ struct task_struct *wq_worker_sleeping(s
- * lock is safe.
- */
- if (atomic_dec_and_test(&pool->nr_running) &&
-- !list_empty(&pool->worklist))
-- to_wakeup = first_idle_worker(pool);
-- return to_wakeup ? to_wakeup->task : NULL;
-+ !list_empty(&pool->worklist)) {
-+ next = first_idle_worker(pool);
-+ if (next)
-+ wake_up_process(next->task);
-+ }
-+ spin_unlock_irq(&pool->lock);
- }
-
- /**
---- a/kernel/workqueue_internal.h
-+++ b/kernel/workqueue_internal.h
-@@ -44,6 +44,7 @@ struct worker {
- unsigned long last_active; /* L: last active timestamp */
- unsigned int flags; /* X: flags */
- int id; /* I: worker id */
-+ int sleeping; /* None */
-
- /*
- * Opaque string set with work_set_desc(). Printed out with task
-@@ -72,8 +73,8 @@ static inline struct worker *current_wq_
- * Scheduler hooks for concurrency managed workqueue. Only to be used from
- * sched/ and workqueue.c.
- */
--void wq_worker_waking_up(struct task_struct *task, int cpu);
--struct task_struct *wq_worker_sleeping(struct task_struct *task);
-+void wq_worker_running(struct task_struct *task);
-+void wq_worker_sleeping(struct task_struct *task);
- work_func_t wq_worker_last_func(struct task_struct *task);
-
- #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
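
A note on the shape of the rework above: wq_worker_sleeping() and wq_worker_running() form a pair around schedule(), with the per-worker sleeping flag preventing double accounting of pool->nr_running. The following is a minimal userspace sketch of that pairing, assuming C11 atomics; every name in it is an illustrative stand-in, not the kernel API, and the kernel versions additionally check WORKER_NOT_RUNNING and take pool->lock.

    /* model of the wq_worker_sleeping()/wq_worker_running() pairing */
    #include <stdatomic.h>
    #include <stdio.h>

    struct pool {
        atomic_int nr_running;   /* concurrency level the pool accounts for */
        int        work_pending; /* stand-in for !list_empty(&pool->worklist) */
    };

    struct worker {
        struct pool *pool;
        int sleeping;            /* set between sleeping() and running() */
    };

    static void wake_one_idle_worker(struct pool *p)
    {
        /* kernel: first_idle_worker() + wake_up_process() */
        printf("waking an idle worker to keep the pool busy\n");
    }

    /* called just before the worker blocks (kernel: sched_submit_work()) */
    static void worker_sleeping(struct worker *w)
    {
        if (w->sleeping)
            return;
        w->sleeping = 1;
        /* last runnable worker with work left: hand off to an idle one */
        if (atomic_fetch_sub(&w->pool->nr_running, 1) == 1 &&
            w->pool->work_pending)
            wake_one_idle_worker(w->pool);
    }

    /* called after the worker returns from schedule() */
    static void worker_running(struct worker *w)
    {
        if (!w->sleeping)
            return;
        atomic_fetch_add(&w->pool->nr_running, 1);
        w->sleeping = 0;
    }

    int main(void)
    {
        struct pool p = { .nr_running = 1, .work_pending = 1 };
        struct worker w = { .pool = &p };

        worker_sleeping(&w);   /* going to sleep: pool wakes a replacement */
        worker_running(&w);    /* back from schedule(): accounting restored */
        return 0;
    }
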
diff --git a/debian/patches-rt/workqueue-prevent-deadlock-stall.patch b/debian/patches-rt/workqueue-prevent-deadlock-stall.patch
deleted file mode 100644
index 5d2968675..000000000
--- a/debian/patches-rt/workqueue-prevent-deadlock-stall.patch
+++ /dev/null
@@ -1,200 +0,0 @@
-Subject: workqueue: Prevent deadlock/stall on RT
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Fri, 27 Jun 2014 16:24:52 +0200 (CEST)
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-Austin reported an XFS deadlock/stall on RT where scheduled work never
-gets executed and tasks wait for each other forever.
-
-The underlying problem is the RT modification of how workers that are
-about to go to sleep are handled. In mainline, a worker thread which
-goes to sleep wakes an idle worker if there is more work to do. This
-happens from the guts of the schedule() function. On RT this must
-happen outside of schedule(), and the data structures involved are not
-protected against scheduling due to the spinlock to rtmutex
-conversion. The naive solution was therefore to move the code out of
-the scheduler and protect the data structures with the pool lock. That
-approach turned out to be a little too naive, as we cannot call into
-that code when the thread blocks on a lock: it is not allowed to block
-on two locks in parallel. So we don't call into the worker wakeup
-magic when the worker is blocked on a lock, which causes the
-deadlock/stall observed by Austin and Mike.
-
-Looking deeper into that worker code, it turns out that the only
-relevant data structure which needs to be protected is the list of
-idle workers which can be woken up.
-
-So the solution is to protect the list manipulation operations with
-preempt_disable/enable pairs on RT and to call unconditionally into
-the worker code even when the worker is blocked on a lock. The
-preemption protection is safe as nothing can fiddle with the list
-outside of thread context.
-
-Reported-and-tested-by: Austin Schuh <austin@peloton-tech.com>
-Reported-and-tested-by: Mike Galbraith <umgwanakikbuti@gmail.com>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Link: http://vger.kernel.org/r/alpine.DEB.2.10.1406271249510.5170@nanos
-Cc: Richard Weinberger <richard.weinberger@gmail.com>
-Cc: Steven Rostedt <rostedt@goodmis.org>
-
----
- kernel/sched/core.c | 6 +++--
- kernel/workqueue.c | 60 ++++++++++++++++++++++++++++++++++++++++------------
- 2 files changed, 51 insertions(+), 15 deletions(-)
-
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -3581,9 +3581,8 @@ void __noreturn do_task_dead(void)
-
- static inline void sched_submit_work(struct task_struct *tsk)
- {
-- if (!tsk->state || tsk_is_pi_blocked(tsk))
-+ if (!tsk->state)
- return;
--
- /*
- * If a worker went to sleep, notify and ask workqueue whether
- * it wants to wake up a task to maintain concurrency.
-@@ -3597,6 +3596,9 @@ static inline void sched_submit_work(str
- preempt_enable_no_resched();
- }
-
-+ if (tsk_is_pi_blocked(tsk))
-+ return;
-+
- /*
- * If we are going to sleep and we have plugged IO queued,
- * make sure to submit it to avoid deadlocks.
---- a/kernel/workqueue.c
-+++ b/kernel/workqueue.c
-@@ -125,6 +125,11 @@ enum {
- * cpu or grabbing pool->lock is enough for read access. If
- * POOL_DISASSOCIATED is set, it's identical to L.
- *
-+ * On RT we need the extra protection via rt_lock_idle_list() for
-+ * the list manipulations against read access from
-+ * wq_worker_sleeping(). All other places are nicely serialized via
-+ * pool->lock.
-+ *
- * A: wq_pool_attach_mutex protected.
- *
- * PL: wq_pool_mutex protected.
-@@ -430,6 +435,31 @@ static void workqueue_sysfs_unregister(s
- if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \
- else
-
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+static inline void rt_lock_idle_list(struct worker_pool *pool)
-+{
-+ preempt_disable();
-+}
-+static inline void rt_unlock_idle_list(struct worker_pool *pool)
-+{
-+ preempt_enable();
-+}
-+static inline void sched_lock_idle_list(struct worker_pool *pool) { }
-+static inline void sched_unlock_idle_list(struct worker_pool *pool) { }
-+#else
-+static inline void rt_lock_idle_list(struct worker_pool *pool) { }
-+static inline void rt_unlock_idle_list(struct worker_pool *pool) { }
-+static inline void sched_lock_idle_list(struct worker_pool *pool)
-+{
-+ spin_lock_irq(&pool->lock);
-+}
-+static inline void sched_unlock_idle_list(struct worker_pool *pool)
-+{
-+ spin_unlock_irq(&pool->lock);
-+}
-+#endif
-+
-+
- #ifdef CONFIG_DEBUG_OBJECTS_WORK
-
- static struct debug_obj_descr work_debug_descr;
-@@ -836,10 +866,16 @@ static struct worker *first_idle_worker(
- */
- static void wake_up_worker(struct worker_pool *pool)
- {
-- struct worker *worker = first_idle_worker(pool);
-+ struct worker *worker;
-+
-+ rt_lock_idle_list(pool);
-+
-+ worker = first_idle_worker(pool);
-
- if (likely(worker))
- wake_up_process(worker->task);
-+
-+ rt_unlock_idle_list(pool);
- }
-
- /**
-@@ -868,7 +904,7 @@ void wq_worker_running(struct task_struc
- */
- void wq_worker_sleeping(struct task_struct *task)
- {
-- struct worker *next, *worker = kthread_data(task);
-+ struct worker *worker = kthread_data(task);
- struct worker_pool *pool;
-
- /*
-@@ -885,26 +921,18 @@ void wq_worker_sleeping(struct task_stru
- return;
-
- worker->sleeping = 1;
-- spin_lock_irq(&pool->lock);
-
- /*
- * The counterpart of the following dec_and_test, implied mb,
- * worklist not empty test sequence is in insert_work().
- * Please read comment there.
-- *
-- * NOT_RUNNING is clear. This means that we're bound to and
-- * running on the local cpu w/ rq lock held and preemption
-- * disabled, which in turn means that none else could be
-- * manipulating idle_list, so dereferencing idle_list without pool
-- * lock is safe.
- */
- if (atomic_dec_and_test(&pool->nr_running) &&
- !list_empty(&pool->worklist)) {
-- next = first_idle_worker(pool);
-- if (next)
-- wake_up_process(next->task);
-+ sched_lock_idle_list(pool);
-+ wake_up_worker(pool);
-+ sched_unlock_idle_list(pool);
- }
-- spin_unlock_irq(&pool->lock);
- }
-
- /**
-@@ -1695,7 +1723,9 @@ static void worker_enter_idle(struct wor
- worker->last_active = jiffies;
-
- /* idle_list is LIFO */
-+ rt_lock_idle_list(pool);
- list_add(&worker->entry, &pool->idle_list);
-+ rt_unlock_idle_list(pool);
-
- if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
- mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
-@@ -1728,7 +1758,9 @@ static void worker_leave_idle(struct wor
- return;
- worker_clr_flags(worker, WORKER_IDLE);
- pool->nr_idle--;
-+ rt_lock_idle_list(pool);
- list_del_init(&worker->entry);
-+ rt_unlock_idle_list(pool);
- }
-
- static struct worker *alloc_worker(int node)
-@@ -1896,7 +1928,9 @@ static void destroy_worker(struct worker
- pool->nr_workers--;
- pool->nr_idle--;
-
-+ rt_lock_idle_list(pool);
- list_del_init(&worker->entry);
-+ rt_unlock_idle_list(pool);
- worker->flags |= WORKER_DIE;
- wake_up_process(worker->task);
- }
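
The core of the fix above is easiest to see in isolation: on RT the only readers of the idle list run in thread context, so a preempt_disable()/preempt_enable() pair suffices to protect list manipulation, while mainline keeps relying on pool->lock. A standalone sketch of that compile-time split follows; the primitives are stubbed so it builds in userspace, which is an assumption of this sketch, not how the kernel code is organized.

    #include <stdio.h>

    struct worker_pool { int lock; /* stand-in for spinlock_t */ };

    static void preempt_disable(void)      { /* stub for the kernel primitive */ }
    static void preempt_enable(void)       { /* stub */ }
    static void spin_lock_irq(int *lock)   { (void)lock; /* stub */ }
    static void spin_unlock_irq(int *lock) { (void)lock; /* stub */ }

    #ifdef CONFIG_PREEMPT_RT_BASE
    /* RT: idle-list readers run in thread context, so disabling
     * preemption is enough to serialize against them. */
    static void rt_lock_idle_list(struct worker_pool *p)      { preempt_disable(); }
    static void rt_unlock_idle_list(struct worker_pool *p)    { preempt_enable(); }
    static void sched_lock_idle_list(struct worker_pool *p)   { (void)p; }
    static void sched_unlock_idle_list(struct worker_pool *p) { (void)p; }
    #else
    /* mainline: pool->lock covers the list; the rt_ variants are no-ops */
    static void rt_lock_idle_list(struct worker_pool *p)      { (void)p; }
    static void rt_unlock_idle_list(struct worker_pool *p)    { (void)p; }
    static void sched_lock_idle_list(struct worker_pool *p)   { spin_lock_irq(&p->lock); }
    static void sched_unlock_idle_list(struct worker_pool *p) { spin_unlock_irq(&p->lock); }
    #endif

    int main(void)
    {
        struct worker_pool pool = { 0 };

        rt_lock_idle_list(&pool);      /* guards list_add()/list_del_init() */
        printf("idle-list manipulation goes here\n");
        rt_unlock_idle_list(&pool);

        sched_lock_idle_list(&pool);   /* wq_worker_sleeping() wakeup path */
        sched_unlock_idle_list(&pool);
        return 0;
    }
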
diff --git a/debian/patches-rt/workqueue-use-locallock.patch b/debian/patches-rt/workqueue-use-locallock.patch
deleted file mode 100644
index cb94cc51d..000000000
--- a/debian/patches-rt/workqueue-use-locallock.patch
+++ /dev/null
@@ -1,179 +0,0 @@
-Subject: workqueue: Use local irq lock instead of irq disable regions
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Sun, 17 Jul 2011 21:42:26 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-Use a local_irq_lock as a replacement for irq-off regions. We keep the
-irq-off semantics with regard to pool->lock and remain preemptible.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- kernel/workqueue.c | 45 ++++++++++++++++++++++++++++++---------------
- 1 file changed, 30 insertions(+), 15 deletions(-)
-
---- a/kernel/workqueue.c
-+++ b/kernel/workqueue.c
-@@ -49,6 +49,7 @@
- #include <linux/uaccess.h>
- #include <linux/sched/isolation.h>
- #include <linux/nmi.h>
-+#include <linux/locallock.h>
-
- #include "workqueue_internal.h"
-
-@@ -350,6 +351,8 @@ EXPORT_SYMBOL_GPL(system_power_efficient
- struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
- EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
-
-+static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock);
-+
- static int worker_thread(void *__worker);
- static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
-
-@@ -1123,9 +1126,11 @@ static void put_pwq_unlocked(struct pool
- * As both pwqs and pools are RCU protected, the
- * following lock operations are safe.
- */
-- spin_lock_irq(&pwq->pool->lock);
-+ rcu_read_lock();
-+ local_spin_lock_irq(pendingb_lock, &pwq->pool->lock);
- put_pwq(pwq);
-- spin_unlock_irq(&pwq->pool->lock);
-+ local_spin_unlock_irq(pendingb_lock, &pwq->pool->lock);
-+ rcu_read_unlock();
- }
- }
-
-@@ -1229,7 +1234,7 @@ static int try_to_grab_pending(struct wo
- struct worker_pool *pool;
- struct pool_workqueue *pwq;
-
-- local_irq_save(*flags);
-+ local_lock_irqsave(pendingb_lock, *flags);
-
- /* try to steal the timer if it exists */
- if (is_dwork) {
-@@ -1293,7 +1298,7 @@ static int try_to_grab_pending(struct wo
- spin_unlock(&pool->lock);
- fail:
- rcu_read_unlock();
-- local_irq_restore(*flags);
-+ local_unlock_irqrestore(pendingb_lock, *flags);
- if (work_is_canceling(work))
- return -ENOENT;
- cpu_relax();
-@@ -1398,7 +1403,13 @@ static void __queue_work(int cpu, struct
- * queued or lose PENDING. Grabbing PENDING and queueing should
- * happen with IRQ disabled.
- */
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+ /*
-+ * nort: On RT the "interrupts-disabled" rule has been replaced with
-+ * pendingb_lock.
-+ */
- lockdep_assert_irqs_disabled();
-+#endif
-
- debug_work_activate(work);
-
-@@ -1504,14 +1515,14 @@ bool queue_work_on(int cpu, struct workq
- bool ret = false;
- unsigned long flags;
-
-- local_irq_save(flags);
-+ local_lock_irqsave(pendingb_lock,flags);
-
- if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
- __queue_work(cpu, wq, work);
- ret = true;
- }
-
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(pendingb_lock, flags);
- return ret;
- }
- EXPORT_SYMBOL(queue_work_on);
-@@ -1520,8 +1531,11 @@ void delayed_work_timer_fn(struct timer_
- {
- struct delayed_work *dwork = from_timer(dwork, t, timer);
-
-+ /* XXX */
-+ /* local_lock(pendingb_lock); */
- /* should have been called from irqsafe timer with irq already off */
- __queue_work(dwork->cpu, dwork->wq, &dwork->work);
-+ /* local_unlock(pendingb_lock); */
- }
- EXPORT_SYMBOL(delayed_work_timer_fn);
-
-@@ -1576,14 +1590,14 @@ bool queue_delayed_work_on(int cpu, stru
- unsigned long flags;
-
- /* read the comment in __queue_work() */
-- local_irq_save(flags);
-+ local_lock_irqsave(pendingb_lock, flags);
-
- if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
- __queue_delayed_work(cpu, wq, dwork, delay);
- ret = true;
- }
-
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(pendingb_lock, flags);
- return ret;
- }
- EXPORT_SYMBOL(queue_delayed_work_on);
-@@ -1618,7 +1632,7 @@ bool mod_delayed_work_on(int cpu, struct
-
- if (likely(ret >= 0)) {
- __queue_delayed_work(cpu, wq, dwork, delay);
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(pendingb_lock, flags);
- }
-
- /* -ENOENT from try_to_grab_pending() becomes %true */
-@@ -1629,11 +1643,12 @@ EXPORT_SYMBOL_GPL(mod_delayed_work_on);
- static void rcu_work_rcufn(struct rcu_head *rcu)
- {
- struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu);
-+ unsigned long flags;
-
- /* read the comment in __queue_work() */
-- local_irq_disable();
-+ local_lock_irqsave(pendingb_lock, flags);
- __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
-- local_irq_enable();
-+ local_unlock_irqrestore(pendingb_lock, flags);
- }
-
- /**
-@@ -3022,7 +3037,7 @@ static bool __cancel_work_timer(struct w
-
- /* tell other tasks trying to grab @work to back off */
- mark_work_canceling(work);
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(pendingb_lock, flags);
-
- /*
- * This allows canceling during early boot. We know that @work
-@@ -3083,10 +3098,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
- */
- bool flush_delayed_work(struct delayed_work *dwork)
- {
-- local_irq_disable();
-+ local_lock_irq(pendingb_lock);
- if (del_timer_sync(&dwork->timer))
- __queue_work(dwork->cpu, dwork->wq, &dwork->work);
-- local_irq_enable();
-+ local_unlock_irq(pendingb_lock);
- return flush_work(&dwork->work);
- }
- EXPORT_SYMBOL(flush_delayed_work);
-@@ -3124,7 +3139,7 @@ static bool __cancel_work(struct work_st
- return false;
-
- set_work_pool_and_clear_pending(work, get_work_pool_id(work));
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(pendingb_lock, flags);
- return ret;
- }
-
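
The conversion above swaps the rule "interrupts disabled" for "pendingb_lock held" as what protects the PENDING bit and the queueing path. A rough userspace analogy, assuming a pthread mutex as the stand-in for the local_irq_lock (the real lock is per-CPU and only behaves as a true lock on RT; all names here are illustrative):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t pendingb_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool work_pending; /* stand-in for WORK_STRUCT_PENDING_BIT */

    /* queue_work_on() shape: claim PENDING and queue under one lock */
    static bool queue_work(void)
    {
        bool queued = false;

        pthread_mutex_lock(&pendingb_lock);   /* was: local_irq_save(flags) */
        if (!work_pending) {
            work_pending = true;
            /* __queue_work() would run here, still under the lock */
            queued = true;
        }
        pthread_mutex_unlock(&pendingb_lock); /* was: local_irq_restore(flags) */
        return queued;
    }

    int main(void)
    {
        printf("first queue: %d\n", queue_work());  /* 1: newly queued */
        printf("second queue: %d\n", queue_work()); /* 0: already pending */
        return 0;
    }
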
diff --git a/debian/patches-rt/workqueue-use-rcu.patch b/debian/patches-rt/workqueue-use-rcu.patch
deleted file mode 100644
index c5128f81b..000000000
--- a/debian/patches-rt/workqueue-use-rcu.patch
+++ /dev/null
@@ -1,317 +0,0 @@
-Subject: workqueue: Use normal rcu
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Wed, 24 Jul 2013 15:26:54 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-There is no need for sched_rcu. The undocumented reason why sched_rcu
-is used is to avoid a few explicit rcu_read_lock()/unlock() pairs by
-abusing the fact that sched_rcu reader-side critical sections are also
-protected by preempt- or irq-disabled regions.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- kernel/workqueue.c | 85 +++++++++++++++++++++++++++++------------------------
- 1 file changed, 47 insertions(+), 38 deletions(-)
-
---- a/kernel/workqueue.c
-+++ b/kernel/workqueue.c
-@@ -127,7 +127,7 @@ enum {
- *
- * PL: wq_pool_mutex protected.
- *
-- * PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads.
-+ * PR: wq_pool_mutex protected for writes. RCU protected for reads.
- *
- * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads.
- *
-@@ -136,7 +136,7 @@ enum {
- *
- * WQ: wq->mutex protected.
- *
-- * WR: wq->mutex protected for writes. Sched-RCU protected for reads.
-+ * WR: wq->mutex protected for writes. RCU protected for reads.
- *
- * MD: wq_mayday_lock protected.
- */
-@@ -183,7 +183,7 @@ struct worker_pool {
- atomic_t nr_running ____cacheline_aligned_in_smp;
-
- /*
-- * Destruction of pool is sched-RCU protected to allow dereferences
-+ * Destruction of pool is RCU protected to allow dereferences
- * from get_work_pool().
- */
- struct rcu_head rcu;
-@@ -212,7 +212,7 @@ struct pool_workqueue {
- /*
- * Release of unbound pwq is punted to system_wq. See put_pwq()
- * and pwq_unbound_release_workfn() for details. pool_workqueue
-- * itself is also sched-RCU protected so that the first pwq can be
-+ * itself is also RCU protected so that the first pwq can be
- * determined without grabbing wq->mutex.
- */
- struct work_struct unbound_release_work;
-@@ -357,20 +357,20 @@ static void workqueue_sysfs_unregister(s
- #include <trace/events/workqueue.h>
-
- #define assert_rcu_or_pool_mutex() \
-- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
-+ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
- !lockdep_is_held(&wq_pool_mutex), \
-- "sched RCU or wq_pool_mutex should be held")
-+ "RCU or wq_pool_mutex should be held")
-
- #define assert_rcu_or_wq_mutex(wq) \
-- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
-+ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
- !lockdep_is_held(&wq->mutex), \
-- "sched RCU or wq->mutex should be held")
-+ "RCU or wq->mutex should be held")
-
- #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \
-- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
-+ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
- !lockdep_is_held(&wq->mutex) && \
- !lockdep_is_held(&wq_pool_mutex), \
-- "sched RCU, wq->mutex or wq_pool_mutex should be held")
-+ "RCU, wq->mutex or wq_pool_mutex should be held")
-
- #define for_each_cpu_worker_pool(pool, cpu) \
- for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
-@@ -382,7 +382,7 @@ static void workqueue_sysfs_unregister(s
- * @pool: iteration cursor
- * @pi: integer used for iteration
- *
-- * This must be called either with wq_pool_mutex held or sched RCU read
-+ * This must be called either with wq_pool_mutex held or RCU read
- * locked. If the pool needs to be used beyond the locking in effect, the
- * caller is responsible for guaranteeing that the pool stays online.
- *
-@@ -414,7 +414,7 @@ static void workqueue_sysfs_unregister(s
- * @pwq: iteration cursor
- * @wq: the target workqueue
- *
-- * This must be called either with wq->mutex held or sched RCU read locked.
-+ * This must be called either with wq->mutex held or RCU read locked.
- * If the pwq needs to be used beyond the locking in effect, the caller is
- * responsible for guaranteeing that the pwq stays online.
- *
-@@ -550,7 +550,7 @@ static int worker_pool_assign_id(struct
- * @wq: the target workqueue
- * @node: the node ID
- *
-- * This must be called with any of wq_pool_mutex, wq->mutex or sched RCU
-+ * This must be called with any of wq_pool_mutex, wq->mutex or RCU
- * read locked.
- * If the pwq needs to be used beyond the locking in effect, the caller is
- * responsible for guaranteeing that the pwq stays online.
-@@ -694,8 +694,8 @@ static struct pool_workqueue *get_work_p
- * @work: the work item of interest
- *
- * Pools are created and destroyed under wq_pool_mutex, and allows read
-- * access under sched-RCU read lock. As such, this function should be
-- * called under wq_pool_mutex or with preemption disabled.
-+ * access under RCU read lock. As such, this function should be
-+ * called under wq_pool_mutex or inside of a rcu_read_lock() region.
- *
- * All fields of the returned pool are accessible as long as the above
- * mentioned locking is in effect. If the returned pool needs to be used
-@@ -1120,7 +1120,7 @@ static void put_pwq_unlocked(struct pool
- {
- if (pwq) {
- /*
-- * As both pwqs and pools are sched-RCU protected, the
-+ * As both pwqs and pools are RCU protected, the
- * following lock operations are safe.
- */
- spin_lock_irq(&pwq->pool->lock);
-@@ -1248,6 +1248,7 @@ static int try_to_grab_pending(struct wo
- if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
- return 0;
-
-+ rcu_read_lock();
- /*
- * The queueing is in progress, or it is already queued. Try to
- * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
-@@ -1286,10 +1287,12 @@ static int try_to_grab_pending(struct wo
- set_work_pool_and_keep_pending(work, pool->id);
-
- spin_unlock(&pool->lock);
-+ rcu_read_unlock();
- return 1;
- }
- spin_unlock(&pool->lock);
- fail:
-+ rcu_read_unlock();
- local_irq_restore(*flags);
- if (work_is_canceling(work))
- return -ENOENT;
-@@ -1403,6 +1406,7 @@ static void __queue_work(int cpu, struct
- if (unlikely(wq->flags & __WQ_DRAINING) &&
- WARN_ON_ONCE(!is_chained_work(wq)))
- return;
-+ rcu_read_lock();
- retry:
- if (req_cpu == WORK_CPU_UNBOUND)
- cpu = wq_select_unbound_cpu(raw_smp_processor_id());
-@@ -1459,10 +1463,8 @@ static void __queue_work(int cpu, struct
- /* pwq determined, queue */
- trace_workqueue_queue_work(req_cpu, pwq, work);
-
-- if (WARN_ON(!list_empty(&work->entry))) {
-- spin_unlock(&pwq->pool->lock);
-- return;
-- }
-+ if (WARN_ON(!list_empty(&work->entry)))
-+ goto out;
-
- pwq->nr_in_flight[pwq->work_color]++;
- work_flags = work_color_to_flags(pwq->work_color);
-@@ -1480,7 +1482,9 @@ static void __queue_work(int cpu, struct
-
- insert_work(pwq, work, worklist, work_flags);
-
-+out:
- spin_unlock(&pwq->pool->lock);
-+ rcu_read_unlock();
- }
-
- /**
-@@ -2878,14 +2882,14 @@ static bool start_flush_work(struct work
-
- might_sleep();
-
-- local_irq_disable();
-+ rcu_read_lock();
- pool = get_work_pool(work);
- if (!pool) {
-- local_irq_enable();
-+ rcu_read_unlock();
- return false;
- }
-
-- spin_lock(&pool->lock);
-+ spin_lock_irq(&pool->lock);
- /* see the comment in try_to_grab_pending() with the same code */
- pwq = get_work_pwq(work);
- if (pwq) {
-@@ -2917,10 +2921,11 @@ static bool start_flush_work(struct work
- lock_map_acquire(&pwq->wq->lockdep_map);
- lock_map_release(&pwq->wq->lockdep_map);
- }
--
-+ rcu_read_unlock();
- return true;
- already_gone:
- spin_unlock_irq(&pool->lock);
-+ rcu_read_unlock();
- return false;
- }
-
-@@ -3364,7 +3369,7 @@ static void rcu_free_pool(struct rcu_hea
- * put_unbound_pool - put a worker_pool
- * @pool: worker_pool to put
- *
-- * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU
-+ * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU
- * safe manner. get_unbound_pool() calls this function on its failure path
- * and this function should be able to release pools which went through,
- * successfully or not, init_worker_pool().
-@@ -4328,7 +4333,8 @@ bool workqueue_congested(int cpu, struct
- struct pool_workqueue *pwq;
- bool ret;
-
-- rcu_read_lock_sched();
-+ rcu_read_lock();
-+ preempt_disable();
-
- if (cpu == WORK_CPU_UNBOUND)
- cpu = smp_processor_id();
-@@ -4339,7 +4345,8 @@ bool workqueue_congested(int cpu, struct
- pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
-
- ret = !list_empty(&pwq->delayed_works);
-- rcu_read_unlock_sched();
-+ preempt_enable();
-+ rcu_read_unlock();
-
- return ret;
- }
-@@ -4365,15 +4372,15 @@ unsigned int work_busy(struct work_struc
- if (work_pending(work))
- ret |= WORK_BUSY_PENDING;
-
-- local_irq_save(flags);
-+ rcu_read_lock();
- pool = get_work_pool(work);
- if (pool) {
-- spin_lock(&pool->lock);
-+ spin_lock_irqsave(&pool->lock, flags);
- if (find_worker_executing_work(pool, work))
- ret |= WORK_BUSY_RUNNING;
-- spin_unlock(&pool->lock);
-+ spin_unlock_irqrestore(&pool->lock, flags);
- }
-- local_irq_restore(flags);
-+ rcu_read_unlock();
-
- return ret;
- }
-@@ -4557,7 +4564,7 @@ void show_workqueue_state(void)
- unsigned long flags;
- int pi;
-
-- rcu_read_lock_sched();
-+ rcu_read_lock();
-
- pr_info("Showing busy workqueues and worker pools:\n");
-
-@@ -4622,7 +4629,7 @@ void show_workqueue_state(void)
- touch_nmi_watchdog();
- }
-
-- rcu_read_unlock_sched();
-+ rcu_read_unlock();
- }
-
- /* used to show worker information through /proc/PID/{comm,stat,status} */
-@@ -5009,16 +5016,16 @@ bool freeze_workqueues_busy(void)
- * nr_active is monotonically decreasing. It's safe
- * to peek without lock.
- */
-- rcu_read_lock_sched();
-+ rcu_read_lock();
- for_each_pwq(pwq, wq) {
- WARN_ON_ONCE(pwq->nr_active < 0);
- if (pwq->nr_active) {
- busy = true;
-- rcu_read_unlock_sched();
-+ rcu_read_unlock();
- goto out_unlock;
- }
- }
-- rcu_read_unlock_sched();
-+ rcu_read_unlock();
- }
- out_unlock:
- mutex_unlock(&wq_pool_mutex);
-@@ -5213,7 +5220,8 @@ static ssize_t wq_pool_ids_show(struct d
- const char *delim = "";
- int node, written = 0;
-
-- rcu_read_lock_sched();
-+ get_online_cpus();
-+ rcu_read_lock();
- for_each_node(node) {
- written += scnprintf(buf + written, PAGE_SIZE - written,
- "%s%d:%d", delim, node,
-@@ -5221,7 +5229,8 @@ static ssize_t wq_pool_ids_show(struct d
- delim = " ";
- }
- written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
-- rcu_read_unlock_sched();
-+ rcu_read_unlock();
-+ put_online_cpus();
-
- return written;
- }
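
After this conversion the read side is a plain rcu_read_lock()/rcu_read_unlock() pair around the lookup, instead of relying on preemption being disabled. Userspace RCU has the same read-side shape; the sketch below, assuming liburcu is available (build with -lurcu), protects a pointer lookup roughly the way get_work_pool() callers now do. The struct and variable names are illustrative.

    #include <urcu.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct pool { int id; };

    static struct pool *current_pool;

    static void reader(void)
    {
        struct pool *p;

        rcu_read_lock();                /* pins the pool against destruction */
        p = rcu_dereference(current_pool);
        if (p)
            printf("pool id: %d\n", p->id);
        rcu_read_unlock();              /* after this, p must not be used */
    }

    int main(void)
    {
        struct pool *p = malloc(sizeof(*p));

        p->id = 1;
        rcu_register_thread();          /* liburcu: register as an RCU reader */
        rcu_assign_pointer(current_pool, p);
        reader();
        rcu_assign_pointer(current_pool, NULL);
        synchronize_rcu();              /* wait for readers, then free */
        free(p);
        rcu_unregister_thread();
        return 0;
    }
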
diff --git a/debian/patches-rt/x86-Disable-HAVE_ARCH_JUMP_LABEL.patch b/debian/patches-rt/x86-Disable-HAVE_ARCH_JUMP_LABEL.patch
new file mode 100644
index 000000000..cd7830853
--- /dev/null
+++ b/debian/patches-rt/x86-Disable-HAVE_ARCH_JUMP_LABEL.patch
@@ -0,0 +1,33 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 1 Jul 2019 17:39:28 +0200
+Subject: [PATCH] x86: Disable HAVE_ARCH_JUMP_LABEL
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
+
+__text_poke() does:
+| local_irq_save(flags);
+…
+| ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
+
+which does not work on -RT because the PTE lock is a spinlock_t, i.e. a sleeping lock on -RT, and must not be taken with interrupts disabled.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/x86/Kconfig | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -127,8 +127,8 @@ config X86
+ select HAVE_ALIGNED_STRUCT_PAGE if SLUB
+ select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_ARCH_HUGE_VMAP if X86_64 || X86_PAE
+- select HAVE_ARCH_JUMP_LABEL
+- select HAVE_ARCH_JUMP_LABEL_RELATIVE
++ select HAVE_ARCH_JUMP_LABEL if !PREEMPT_RT_FULL
++ select HAVE_ARCH_JUMP_LABEL_RELATIVE if !PREEMPT_RT_FULL
+ select HAVE_ARCH_KASAN if X86_64
+ select HAVE_ARCH_KGDB
+ select HAVE_ARCH_MMAP_RND_BITS if MMU
diff --git a/debian/patches-rt/x86-crypto-reduce-preempt-disabled-regions.patch b/debian/patches-rt/x86-crypto-reduce-preempt-disabled-regions.patch
index 4ab5e0855..b8175fd74 100644
--- a/debian/patches-rt/x86-crypto-reduce-preempt-disabled-regions.patch
+++ b/debian/patches-rt/x86-crypto-reduce-preempt-disabled-regions.patch
@@ -1,7 +1,7 @@
Subject: x86: crypto: Reduce preempt disabled regions
From: Peter Zijlstra <peterz@infradead.org>
Date: Mon, 14 Nov 2011 18:19:27 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Restrict the preempt disabled regions to the actual floating point
operations and enable preemption for the administrative actions.
@@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
-@@ -415,14 +415,14 @@ static int ecb_encrypt(struct skcipher_r
+@@ -402,14 +402,14 @@ static int ecb_encrypt(struct skcipher_r
err = skcipher_walk_virt(&walk, req, true);
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return err;
}
-@@ -437,14 +437,14 @@ static int ecb_decrypt(struct skcipher_r
+@@ -424,14 +424,14 @@ static int ecb_decrypt(struct skcipher_r
err = skcipher_walk_virt(&walk, req, true);
@@ -53,7 +53,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return err;
}
-@@ -459,14 +459,14 @@ static int cbc_encrypt(struct skcipher_r
+@@ -446,14 +446,14 @@ static int cbc_encrypt(struct skcipher_r
err = skcipher_walk_virt(&walk, req, true);
@@ -70,7 +70,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return err;
}
-@@ -481,14 +481,14 @@ static int cbc_decrypt(struct skcipher_r
+@@ -468,14 +468,14 @@ static int cbc_decrypt(struct skcipher_r
err = skcipher_walk_virt(&walk, req, true);
@@ -87,7 +87,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return err;
}
-@@ -538,18 +538,20 @@ static int ctr_crypt(struct skcipher_req
+@@ -525,18 +525,20 @@ static int ctr_crypt(struct skcipher_req
err = skcipher_walk_virt(&walk, req, true);
diff --git a/debian/patches-rt/x86-highmem-add-a-already-used-pte-check.patch b/debian/patches-rt/x86-highmem-add-a-already-used-pte-check.patch
index 26557a1c5..a973a4234 100644
--- a/debian/patches-rt/x86-highmem-add-a-already-used-pte-check.patch
+++ b/debian/patches-rt/x86-highmem-add-a-already-used-pte-check.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 11 Mar 2013 17:09:55 +0100
Subject: x86/highmem: Add a "already used pte" check
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
This is a copy from kmap_atomic_prot().
@@ -12,7 +12,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
-@@ -69,6 +69,8 @@ void *kmap_atomic_prot_pfn(unsigned long
+@@ -56,6 +56,8 @@ void *kmap_atomic_prot_pfn(unsigned long
type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
diff --git a/debian/patches-rt/x86-ima-Check-EFI_RUNTIME_SERVICES-before-using.patch b/debian/patches-rt/x86-ima-Check-EFI_RUNTIME_SERVICES-before-using.patch
deleted file mode 100644
index 350fb178d..000000000
--- a/debian/patches-rt/x86-ima-Check-EFI_RUNTIME_SERVICES-before-using.patch
+++ /dev/null
@@ -1,32 +0,0 @@
-From: Scott Wood <swood@redhat.com>
-Date: Tue, 23 Apr 2019 17:48:07 -0500
-Subject: [PATCH] x86/ima: Check EFI_RUNTIME_SERVICES before using
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-Checking efi_enabled(EFI_BOOT) is not sufficient to ensure that
-EFI runtime services are available, e.g. if efi=noruntime is used.
-
-Without this, I get an oops on a PREEMPT_RT kernel where efi=noruntime is
-the default.
-
-Fixes: 399574c64eaf94e8 ("x86/ima: retry detecting secure boot mode")
-Signed-off-by: Scott Wood <swood@redhat.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/kernel/ima_arch.c | 5 +++++
- 1 file changed, 5 insertions(+)
-
---- a/arch/x86/kernel/ima_arch.c
-+++ b/arch/x86/kernel/ima_arch.c
-@@ -17,6 +17,11 @@ static enum efi_secureboot_mode get_sb_m
-
- size = sizeof(secboot);
-
-+ if (!efi_enabled(EFI_RUNTIME_SERVICES)) {
-+ pr_info("ima: secureboot mode unknown, no efi\n");
-+ return efi_secureboot_mode_unknown;
-+ }
-+
- /* Get variable contents into buffer */
- status = efi.get_variable(efi_SecureBoot_name, &efi_variable_guid,
- NULL, &size, &secboot);
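
The guard in the patch removed above follows a generic pattern: verify that a runtime facility is actually usable before calling into it, and return an explicit "unknown" rather than oopsing. A minimal sketch of that pattern, with every name an illustrative stand-in for the EFI helpers:

    #include <stdbool.h>
    #include <stdio.h>

    enum secureboot_mode { SB_UNKNOWN, SB_DISABLED, SB_ENABLED };

    /* stand-in for efi_enabled(EFI_RUNTIME_SERVICES) */
    static bool efi_runtime_available;

    static enum secureboot_mode get_sb_mode(void)
    {
        if (!efi_runtime_available) {
            /* bail out before touching efi.get_variable() */
            printf("ima: secureboot mode unknown, no efi\n");
            return SB_UNKNOWN;
        }
        /* ... read the SecureBoot EFI variable here ... */
        return SB_ENABLED;
    }

    int main(void)
    {
        printf("mode: %d\n", get_sb_mode());
        return 0;
    }
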
diff --git a/debian/patches-rt/x86-ioapic-Don-t-let-setaffinity-unmask-threaded-EOI.patch b/debian/patches-rt/x86-ioapic-Don-t-let-setaffinity-unmask-threaded-EOI.patch
index 51fc3ba1f..f12e13b95 100644
--- a/debian/patches-rt/x86-ioapic-Don-t-let-setaffinity-unmask-threaded-EOI.patch
+++ b/debian/patches-rt/x86-ioapic-Don-t-let-setaffinity-unmask-threaded-EOI.patch
@@ -2,7 +2,7 @@ From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 17 Jul 2018 18:25:31 +0200
Subject: [PATCH] x86/ioapic: Don't let setaffinity unmask threaded EOI
interrupt too early
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
There is an issue with threaded interrupts which are marked ONESHOT
and using the fasteoi handler.
@@ -35,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
-@@ -1722,19 +1722,20 @@ static bool io_apic_level_ack_pending(st
+@@ -1724,19 +1724,20 @@ static bool io_apic_level_ack_pending(st
return false;
}
@@ -60,7 +60,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Only migrate the irq if the ack has been received.
*
* On rare occasions the broadcast level triggered ack gets
-@@ -1763,15 +1764,17 @@ static inline void ioapic_irqd_unmask(st
+@@ -1765,15 +1766,17 @@ static inline void ioapic_irqd_unmask(st
*/
if (!io_apic_level_ack_pending(data->chip_data))
irq_move_masked_irq(data);
@@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
}
#endif
-@@ -1780,11 +1783,11 @@ static void ioapic_ack_level(struct irq_
+@@ -1782,11 +1785,11 @@ static void ioapic_ack_level(struct irq_
{
struct irq_cfg *cfg = irqd_cfg(irq_data);
unsigned long v;
@@ -95,7 +95,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* It appears there is an erratum which affects at least version 0x11
-@@ -1839,7 +1842,7 @@ static void ioapic_ack_level(struct irq_
+@@ -1841,7 +1844,7 @@ static void ioapic_ack_level(struct irq_
eoi_ioapic_pin(cfg->vector, irq_data->chip_data);
}
diff --git a/debian/patches-rt/x86-kvm-require-const-tsc-for-rt.patch b/debian/patches-rt/x86-kvm-require-const-tsc-for-rt.patch
index bb02f50c6..3b3c6d308 100644
--- a/debian/patches-rt/x86-kvm-require-const-tsc-for-rt.patch
+++ b/debian/patches-rt/x86-kvm-require-const-tsc-for-rt.patch
@@ -1,7 +1,7 @@
Subject: x86: kvm Require const tsc for RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 06 Nov 2011 12:26:18 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Non constant TSC is a nightmare on bare metal already, but with
virtualization it becomes a complete disaster because the workarounds
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -6936,6 +6936,14 @@ int kvm_arch_init(void *opaque)
+@@ -7013,6 +7013,14 @@ int kvm_arch_init(void *opaque)
goto out;
}
diff --git a/debian/patches-rt/x86-ldt-Initialize-the-context-lock-for-init_mm.patch b/debian/patches-rt/x86-ldt-Initialize-the-context-lock-for-init_mm.patch
new file mode 100644
index 000000000..eb9fe1a41
--- /dev/null
+++ b/debian/patches-rt/x86-ldt-Initialize-the-context-lock-for-init_mm.patch
@@ -0,0 +1,28 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 1 Jul 2019 17:53:13 +0200
+Subject: [PATCH] x86/ldt: Initialize the context lock for init_mm
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
+
+The mutex mm->context.lock is not initialized for init_mm. This wasn't
+a problem because it remained unused. This changed, however,
+since commit
+ 4fc19708b165c ("x86/alternatives: Initialize temporary mm for patching")
+
+Initialize the mutex for init_mm.
+
+Fixes: 4fc19708b165c ("x86/alternatives: Initialize temporary mm for patching")
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/x86/include/asm/mmu.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/include/asm/mmu.h
++++ b/arch/x86/include/asm/mmu.h
+@@ -59,6 +59,7 @@ typedef struct {
+ #define INIT_MM_CONTEXT(mm) \
+ .context = { \
+ .ctx_id = 1, \
++ .lock = __MUTEX_INITIALIZER(mm.context.lock), \
+ }
+
+ void leave_mm(int cpu);
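
The one-line fix works because __MUTEX_INITIALIZER lets the embedded mutex be set up inside the static designated initializer for the context. A userspace parallel, assuming pthreads with PTHREAD_MUTEX_INITIALIZER as the static-initializer counterpart (struct names illustrative):

    #include <pthread.h>
    #include <stdio.h>

    struct context {
        int ctx_id;
        pthread_mutex_t lock;
    };

    struct mm {
        struct context context;
    };

    /* without .lock the mutex would be zero-filled, which is not a
     * portable initialization -- the same class of bug as the init_mm
     * context lock being fixed above */
    static struct mm init_mm = {
        .context = {
            .ctx_id = 1,
            .lock   = PTHREAD_MUTEX_INITIALIZER,
        },
    };

    int main(void)
    {
        pthread_mutex_lock(&init_mm.context.lock);
        printf("statically initialized context lock works\n");
        pthread_mutex_unlock(&init_mm.context.lock);
        return 0;
    }
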
diff --git a/debian/patches-rt/x86-preempt-lazy.patch b/debian/patches-rt/x86-preempt-lazy.patch
index 767f577fa..e15fbf9b2 100644
--- a/debian/patches-rt/x86-preempt-lazy.patch
+++ b/debian/patches-rt/x86-preempt-lazy.patch
@@ -1,7 +1,7 @@
Subject: x86: Support for lazy preemption
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 01 Nov 2012 11:03:47 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
Implement the x86 pieces for lazy preempt.
@@ -18,17 +18,17 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
-@@ -183,6 +183,7 @@ config X86
+@@ -192,6 +192,7 @@ config X86
select HAVE_PCI
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_PREEMPT_LAZY
select HAVE_RCU_TABLE_FREE if PARAVIRT
- select HAVE_RCU_TABLE_INVALIDATE if HAVE_RCU_TABLE_FREE
select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_RELIABLE_STACKTRACE if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
-@@ -134,7 +134,7 @@ static long syscall_trace_enter(struct p
+@@ -135,7 +135,7 @@ static long syscall_trace_enter(struct p
#define EXIT_TO_USERMODE_LOOP_FLAGS \
(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
-@@ -149,7 +149,7 @@ static void exit_to_usermode_loop(struct
+@@ -150,7 +150,7 @@ static void exit_to_usermode_loop(struct
/* We have work to do. */
local_irq_enable();
@@ -48,10 +48,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
-@@ -767,8 +767,25 @@ END(ret_from_exception)
+@@ -768,8 +768,25 @@ END(ret_from_exception)
+ #ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
DISABLE_INTERRUPTS(CLBR_ANY)
- .Lneed_resched:
+ # preempt count == 0 + NEED_RS set?
cmpl $0, PER_CPU_VAR(__preempt_count)
+#ifndef CONFIG_PREEMPT_LAZY
@@ -60,7 +60,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ jz test_int_off
+
+ # atleast preempt count == 0 ?
-+ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
++ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
+ jne restore_all_kernel
+
+ movl PER_CPU_VAR(current_task), %ebp
@@ -79,14 +79,14 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
@@ -646,7 +646,23 @@ GLOBAL(swapgs_restore_regs_and_return_to
btl $9, EFLAGS(%rsp) /* were interrupts off? */
jnc 1f
- 0: cmpl $0, PER_CPU_VAR(__preempt_count)
+ cmpl $0, PER_CPU_VAR(__preempt_count)
+#ifndef CONFIG_PREEMPT_LAZY
jnz 1f
+#else
+ jz do_preempt_schedule_irq
+
+ # atleast preempt count == 0 ?
-+ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
++ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
+ jnz 1f
+
+ movq PER_CPU_VAR(current_task), %rcx
@@ -98,8 +98,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+do_preempt_schedule_irq:
+#endif
call preempt_schedule_irq
- jmp 0b
1:
+ #endif
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -89,17 +89,46 @@ static __always_inline void __preempt_co
diff --git a/debian/patches-rt/x86-signal-delay-calling-signals-on-32bit.patch b/debian/patches-rt/x86-signal-delay-calling-signals-on-32bit.patch
index 4360ef02c..d0842728f 100644
--- a/debian/patches-rt/x86-signal-delay-calling-signals-on-32bit.patch
+++ b/debian/patches-rt/x86-signal-delay-calling-signals-on-32bit.patch
@@ -1,7 +1,7 @@
From: Yang Shi <yang.shi@linaro.org>
Date: Thu, 10 Dec 2015 10:58:51 -0800
Subject: x86/signal: delay calling signals on 32bit
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
When running some ptrace single step tests on x86-32 machine, the below problem
is triggered:
diff --git a/debian/patches-rt/x86-stackprot-no-random-on-rt.patch b/debian/patches-rt/x86-stackprot-no-random-on-rt.patch
index a512ba269..c715b5af9 100644
--- a/debian/patches-rt/x86-stackprot-no-random-on-rt.patch
+++ b/debian/patches-rt/x86-stackprot-no-random-on-rt.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 16 Dec 2010 14:25:18 +0100
Subject: x86: stackprotector: Avoid random pool on rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2-rt1.tar.xz
CPU bringup calls into the random pool to initialize the stack
canary. During boot that works nicely even on RT as the might sleep
diff --git a/debian/patches-rt/x86-use-gen-rwsem-spinlocks-rt.patch b/debian/patches-rt/x86-use-gen-rwsem-spinlocks-rt.patch
deleted file mode 100644
index 718b89769..000000000
--- a/debian/patches-rt/x86-use-gen-rwsem-spinlocks-rt.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Sun, 26 Jul 2009 02:21:32 +0200
-Subject: x86: Use generic rwsem_spinlocks on -rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-Simplifies the separation of anon_rw_semaphores and rw_semaphores for
--rt.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
----
- arch/x86/Kconfig | 5 ++++-
- 1 file changed, 4 insertions(+), 1 deletion(-)
-
---- a/arch/x86/Kconfig
-+++ b/arch/x86/Kconfig
-@@ -269,8 +269,11 @@ config ARCH_MAY_HAVE_PC_FDC
- def_bool y
- depends on ISA_DMA_API
-
-+config RWSEM_GENERIC_SPINLOCK
-+ def_bool PREEMPT_RT_FULL
-+
- config RWSEM_XCHGADD_ALGORITHM
-- def_bool y
-+ def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL
-
- config GENERIC_CALIBRATE_DELAY
- def_bool y