author     Salvatore Bonaccorso <carnil@debian.org>  2020-11-01 11:49:30 +0100
committer  Salvatore Bonaccorso <carnil@debian.org>  2020-11-01 12:02:50 +0100
commit     de9f3d1269595f3d927f0534afa1baeed07ef787 (patch)
tree       b9cd84199ee1954d4bb76e9769827faedc072cb1
parent     8437d1385c29e9870f74d7177356a1776b765347 (diff)
download   linux-debian-de9f3d1269595f3d927f0534afa1baeed07ef787.tar.gz
[rt] Update to 5.9.1-rt20
[rt] Refresh "stop_machine: Add function and caller debug info"
[rt] Comment out seqlock (sub) patchset
-rw-r--r--  debian/changelog | 3
-rw-r--r--  debian/patches-rt/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch | 16
-rw-r--r--  debian/patches-rt/0001-blk-mq-Don-t-complete-on-a-remote-CPU-in-force-threa.patch | 38
-rw-r--r--  debian/patches-rt/0001-crash-add-VMCOREINFO-macro-to-define-offset-in-a-str.patch | 34
-rw-r--r--  debian/patches-rt/0001-locking-rtmutex-Remove-cruft.patch | 4
-rw-r--r--  debian/patches-rt/0001-printk-rb-add-printk-ring-buffer-documentation.patch | 394
-rw-r--r--  debian/patches-rt/0001-printk-refactor-kmsg_dump_get_buffer.patch | 123
-rw-r--r--  debian/patches-rt/0001-stop_machine-Add-function-and-caller-debug-info.patch | 24
-rw-r--r--  debian/patches-rt/0001-time-sched_clock-Use-raw_read_seqcount_latch-during-.patch | 2
-rw-r--r--  debian/patches-rt/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch | 22
-rw-r--r--  debian/patches-rt/0002-blk-mq-Always-complete-remote-completions-requests-i.patch | 39
-rw-r--r--  debian/patches-rt/0002-locking-rtmutex-Remove-output-from-deadlock-detector.patch | 4
-rw-r--r--  debian/patches-rt/0002-mm-swap-Do-not-abuse-the-seqcount_t-latching-API.patch | 2
-rw-r--r--  debian/patches-rt/0002-printk-add-lockless-ringbuffer.patch | 2148
-rw-r--r--  debian/patches-rt/0002-printk-use-buffer-pools-for-sprint-buffers.patch | 194
-rw-r--r--  debian/patches-rt/0002-sched-Fix-balance_callback.patch | 6
-rw-r--r--  debian/patches-rt/0003-Revert-printk-lock-unlock-console-only-for-new-logbu.patch | 61
-rw-r--r--  debian/patches-rt/0003-blk-mq-Use-llist_head-for-blk_cpu_done.patch | 166
-rw-r--r--  debian/patches-rt/0003-locking-rtmutex-Move-rt_mutex_init-outside-of-CONFIG.patch | 4
-rw-r--r--  debian/patches-rt/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch | 2
-rw-r--r--  debian/patches-rt/0003-printk-change-clear_seq-to-atomic64_t.patch | 110
-rw-r--r--  debian/patches-rt/0003-printk-rb-define-ring-buffer-struct-and-initializer.patch | 58
-rw-r--r--  debian/patches-rt/0003-sched-hotplug-Ensure-only-per-cpu-kthreads-run-durin.patch | 54
-rw-r--r--  debian/patches-rt/0003-seqlock-Introduce-seqcount_latch_t.patch | 2
-rw-r--r--  debian/patches-rt/0004-locking-rtmutex-Remove-rt_mutex_timed_lock.patch | 4
-rw-r--r--  debian/patches-rt/0004-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch | 2
-rw-r--r--  debian/patches-rt/0004-printk-rb-add-writer-interface.patch | 234
-rw-r--r--  debian/patches-rt/0004-printk-remove-logbuf_lock-add-syslog_lock.patch | 573
-rw-r--r--  debian/patches-rt/0004-printk-use-the-lockless-ringbuffer.patch | 1520
-rw-r--r--  debian/patches-rt/0004-sched-core-Wait-for-tasks-being-pushed-away-on-hotpl.patch | 20
-rw-r--r--  debian/patches-rt/0004-time-sched_clock-Use-seqcount_latch_t.patch | 2
-rw-r--r--  debian/patches-rt/0005-MAINTAIERS-Add-John-Ogness-as-printk-reviewer.patch | 29
-rw-r--r--  debian/patches-rt/0005-locking-rtmutex-Handle-the-various-new-futex-race-co.patch | 4
-rw-r--r--  debian/patches-rt/0005-printk-rb-add-basic-non-blocking-reading-interface.patch | 260
-rw-r--r--  debian/patches-rt/0005-printk-remove-safe-buffers.patch (renamed from debian/patches-rt/0011-printk_safe-remove-printk-safe-code.patch) | 397
-rw-r--r--  debian/patches-rt/0005-timekeeping-Use-seqcount_latch_t.patch | 2
-rw-r--r--  debian/patches-rt/0005-workqueue-Manually-break-affinity-on-hotplug.patch | 7
-rw-r--r--  debian/patches-rt/0006-console-add-write_atomic-interface.patch (renamed from debian/patches-rt/0002-printk-rb-add-prb-locking-functions.patch) | 118
-rw-r--r--  debian/patches-rt/0006-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch | 4
-rw-r--r--  debian/patches-rt/0006-printk-rb-add-blocking-reader-support.patch | 162
-rw-r--r--  debian/patches-rt/0006-printk-ringbuffer-support-dataless-records.patch | 253
-rw-r--r--  debian/patches-rt/0006-sched-hotplug-Consolidate-task-migration-on-CPU-unpl.patch | 14
-rw-r--r--  debian/patches-rt/0006-x86-tsc-Use-seqcount_latch_t.patch | 2
-rw-r--r--  debian/patches-rt/0007-locking-rtmutex-Add-rtmutex_lock_killable.patch | 50
-rw-r--r--  debian/patches-rt/0007-locking-rtmutex-Make-lock_killable-work.patch (renamed from debian/patches-rt/0008-locking-rtmutex-Make-lock_killable-work.patch) | 4
-rw-r--r--  debian/patches-rt/0007-printk-rb-add-functionality-required-by-printk.patch | 160
-rw-r--r--  debian/patches-rt/0007-printk-reduce-LOG_BUF_SHIFT-range-for-H8300.patch | 33
-rw-r--r--  debian/patches-rt/0007-rbtree_latch-Use-seqcount_latch_t.patch | 2
-rw-r--r--  debian/patches-rt/0007-sched-Fix-hotplug-vs-CPU-bandwidth-control.patch | 38
-rw-r--r--  debian/patches-rt/0007-serial-8250-implement-write_atomic.patch (renamed from debian/patches-rt/0020-serial-8250-implement-write_atomic.patch) | 382
-rw-r--r--  debian/patches-rt/0008-docs-vmcoreinfo-add-lockless-printk-ringbuffer-vmcor.patch | 182
-rw-r--r--  debian/patches-rt/0008-locking-spinlock-Split-the-lock-types-header.patch (renamed from debian/patches-rt/0009-locking-spinlock-Split-the-lock-types-header.patch) | 4
-rw-r--r--  debian/patches-rt/0008-printk-add-ring-buffer-and-kthread.patch | 169
-rw-r--r--  debian/patches-rt/0008-printk-inline-log_output-log_store-in-vprintk_store.patch | 190
-rw-r--r--  debian/patches-rt/0008-sched-Massage-set_cpus_allowed.patch | 10
-rw-r--r--  debian/patches-rt/0008-seqlock-seqcount-latch-APIs-Only-allow-seqcount_latc.patch | 2
-rw-r--r--  debian/patches-rt/0009-locking-rtmutex-Avoid-include-hell.patch (renamed from debian/patches-rt/0010-locking-rtmutex-Avoid-include-hell.patch) | 4
-rw-r--r--  debian/patches-rt/0009-printk-relocate-printk_delay-and-vprintk_default.patch | 83
-rw-r--r--  debian/patches-rt/0009-printk-remove-exclusive-console-hack.patch | 102
-rw-r--r--  debian/patches-rt/0009-sched-Add-migrate_disable.patch | 23
-rw-r--r--  debian/patches-rt/0009-scripts-gdb-add-utils.read_ulong.patch | 35
-rw-r--r--  debian/patches-rt/0009-seqlock-seqcount_LOCKNAME_t-Standardize-naming-conve.patch | 2
-rw-r--r--  debian/patches-rt/0010-lockdep-Reduce-header-files-in-debug_locks.h.patch (renamed from debian/patches-rt/0011-lockdep-Reduce-header-files-in-debug_locks.h.patch) | 2
-rw-r--r--  debian/patches-rt/0010-printk-combine-boot_delay_msec-into-printk_delay.patch | 38
-rw-r--r--  debian/patches-rt/0010-printk-redirect-emit-store-to-new-ringbuffer.patch | 438
-rw-r--r--  debian/patches-rt/0010-sched-Fix-migrate_disable-vs-set_cpus_allowed_ptr.patch | 172
-rw-r--r--  debian/patches-rt/0010-scripts-gdb-update-for-lockless-printk-ringbuffer.patch | 389
-rw-r--r--  debian/patches-rt/0010-seqlock-Use-unique-prefix-for-seqcount_t-property-ac.patch | 2
-rw-r--r--  debian/patches-rt/0011-locking-split-out-the-rbtree-definition.patch (renamed from debian/patches-rt/0012-locking-split-out-the-rbtree-definition.patch) | 4
-rw-r--r--  debian/patches-rt/0011-printk-introduce-kernel-sync-mode.patch | 325
-rw-r--r--  debian/patches-rt/0011-printk-ringbuffer-fix-setting-state-in-desc_read.patch | 75
-rw-r--r--  debian/patches-rt/0011-sched-core-Make-migrate-disable-and-CPU-hotplug-coop.patch | 14
-rw-r--r--  debian/patches-rt/0011-seqlock-seqcount_t-Implement-all-read-APIs-as-statem.patch | 2
-rw-r--r--  debian/patches-rt/0012-locking-rtmutex-Provide-rt_mutex_slowlock_locked.patch (renamed from debian/patches-rt/0013-locking-rtmutex-Provide-rt_mutex_slowlock_locked.patch) | 4
-rw-r--r--  debian/patches-rt/0012-printk-minimize-console-locking-implementation.patch | 330
-rw-r--r--  debian/patches-rt/0012-printk-move-console-printing-to-kthreads.patch | 800
-rw-r--r--  debian/patches-rt/0012-printk-ringbuffer-avoid-memcpy-on-state_var.patch | 42
-rw-r--r--  debian/patches-rt/0012-sched-rt-Use-cpumask_any-_distribute.patch | 21
-rw-r--r--  debian/patches-rt/0012-seqlock-seqcount_LOCKNAME_t-Introduce-PREEMPT_RT-sup.patch | 2
-rw-r--r--  debian/patches-rt/0013-locking-rtmutex-export-lockdep-less-version-of-rt_mu.patch (renamed from debian/patches-rt/0014-locking-rtmutex-export-lockdep-less-version-of-rt_mu.patch) | 23
-rw-r--r--  debian/patches-rt/0013-printk-remove-deferred-printing.patch | 360
-rw-r--r--  debian/patches-rt/0013-printk-ringbuffer-relocate-get_data.patch | 149
-rw-r--r--  debian/patches-rt/0013-printk-track-seq-per-console.patch | 93
-rw-r--r--  debian/patches-rt/0013-sched-rt-Use-the-full-cpumask-for-balancing.patch | 6
-rw-r--r--  debian/patches-rt/0013-seqlock-PREEMPT_RT-Do-not-starve-seqlock_t-writers.patch | 2
-rw-r--r--  debian/patches-rt/0014-printk-add-console-handover.patch | 68
-rw-r--r--  debian/patches-rt/0014-printk-do-boot_delay_msec-inside-printk_delay.patch | 72
-rw-r--r--  debian/patches-rt/0014-printk-ringbuffer-add-BLK_DATALESS-macro.patch | 47
-rw-r--r--  debian/patches-rt/0014-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch (renamed from debian/patches-rt/0015-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch) | 10
-rw-r--r--  debian/patches-rt/0014-sched-lockdep-Annotate-pi_lock-recursion.patch | 10
-rw-r--r--  debian/patches-rt/0015-locking-rtmutex-add-sleeping-lock-implementation.patch (renamed from debian/patches-rt/0016-locking-rtmutex-add-sleeping-lock-implementation.patch) | 18
-rw-r--r--  debian/patches-rt/0015-printk-print-history-for-new-consoles.patch | 119
-rw-r--r--  debian/patches-rt/0015-printk-ringbuffer-clear-initial-reserved-fields.patch | 136
-rw-r--r--  debian/patches-rt/0015-sched-Fix-migrate_disable-vs-rt-dl-balancing.patch | 48
-rw-r--r--  debian/patches-rt/0016-locking-rtmutex-Allow-rt_mutex_trylock-on-PREEMPT_RT.patch (renamed from debian/patches-rt/0017-locking-rtmutex-Allow-rt_mutex_trylock-on-PREEMPT_RT.patch) | 6
-rw-r--r--  debian/patches-rt/0016-printk-implement-CON_PRINTBUFFER.patch | 92
-rw-r--r--  debian/patches-rt/0016-printk-ringbuffer-change-representation-of-states.patch | 207
-rw-r--r--  debian/patches-rt/0016-sched-proc-Print-accurate-cpumask-vs-migrate_disable.patch | 6
-rw-r--r--  debian/patches-rt/0017-locking-rtmutex-add-mutex-implementation-based-on-rt.patch (renamed from debian/patches-rt/0018-locking-rtmutex-add-mutex-implementation-based-on-rt.patch) | 67
-rw-r--r--  debian/patches-rt/0017-printk-add-processor-number-to-output.patch | 100
-rw-r--r--  debian/patches-rt/0017-printk-ringbuffer-add-finalization-extension-support.patch | 898
-rw-r--r--  debian/patches-rt/0017-sched-Add-migrate_disable-tracepoints.patch | 8
-rw-r--r--  debian/patches-rt/0018-console-add-write_atomic-interface.patch | 65
-rw-r--r--  debian/patches-rt/0018-locking-rtmutex-add-rwsem-implementation-based-on-rt.patch (renamed from debian/patches-rt/0019-locking-rtmutex-add-rwsem-implementation-based-on-rt.patch) | 25
-rw-r--r--  debian/patches-rt/0018-printk-reimplement-log_cont-using-record-extension.patch | 144
-rw-r--r--  debian/patches-rt/0018-sched-Deny-self-issued-__set_cpus_allowed_ptr-when-m.patch | 40
-rw-r--r--  debian/patches-rt/0019-locking-rtmutex-add-rwlock-implementation-based-on-r.patch (renamed from debian/patches-rt/0020-locking-rtmutex-add-rwlock-implementation-based-on-r.patch) | 6
-rw-r--r--  debian/patches-rt/0019-printk-introduce-emergency-messages.patch | 273
-rw-r--r--  debian/patches-rt/0019-printk-move-printk_info-into-separate-array.patch | 606
-rw-r--r--  debian/patches-rt/0019-sched-Comment-affine_move_task.patch | 124
-rw-r--r--  debian/patches-rt/0020-locking-rtmutex-wire-up-RT-s-locking.patch (renamed from debian/patches-rt/0021-locking-rtmutex-wire-up-RT-s-locking.patch) | 4
-rw-r--r--  debian/patches-rt/0020-printk-move-dictionary-keys-to-dev_printk_info.patch | 764
-rw-r--r--  debian/patches-rt/0021-locking-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch (renamed from debian/patches-rt/0022-locking-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch) | 10
-rw-r--r--  debian/patches-rt/0021-printk-implement-KERN_CONT.patch | 133
-rw-r--r--  debian/patches-rt/0021-printk-remove-dict-ring.patch | 787
-rw-r--r--  debian/patches-rt/0022-locking-rtmutex-Use-custom-scheduling-function-for-s.patch (renamed from debian/patches-rt/0023-locking-rtmutex-Use-custom-scheduling-function-for-s.patch) | 24
-rw-r--r--  debian/patches-rt/0022-printk-avoid-and-or-handle-record-truncation.patch | 119
-rw-r--r--  debian/patches-rt/0022-printk-implement-dev-kmsg.patch | 305
-rw-r--r--  debian/patches-rt/0023-printk-implement-syslog.patch | 494
-rw-r--r--  debian/patches-rt/0023-printk-reduce-setup_text_buf-size-to-LOG_LINE_MAX.patch | 28
-rw-r--r--  debian/patches-rt/0024-printk-Use-fallthrough-pseudo-keyword.patch | 29
-rw-r--r--  debian/patches-rt/0024-printk-implement-kmsg_dump.patch | 398
-rw-r--r--  debian/patches-rt/0024-xfrm-Use-sequence-counter-with-associated-spinlock.patch | 4
-rw-r--r--  debian/patches-rt/0025-printk-remove-unused-code.patch | 362
-rw-r--r--  debian/patches-rt/0025-printk-ringbuffer-Wrong-data-pointer-when-appending-.patch | 141
-rw-r--r--  debian/patches-rt/ARM-Allow-to-enable-RT.patch | 2
-rw-r--r--  debian/patches-rt/ARM-enable-irq-in-translation-section-permission-fau.patch | 2
-rw-r--r--  debian/patches-rt/ARM64-Allow-to-enable-RT.patch | 2
-rw-r--r--  debian/patches-rt/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch | 2
-rw-r--r--  debian/patches-rt/POWERPC-Allow-to-enable-RT.patch | 2
-rw-r--r--  debian/patches-rt/Use-CONFIG_PREEMPTION.patch | 4
-rw-r--r--  debian/patches-rt/add_cpu_light.patch | 2
-rw-r--r--  debian/patches-rt/arch-arm64-Add-lazy-preempt-support.patch | 2
-rw-r--r--  debian/patches-rt/arm-enable-highmem-for-rt.patch | 2
-rw-r--r--  debian/patches-rt/arm-highmem-flush-tlb-on-unmap.patch | 2
-rw-r--r--  debian/patches-rt/arm-preempt-lazy-support.patch | 2
-rw-r--r--  debian/patches-rt/arm-remove-printk_nmi_.patch | 26
-rw-r--r--  debian/patches-rt/arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch | 2
-rw-r--r--  debian/patches-rt/block-mq-drop-preempt-disable.patch | 4
-rw-r--r--  debian/patches-rt/bus-mhi-Remove-include-of-rwlock_types.h.patch | 2
-rw-r--r--  debian/patches-rt/cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch | 2
-rw-r--r--  debian/patches-rt/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch | 2
-rw-r--r--  debian/patches-rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch | 2
-rw-r--r--  debian/patches-rt/crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch | 2
-rw-r--r--  debian/patches-rt/crypto-limit-more-FPU-enabled-sections.patch | 2
-rw-r--r--  debian/patches-rt/debugobjects-rt.patch | 2
-rw-r--r--  debian/patches-rt/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch | 2
-rw-r--r--  debian/patches-rt/drivers-tty-fix-omap-lock-crap.patch | 2
-rw-r--r--  debian/patches-rt/drivers-tty-pl011-irq-disable-madness.patch | 2
-rw-r--r--  debian/patches-rt/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch | 2
-rw-r--r--  debian/patches-rt/drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch | 2
-rw-r--r--  debian/patches-rt/drm-i915-disable-tracing-on-RT.patch | 2
-rw-r--r--  debian/patches-rt/drm-i915-gt-Only-disable-interrupts-for-the-timeline.patch | 2
-rw-r--r--  debian/patches-rt/drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch | 2
-rw-r--r--  debian/patches-rt/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch | 2
-rw-r--r--  debian/patches-rt/efi-Allow-efi-runtime.patch | 2
-rw-r--r--  debian/patches-rt/efi-Disable-runtime-services-on-RT.patch | 2
-rw-r--r--  debian/patches-rt/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch | 2
-rw-r--r--  debian/patches-rt/fs-dcache-use-swait_queue-instead-of-waitqueue.patch | 2
-rw-r--r--  debian/patches-rt/fs-namespace-use-cpu-chill-in-trylock-loops.patch | 2
-rw-r--r--  debian/patches-rt/ftrace-migrate-disable-tracing.patch | 2
-rw-r--r--  debian/patches-rt/genirq-disable-irqpoll-on-rt.patch | 2
-rw-r--r--  debian/patches-rt/genirq-update-irq_set_irqchip_state-documentation.patch | 2
-rw-r--r--  debian/patches-rt/hrtimer-Allow-raw-wakeups-during-boot.patch | 2
-rw-r--r--  debian/patches-rt/io_wq-Make-io_wqe-lock-a-raw_spinlock_t.patch | 2
-rw-r--r--  debian/patches-rt/irqwork-push_most_work_into_softirq_context.patch | 2
-rw-r--r--  debian/patches-rt/jump-label-rt.patch | 2
-rw-r--r--  debian/patches-rt/kconfig-disable-a-few-options-rt.patch | 2
-rw-r--r--  debian/patches-rt/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch | 4
-rw-r--r--  debian/patches-rt/leds-trigger-disable-CPU-trigger-on-RT.patch | 2
-rw-r--r--  debian/patches-rt/lib-test_lockup-Minimum-fix-to-get-it-compiled-on-PR.patch | 58
-rw-r--r--  debian/patches-rt/localversion.patch | 4
-rw-r--r--  debian/patches-rt/lockdep-disable-self-test.patch | 4
-rw-r--r--  debian/patches-rt/lockdep-no-softirq-accounting-on-rt.patch | 2
-rw-r--r--  debian/patches-rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch | 2
-rw-r--r--  debian/patches-rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch | 2
-rw-r--r--  debian/patches-rt/locking-Make-spinlock_t-and-rwlock_t-a-RCU-section-o.patch | 2
-rw-r--r--  debian/patches-rt/locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch | 2
-rw-r--r--  debian/patches-rt/md-raid5-percpu-handling-rt-aware.patch | 2
-rw-r--r--  debian/patches-rt/mips-disable-highmem-on-rt.patch | 2
-rw-r--r--  debian/patches-rt/mm-disable-sloub-rt.patch | 6
-rw-r--r--  debian/patches-rt/mm-fix-exec-activate_mm-vs-TLB-shootdown-and-lazy-tl.patch | 2
-rw-r--r--  debian/patches-rt/mm-make-vmstat-rt-aware.patch | 2
-rw-r--r--  debian/patches-rt/mm-memcontrol-Disable-preemption-in-__mod_memcg_lruv.patch | 38
-rw-r--r--  debian/patches-rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch | 6
-rw-r--r--  debian/patches-rt/mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch | 24
-rw-r--r--  debian/patches-rt/mm-memcontrol-do_not_disable_irq.patch | 14
-rw-r--r--  debian/patches-rt/mm-page_alloc-Use-migrate_disable-in-drain_local_pag.patch | 4
-rw-r--r--  debian/patches-rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch | 32
-rw-r--r--  debian/patches-rt/mm-rt-kmap-atomic-scheduling.patch | 2
-rw-r--r--  debian/patches-rt/mm-scatterlist-dont-disable-irqs-on-RT.patch | 2
-rw-r--r--  debian/patches-rt/mm-slub-Always-flush-the-delayed-empty-slubs-in-flus.patch | 2
-rw-r--r--  debian/patches-rt/mm-slub-Make-object_map_lock-a-raw_spinlock_t.patch | 2
-rw-r--r--  debian/patches-rt/mm-vmalloc-use-get-cpu-light.patch | 2
-rw-r--r--  debian/patches-rt/mm-workingset-replace-IRQ-off-check-with-a-lockdep-a.patch | 2
-rw-r--r--  debian/patches-rt/mm-zswap-Use-local-lock-to-protect-per-CPU-data.patch | 2
-rw-r--r--  debian/patches-rt/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch | 2
-rw-r--r--  debian/patches-rt/net--Move-lockdep-where-it-belongs.patch | 2
-rw-r--r--  debian/patches-rt/net-Dequeue-in-dev_cpu_dead-without-the-lock.patch | 4
-rw-r--r--  debian/patches-rt/net-Properly-annotate-the-try-lock-for-the-seqlock.patch | 2
-rw-r--r--  debian/patches-rt/net-Qdisc-use-a-seqlock-instead-seqcount.patch | 2
-rw-r--r--  debian/patches-rt/net-core-use-local_bh_disable-in-netif_rx_ni.patch | 2
-rw-r--r--  debian/patches-rt/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch | 2
-rw-r--r--  debian/patches-rt/net_disable_NET_RX_BUSY_POLL.patch | 2
-rw-r--r--  debian/patches-rt/oleg-signal-rt-fix.patch | 2
-rw-r--r--  debian/patches-rt/panic-disable-random-on-rt.patch | 2
-rw-r--r--  debian/patches-rt/pid.h-include-atomic.h.patch | 2
-rw-r--r--  debian/patches-rt/power-disable-highmem-on-rt.patch | 2
-rw-r--r--  debian/patches-rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch | 2
-rw-r--r--  debian/patches-rt/powerpc-preempt-lazy-support.patch | 2
-rw-r--r--  debian/patches-rt/powerpc-pseries-iommu-Use-a-locallock-instead-local_ir.patch | 2
-rw-r--r--  debian/patches-rt/powerpc-remove-printk_nmi_.patch | 25
-rw-r--r--  debian/patches-rt/powerpc-stackprotector-work-around-stack-guard-init-.patch | 2
-rw-r--r--  debian/patches-rt/preempt-lazy-support.patch | 49
-rw-r--r--  debian/patches-rt/preempt-nort-rt-variants.patch | 2
-rw-r--r--  debian/patches-rt/printk-Force-a-line-break-on-pr_cont-n.patch | 34
-rw-r--r--  debian/patches-rt/printk-Tiny-cleanup.patch | 155
-rw-r--r--  debian/patches-rt/printk-console-must-not-schedule-for-drivers.patch | 45
-rw-r--r--  debian/patches-rt/printk-devkmsg-llseek-reset-clear-if-it-is-lost.patch | 46
-rw-r--r--  debian/patches-rt/printk-devkmsg-read-Return-EPIPE-when-the-first-mess.patch | 44
-rw-r--r--  debian/patches-rt/printk-fix-ifnullfree.cocci-warnings.patch | 47
-rw-r--r--  debian/patches-rt/printk-hack-out-emergency-loglevel-usage.patch | 53
-rw-r--r--  debian/patches-rt/printk-handle-iterating-while-buffer-changing.patch | 44
-rw-r--r--  debian/patches-rt/printk-kmsg_dump-remove-mutex-usage.patch | 85
-rw-r--r--  debian/patches-rt/printk-only-allow-kernel-to-emergency-message.patch | 68
-rw-r--r--  debian/patches-rt/printk-print-rate-limitted-message-as-info.patch | 25
-rw-r--r--  debian/patches-rt/printk-set-deferred-to-default-loglevel-enforce-mask.patch | 39
-rw-r--r--  debian/patches-rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch | 8
-rw-r--r--  debian/patches-rt/random-make-it-work-on-rt.patch | 2
-rw-r--r--  debian/patches-rt/rcu--Prevent-false-positive-softirq-warning-on-RT.patch | 2
-rw-r--r--  debian/patches-rt/rcu-Use-rcuc-threads-on-PREEMPT_RT-as-we-did.patch | 2
-rw-r--r--  debian/patches-rt/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch | 2
-rw-r--r--  debian/patches-rt/rcu-make-RCU_BOOST-default-on-RT.patch | 2
-rw-r--r--  debian/patches-rt/rcutorture-Avoid-problematic-critical-section-nestin.patch | 2
-rw-r--r--  debian/patches-rt/rt-introduce-cpu-chill.patch | 2
-rw-r--r--  debian/patches-rt/rt-local-irq-lock.patch | 2
-rw-r--r--  debian/patches-rt/sched-disable-rt-group-sched-on-rt.patch | 4
-rw-r--r--  debian/patches-rt/sched-disable-ttwu-queue.patch | 2
-rw-r--r--  debian/patches-rt/sched-limit-nr-migrate.patch | 2
-rw-r--r--  debian/patches-rt/sched-might-sleep-do-not-account-rcu-depth.patch | 4
-rw-r--r--  debian/patches-rt/sched-mmdrop-delayed.patch | 6
-rw-r--r--  debian/patches-rt/scsi-fcoe-rt-aware.patch | 2
-rw-r--r--  debian/patches-rt/seqlock-Fix-multiple-kernel-doc-warnings.patch | 2
-rw-r--r--  debian/patches-rt/seqlock-Unbreak-lockdep.patch | 69
-rw-r--r--  debian/patches-rt/serial-8250-export-symbols-which-are-used-by-symbols.patch | 39
-rw-r--r--  debian/patches-rt/serial-8250-fsl-ingenic-mtk-fix-atomic-console.patch | 103
-rw-r--r--  debian/patches-rt/serial-8250-only-atomic-lock-for-console.patch | 385
-rw-r--r--  debian/patches-rt/serial-8250-remove-that-trylock-in-serial8250_consol.patch | 44
-rw-r--r--  debian/patches-rt/series | 153
-rw-r--r--  debian/patches-rt/shmem-Use-raw_spinlock_t-for-stat_lock.patch | 2
-rw-r--r--  debian/patches-rt/signal-Prevent-double-free-of-user-struct.patch | 2
-rw-r--r--  debian/patches-rt/signal-revert-ptrace-preempt-magic.patch | 2
-rw-r--r--  debian/patches-rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch | 2
-rw-r--r--  debian/patches-rt/skbufhead-raw-lock.patch | 4
-rw-r--r--  debian/patches-rt/slub-disable-SLUB_CPU_PARTIAL.patch | 4
-rw-r--r--  debian/patches-rt/slub-enable-irqs-for-no-wait.patch | 2
-rw-r--r--  debian/patches-rt/softirq--Add-RT-variant.patch | 2
-rw-r--r--  debian/patches-rt/softirq--Replace-barrier---with-cpu_relax---in-tasklet_unlock_wait--.patch | 2
-rw-r--r--  debian/patches-rt/softirq-disable-softirq-stacks-for-rt.patch | 2
-rw-r--r--  debian/patches-rt/softirq-preempt-fix-3-re.patch | 25
-rw-r--r--  debian/patches-rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch | 2
-rw-r--r--  debian/patches-rt/sysfs-realtime-entry.patch | 2
-rw-r--r--  debian/patches-rt/tasklets--Avoid-cancel-kill-deadlock-on-RT.patch | 2
-rw-r--r--  debian/patches-rt/tasklets-Use-static-line-for-functions.patch | 2
-rw-r--r--  debian/patches-rt/tcp-Remove-superfluous-BH-disable-around-listening_h.patch | 15
-rw-r--r--  debian/patches-rt/tick-sched--Prevent-false-positive-softirq-pending-warnings-on-RT.patch | 2
-rw-r--r--  debian/patches-rt/tpm-remove-tpm_dev_wq_lock.patch | 2
-rw-r--r--  debian/patches-rt/tpm_tis-fix-stall-after-iowrite-s.patch | 2
-rw-r--r--  debian/patches-rt/u64_stats-Disable-preemption-on-32bit-UP-SMP-with-RT.patch | 2
-rw-r--r--  debian/patches-rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch | 2
-rw-r--r--  debian/patches-rt/wait.h-include-atomic.h.patch | 2
-rw-r--r--  debian/patches-rt/x86-Enable-RT-also-on-32bit.patch | 2
-rw-r--r--  debian/patches-rt/x86-Enable-RT.patch | 2
-rw-r--r--  debian/patches-rt/x86-crypto-reduce-preempt-disabled-regions.patch | 2
-rw-r--r--  debian/patches-rt/x86-entry-Use-should_resched-in-idtentry_exit_cond_r.patch | 2
-rw-r--r--  debian/patches-rt/x86-fpu--Do-not-disable-BH-on-RT.patch | 2
-rw-r--r--  debian/patches-rt/x86-highmem-add-a-already-used-pte-check.patch | 2
-rw-r--r--  debian/patches-rt/x86-kvm-require-const-tsc-for-rt.patch | 2
-rw-r--r--  debian/patches-rt/x86-preempt-lazy.patch | 2
-rw-r--r--  debian/patches-rt/x86-stackprot-no-random-on-rt.patch | 2
280 files changed, 13532 insertions, 7228 deletions
diff --git a/debian/changelog b/debian/changelog
index 08b1dafaf..4605de54b 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -664,6 +664,9 @@ linux (5.9.2-1) UNRELEASED; urgency=medium
   * [arm64] Add i2c_mv64xxx i2c-modules udeb.
   * [arm64] Add drivers/pinctrl to kernel-image udeb.
 
+  [ Salvatore Bonaccorso ]
+  * [rt] Update to 5.9.1-rt20
+
  -- Sudip Mukherjee <sudipm.mukherjee@gmail.com>  Sun, 18 Oct 2020 20:07:46 +0100
 
 linux (5.9.1-1) unstable; urgency=medium
diff --git a/debian/patches-rt/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch b/debian/patches-rt/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
index fcbac5fb2..575493fb2 100644
--- a/debian/patches-rt/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
+++ b/debian/patches-rt/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
@@ -2,7 +2,7 @@ From: Peter Zijlstra <peterz@infradead.org>
Date: Mon, 28 May 2018 15:24:20 +0200
Subject: [PATCH 1/4] Split IRQ-off and zone->lock while freeing pages from PCP
list #1
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Split the IRQ-off section while accessing the PCP list from zone->lock
while freeing pages.
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -1282,7 +1282,7 @@ static inline void prefetch_buddy(struct
+@@ -1283,7 +1283,7 @@ static inline void prefetch_buddy(struct
}
/*
@@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* Assumes all pages on list are in same zone, and of same order.
* count is the number of pages to free.
*
-@@ -1293,14 +1293,40 @@ static inline void prefetch_buddy(struct
+@@ -1294,14 +1294,40 @@ static inline void prefetch_buddy(struct
* pinned" detection logic.
*/
static void free_pcppages_bulk(struct zone *zone, int count,
@@ -72,7 +72,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Ensure proper count is passed which otherwise would stuck in the
-@@ -1337,7 +1363,7 @@ static void free_pcppages_bulk(struct zo
+@@ -1338,7 +1364,7 @@ static void free_pcppages_bulk(struct zo
if (bulkfree_pcp_prepare(page))
continue;
@@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* We are going to put the page back to the global
-@@ -1352,26 +1378,6 @@ static void free_pcppages_bulk(struct zo
+@@ -1353,26 +1379,6 @@ static void free_pcppages_bulk(struct zo
prefetch_buddy(page);
} while (--count && --batch_free && !list_empty(list));
}
@@ -108,7 +108,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void free_one_page(struct zone *zone,
-@@ -2876,13 +2882,18 @@ void drain_zone_pages(struct zone *zone,
+@@ -2877,13 +2883,18 @@ void drain_zone_pages(struct zone *zone,
{
unsigned long flags;
int to_drain, batch;
@@ -128,7 +128,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#endif
-@@ -2898,14 +2909,21 @@ static void drain_pages_zone(unsigned in
+@@ -2899,14 +2910,21 @@ static void drain_pages_zone(unsigned in
unsigned long flags;
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
@@ -152,7 +152,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -3133,7 +3151,10 @@ static void free_unref_page_commit(struc
+@@ -3134,7 +3152,10 @@ static void free_unref_page_commit(struc
pcp->count++;
if (pcp->count >= pcp->high) {
unsigned long batch = READ_ONCE(pcp->batch);
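
Read together with the "#2" follow-up patch further down, the pair of PCP patches shrinks the IRQ-off window to just the per-CPU list manipulation, while the handoff to the buddy allocator then runs under zone->lock alone with interrupts enabled. A rough sketch of the target shape — illustrative only, with detach_pages_from_pcp() and free_pages_to_buddy() as hypothetical stand-ins for the real list-walking code:

/*
 * Sketch of the locking split the two PCP patches aim for. This is not
 * the actual kernel code; both helper functions are hypothetical.
 */
static void free_pcp_pages_split(struct zone *zone, int count,
				 struct per_cpu_pages *pcp)
{
	LIST_HEAD(head);
	unsigned long flags;

	/* Step 1: only the per-CPU list is touched with IRQs off. */
	local_irq_save(flags);
	detach_pages_from_pcp(pcp, count, &head);	/* hypothetical */
	local_irq_restore(flags);

	/* Step 2: the buddy handoff runs under zone->lock alone,
	 * with interrupts enabled again. */
	spin_lock(&zone->lock);
	free_pages_to_buddy(zone, &head);		/* hypothetical */
	spin_unlock(&zone->lock);
}

On PREEMPT_RT this matters because zone->lock becomes a sleeping lock; keeping it out of the IRQ-off region is what makes the conversion possible.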
diff --git a/debian/patches-rt/0001-blk-mq-Don-t-complete-on-a-remote-CPU-in-force-threa.patch b/debian/patches-rt/0001-blk-mq-Don-t-complete-on-a-remote-CPU-in-force-threa.patch
new file mode 100644
index 000000000..0cebde8e6
--- /dev/null
+++ b/debian/patches-rt/0001-blk-mq-Don-t-complete-on-a-remote-CPU-in-force-threa.patch
@@ -0,0 +1,38 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 28 Oct 2020 11:07:44 +0100
+Subject: [PATCH 1/3] blk-mq: Don't complete on a remote CPU in force threaded
+ mode
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+With force threaded interrupts enabled, raising softirq from an SMP
+function call will always result in waking the ksoftirqd thread. This is
+not optimal given that the thread runs at SCHED_OTHER priority.
+
+Completing the request in hard IRQ-context on PREEMPT_RT (which enforces
+the force threaded mode) is bad because the completion handler may
+acquire sleeping locks which violate the locking context.
+
+Disable request completing on a remote CPU in force threaded mode.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ block/blk-mq.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -648,6 +648,14 @@ static inline bool blk_mq_complete_need_
+ if (!IS_ENABLED(CONFIG_SMP) ||
+ !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
+ return false;
++ /*
++ * With force threaded interrupts enabled, raising softirq from an SMP
++ * function call will always result in waking the ksoftirqd thread.
++ * This is probably worse than completing the request on a different
++ * cache domain.
++ */
++ if (force_irqthreads)
++ return false;
+
+ /* same CPU or cache domain? Complete locally */
+ if (cpu == rq->mq_ctx->cpu ||
diff --git a/debian/patches-rt/0001-crash-add-VMCOREINFO-macro-to-define-offset-in-a-str.patch b/debian/patches-rt/0001-crash-add-VMCOREINFO-macro-to-define-offset-in-a-str.patch
new file mode 100644
index 000000000..94385c976
--- /dev/null
+++ b/debian/patches-rt/0001-crash-add-VMCOREINFO-macro-to-define-offset-in-a-str.patch
@@ -0,0 +1,34 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Thu, 9 Jul 2020 15:29:41 +0206
+Subject: [PATCH 01/25] crash: add VMCOREINFO macro to define offset in a
+ struct declared by typedef
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+The existing macro VMCOREINFO_OFFSET() can't be used for structures
+declared via typedef because "struct" is not part of type definition.
+
+Create another macro for this purpose.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Acked-by: Baoquan He <bhe@redhat.com>
+Acked-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20200709132344.760-2-john.ogness@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/crash_core.h | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/include/linux/crash_core.h
++++ b/include/linux/crash_core.h
+@@ -55,6 +55,9 @@ phys_addr_t paddr_vmcoreinfo_note(void);
+ #define VMCOREINFO_OFFSET(name, field) \
+ vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #field, \
+ (unsigned long)offsetof(struct name, field))
++#define VMCOREINFO_TYPE_OFFSET(name, field) \
++ vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #field, \
++ (unsigned long)offsetof(name, field))
+ #define VMCOREINFO_LENGTH(name, value) \
+ vmcoreinfo_append_str("LENGTH(%s)=%lu\n", #name, (unsigned long)value)
+ #define VMCOREINFO_NUMBER(name) \
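
As an illustration of the case the new macro covers — a sketch, not part of the patch, with "demo_t" as a hypothetical typedef:

typedef struct {
	long counter;
} demo_t;

static void demo_export_offsets(void)
{
	/* VMCOREINFO_OFFSET(demo_t, counter) would expand to
	 * offsetof(struct demo_t, counter) and fail to compile,
	 * because the typedef declares no "struct demo_t" tag. */

	/* The new macro omits the implicit "struct" keyword and
	 * expands to offsetof(demo_t, counter). */
	VMCOREINFO_TYPE_OFFSET(demo_t, counter);
}

The intended user appears to be the lockless printk ringbuffer later in this series, whose state lives in typedef'd atomic types such as atomic_long_t.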
diff --git a/debian/patches-rt/0001-locking-rtmutex-Remove-cruft.patch b/debian/patches-rt/0001-locking-rtmutex-Remove-cruft.patch
index 293ce26f7..62552557f 100644
--- a/debian/patches-rt/0001-locking-rtmutex-Remove-cruft.patch
+++ b/debian/patches-rt/0001-locking-rtmutex-Remove-cruft.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 29 Sep 2020 15:21:17 +0200
-Subject: [PATCH 01/23] locking/rtmutex: Remove cruft
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Subject: [PATCH 01/22] locking/rtmutex: Remove cruft
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Most of this is around since the very beginning. I'm not sure if this
was used while the rtmutex-deadlock-tester was around but today it seems
diff --git a/debian/patches-rt/0001-printk-rb-add-printk-ring-buffer-documentation.patch b/debian/patches-rt/0001-printk-rb-add-printk-ring-buffer-documentation.patch
deleted file mode 100644
index 92095b3a7..000000000
--- a/debian/patches-rt/0001-printk-rb-add-printk-ring-buffer-documentation.patch
+++ /dev/null
@@ -1,394 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Tue, 12 Feb 2019 15:29:39 +0100
-Subject: [PATCH 01/25] printk-rb: add printk ring buffer documentation
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-The full documentation file for the printk ring buffer.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- Documentation/printk-ringbuffer.txt | 377 ++++++++++++++++++++++++++++++++++++
- 1 file changed, 377 insertions(+)
- create mode 100644 Documentation/printk-ringbuffer.txt
-
---- /dev/null
-+++ b/Documentation/printk-ringbuffer.txt
-@@ -0,0 +1,377 @@
-+struct printk_ringbuffer
-+------------------------
-+John Ogness <john.ogness@linutronix.de>
-+
-+Overview
-+~~~~~~~~
-+As the name suggests, this ring buffer was implemented specifically to serve
-+the needs of the printk() infrastructure. The ring buffer itself is not
-+specific to printk and could be used for other purposes. _However_, the
-+requirements and semantics of printk are rather unique. If you intend to use
-+this ring buffer for anything other than printk, you need to be very clear on
-+its features, behavior, and pitfalls.
-+
-+Features
-+^^^^^^^^
-+The printk ring buffer has the following features:
-+
-+- single global buffer
-+- resides in initialized data section (available at early boot)
-+- lockless readers
-+- supports multiple writers
-+- supports multiple non-consuming readers
-+- safe from any context (including NMI)
-+- groups bytes into variable length blocks (referenced by entries)
-+- entries tagged with sequence numbers
-+
-+Behavior
-+^^^^^^^^
-+Since the printk ring buffer readers are lockless, there exists no
-+synchronization between readers and writers. Basically writers are the tasks
-+in control and may overwrite any and all committed data at any time and from
-+any context. For this reason readers can miss entries if they are overwritten
-+before the reader was able to access the data. The reader API implementation
-+is such that reader access to entries is atomic, so there is no risk of
-+readers having to deal with partial or corrupt data. Also, entries are
-+tagged with sequence numbers so readers can recognize if entries were missed.
-+
-+Writing to the ring buffer consists of 2 steps. First a writer must reserve
-+an entry of desired size. After this step the writer has exclusive access
-+to the memory region. Once the data has been written to memory, it needs to
-+be committed to the ring buffer. After this step the entry has been inserted
-+into the ring buffer and assigned an appropriate sequence number.
-+
-+Once committed, a writer must no longer access the data directly. This is
-+because the data may have been overwritten and no longer exists. If a
-+writer must access the data, it should either keep a private copy before
-+committing the entry or use the reader API to gain access to the data.
-+
-+Because of how the data backend is implemented, entries that have been
-+reserved but not yet committed act as barriers, preventing future writers
-+from filling the ring buffer beyond the location of the reserved but not
-+yet committed entry region. For this reason it is *important* that writers
-+perform both reserve and commit as quickly as possible. Also, be aware that
-+preemption and local interrupts are disabled and writing to the ring buffer
-+is processor-reentrant locked during the reserve/commit window. Writers in
-+NMI contexts can still preempt any other writers, but as long as these
-+writers do not write a large amount of data with respect to the ring buffer
-+size, this should not become an issue.
-+
-+API
-+~~~
-+
-+Declaration
-+^^^^^^^^^^^
-+The printk ring buffer can be instantiated as a static structure:
-+
-+ /* declare a static struct printk_ringbuffer */
-+ #define DECLARE_STATIC_PRINTKRB(name, szbits, cpulockptr)
-+
-+The value of szbits specifies the size of the ring buffer in bits. The
-+cpulockptr field is a pointer to a prb_cpulock struct that is used to
-+perform processor-reentrant spin locking for the writers. It is specified
-+externally because it may be used for multiple ring buffers (or other
-+code) to synchronize writers without risk of deadlock.
-+
-+Here is an example of a declaration of a printk ring buffer specifying a
-+32KB (2^15) ring buffer:
-+
-+....
-+DECLARE_STATIC_PRINTKRB_CPULOCK(rb_cpulock);
-+DECLARE_STATIC_PRINTKRB(rb, 15, &rb_cpulock);
-+....
-+
-+If writers will be using multiple ring buffers and the ordering of that usage
-+is not clear, the same prb_cpulock should be used for both ring buffers.
-+
-+Writer API
-+^^^^^^^^^^
-+The writer API consists of 2 functions. The first is to reserve an entry in
-+the ring buffer, the second is to commit that data to the ring buffer. The
-+reserved entry information is stored within a provided `struct prb_handle`.
-+
-+ /* reserve an entry */
-+ char *prb_reserve(struct prb_handle *h, struct printk_ringbuffer *rb,
-+ unsigned int size);
-+
-+ /* commit a reserved entry to the ring buffer */
-+ void prb_commit(struct prb_handle *h);
-+
-+Here is an example of a function to write data to a ring buffer:
-+
-+....
-+int write_data(struct printk_ringbuffer *rb, char *data, int size)
-+{
-+ struct prb_handle h;
-+ char *buf;
-+
-+ buf = prb_reserve(&h, rb, size);
-+ if (!buf)
-+ return -1;
-+ memcpy(buf, data, size);
-+ prb_commit(&h);
-+
-+ return 0;
-+}
-+....
-+
-+Pitfalls
-+++++++++
-+Be aware that prb_reserve() can fail. A retry might be successful, but it
-+depends entirely on whether or not the next part of the ring buffer to
-+overwrite belongs to reserved but not yet committed entries of other writers.
-+Writers can use the prb_inc_lost() function to allow readers to notice that a
-+message was lost.
-+
-+Reader API
-+^^^^^^^^^^
-+The reader API utilizes a `struct prb_iterator` to track the reader's
-+position in the ring buffer.
-+
-+ /* declare a pre-initialized static iterator for a ring buffer */
-+ #define DECLARE_STATIC_PRINTKRB_ITER(name, rbaddr)
-+
-+ /* initialize iterator for a ring buffer (if static macro NOT used) */
-+ void prb_iter_init(struct prb_iterator *iter,
-+ struct printk_ringbuffer *rb, u64 *seq);
-+
-+ /* make a deep copy of an iterator */
-+ void prb_iter_copy(struct prb_iterator *dest,
-+ struct prb_iterator *src);
-+
-+ /* non-blocking, advance to next entry (and read the data) */
-+ int prb_iter_next(struct prb_iterator *iter, char *buf,
-+ int size, u64 *seq);
-+
-+ /* blocking, advance to next entry (and read the data) */
-+ int prb_iter_wait_next(struct prb_iterator *iter, char *buf,
-+ int size, u64 *seq);
-+
-+ /* position iterator at the entry seq */
-+ int prb_iter_seek(struct prb_iterator *iter, u64 seq);
-+
-+ /* read data at current position */
-+ int prb_iter_data(struct prb_iterator *iter, char *buf,
-+ int size, u64 *seq);
-+
-+Typically prb_iter_data() is not needed because the data can be retrieved
-+directly with prb_iter_next().
-+
-+Here is an example of a non-blocking function that will read all the data in
-+a ring buffer:
-+
-+....
-+void read_all_data(struct printk_ringbuffer *rb, char *buf, int size)
-+{
-+ struct prb_iterator iter;
-+ u64 prev_seq = 0;
-+ u64 seq;
-+ int ret;
-+
-+ prb_iter_init(&iter, rb, NULL);
-+
-+ for (;;) {
-+ ret = prb_iter_next(&iter, buf, size, &seq);
-+ if (ret > 0) {
-+ if (seq != ++prev_seq) {
-+ /* "seq - prev_seq" entries missed */
-+ prev_seq = seq;
-+ }
-+ /* process buf here */
-+ } else if (ret == 0) {
-+ /* hit the end, done */
-+ break;
-+ } else if (ret < 0) {
-+ /*
-+ * iterator is invalid, a writer overtook us, reset the
-+ * iterator and keep going, entries were missed
-+ */
-+ prb_iter_init(&iter, rb, NULL);
-+ }
-+ }
-+}
-+....
-+
-+Pitfalls
-+++++++++
-+The reader's iterator can become invalid at any time because the reader was
-+overtaken by a writer. Typically the reader should reset the iterator back
-+to the current oldest entry (which will be newer than the entry the reader
-+was at) and continue, noting the number of entries that were missed.
-+
-+Utility API
-+^^^^^^^^^^^
-+Several functions are available as convenience for external code.
-+
-+ /* query the size of the data buffer */
-+ int prb_buffer_size(struct printk_ringbuffer *rb);
-+
-+ /* skip a seq number to signify a lost record */
-+ void prb_inc_lost(struct printk_ringbuffer *rb);
-+
-+ /* processor-reentrant spin lock */
-+ void prb_lock(struct prb_cpulock *cpu_lock, unsigned int *cpu_store);
-+
-+ /* processor-reentrant spin unlock */
-+ void prb_lock(struct prb_cpulock *cpu_lock, unsigned int *cpu_store);
-+
-+Pitfalls
-+++++++++
-+Although the value returned by prb_buffer_size() does represent an absolute
-+upper bound, the amount of data that can be stored within the ring buffer
-+is actually less because of the additional storage space of a header for each
-+entry.
-+
-+The prb_lock() and prb_unlock() functions can be used to synchronize between
-+ring buffer writers and other external activities. The function of a
-+processor-reentrant spin lock is to disable preemption and local interrupts
-+and synchronize against other processors. It does *not* protect against
-+multiple contexts of a single processor, i.e NMI.
-+
-+Implementation
-+~~~~~~~~~~~~~~
-+This section describes several of the implementation concepts and details to
-+help developers better understand the code.
-+
-+Entries
-+^^^^^^^
-+All ring buffer data is stored within a single static byte array. The reason
-+for this is to ensure that any pointers to the data (past and present) will
-+always point to valid memory. This is important because the lockless readers
-+may be preempted for long periods of time and when they resume may be working
-+with expired pointers.
-+
-+Entries are identified by start index and size. (The start index plus size
-+is the start index of the next entry.) The start index is not simply an
-+offset into the byte array, but rather a logical position (lpos) that maps
-+directly to byte array offsets.
-+
-+For example, for a byte array of 1000, an entry may have have a start index
-+of 100. Another entry may have a start index of 1100. And yet another 2100.
-+All of these entry are pointing to the same memory region, but only the most
-+recent entry is valid. The other entries are pointing to valid memory, but
-+represent entries that have been overwritten.
-+
-+Note that due to overflowing, the most recent entry is not necessarily the one
-+with the highest lpos value. Indeed, the printk ring buffer initializes its
-+data such that an overflow happens relatively quickly in order to validate the
-+handling of this situation. The implementation assumes that an lpos (unsigned
-+long) will never completely wrap while a reader is preempted. If this were to
-+become an issue, the seq number (which never wraps) could be used to increase
-+the robustness of handling this situation.
-+
-+Buffer Wrapping
-+^^^^^^^^^^^^^^^
-+If an entry starts near the end of the byte array but would extend beyond it,
-+a special terminating entry (size = -1) is inserted into the byte array and
-+the real entry is placed at the beginning of the byte array. This can waste
-+space at the end of the byte array, but simplifies the implementation by
-+allowing writers to always work with contiguous buffers.
-+
-+Note that the size field is the first 4 bytes of the entry header. Also note
-+that calc_next() always ensures that there are at least 4 bytes left at the
-+end of the byte array to allow room for a terminating entry.
-+
-+Ring Buffer Pointers
-+^^^^^^^^^^^^^^^^^^^^
-+Three pointers (lpos values) are used to manage the ring buffer:
-+
-+ - _tail_: points to the oldest entry
-+ - _head_: points to where the next new committed entry will be
-+ - _reserve_: points to where the next new reserved entry will be
-+
-+These pointers always maintain a logical ordering:
-+
-+ tail <= head <= reserve
-+
-+The reserve pointer moves forward when a writer reserves a new entry. The
-+head pointer moves forward when a writer commits a new entry.
-+
-+The reserve pointer cannot overwrite the tail pointer in a wrap situation. In
-+such a situation, the tail pointer must be "pushed forward", thus
-+invalidating that oldest entry. Readers identify if they are accessing a
-+valid entry by ensuring their entry pointer is `>= tail && < head`.
-+
-+If the tail pointer is equal to the head pointer, it cannot be pushed and any
-+reserve operation will fail. The only resolution is for writers to commit
-+their reserved entries.
-+
-+Processor-Reentrant Locking
-+^^^^^^^^^^^^^^^^^^^^^^^^^^^
-+The purpose of the processor-reentrant locking is to limit the interruption
-+scenarios of writers to 2 contexts. This allows for a simplified
-+implementation where:
-+
-+- The reserve/commit window only exists on 1 processor at a time. A reserve
-+ can never fail due to uncommitted entries of other processors.
-+
-+- When committing entries, it is trivial to handle the situation when
-+ subsequent entries have already been committed, i.e. managing the head
-+ pointer.
-+
-+Performance
-+~~~~~~~~~~~
-+Some basic tests were performed on a quad Intel(R) Xeon(R) CPU E5-2697 v4 at
-+2.30GHz (36 cores / 72 threads). All tests involved writing a total of
-+32,000,000 records at an average of 33 bytes each. Each writer was pinned to
-+its own CPU and would write as fast as it could until a total of 32,000,000
-+records were written. All tests involved 2 readers that were both pinned
-+together to another CPU. Each reader would read as fast as it could and track
-+how many of the 32,000,000 records it could read. All tests used a ring buffer
-+of 16KB in size, which holds around 350 records (header + data for each
-+entry).
-+
-+The only difference between the tests is the number of writers (and thus also
-+the number of records per writer). As more writers are added, the time to
-+write a record increases. This is because data pointers, modified via cmpxchg,
-+and global data access in general become more contended.
-+
-+1 writer
-+^^^^^^^^
-+ runtime: 0m 18s
-+ reader1: 16219900/32000000 (50%) records
-+ reader2: 16141582/32000000 (50%) records
-+
-+2 writers
-+^^^^^^^^^
-+ runtime: 0m 32s
-+ reader1: 16327957/32000000 (51%) records
-+ reader2: 16313988/32000000 (50%) records
-+
-+4 writers
-+^^^^^^^^^
-+ runtime: 0m 42s
-+ reader1: 16421642/32000000 (51%) records
-+ reader2: 16417224/32000000 (51%) records
-+
-+8 writers
-+^^^^^^^^^
-+ runtime: 0m 43s
-+ reader1: 16418300/32000000 (51%) records
-+ reader2: 16432222/32000000 (51%) records
-+
-+16 writers
-+^^^^^^^^^^
-+ runtime: 0m 54s
-+ reader1: 16539189/32000000 (51%) records
-+ reader2: 16542711/32000000 (51%) records
-+
-+32 writers
-+^^^^^^^^^^
-+ runtime: 1m 13s
-+ reader1: 16731808/32000000 (52%) records
-+ reader2: 16735119/32000000 (52%) records
-+
-+Comments
-+^^^^^^^^
-+It is particularly interesting to compare/contrast the 1-writer and 32-writer
-+tests. Despite the writing of the 32,000,000 records taking over 4 times
-+longer, the readers (which perform no cmpxchg) were still unable to keep up.
-+This shows that the memory contention between the increasing number of CPUs
-+also has a dramatic effect on readers.
-+
-+It should also be noted that in all cases each reader was able to read >=50%
-+of the records. This means that a single reader would have been able to keep
-+up with the writer(s) in all cases, becoming slightly easier as more writers
-+are added. This was the purpose of pinning 2 readers to 1 CPU: to observe how
-+maximum reader performance changes.
diff --git a/debian/patches-rt/0001-printk-refactor-kmsg_dump_get_buffer.patch b/debian/patches-rt/0001-printk-refactor-kmsg_dump_get_buffer.patch
new file mode 100644
index 000000000..9522ccd29
--- /dev/null
+++ b/debian/patches-rt/0001-printk-refactor-kmsg_dump_get_buffer.patch
@@ -0,0 +1,123 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Wed, 14 Oct 2020 19:09:15 +0200
+Subject: [PATCH 01/15] printk: refactor kmsg_dump_get_buffer()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+kmsg_dump_get_buffer() requires nearly the same logic as
+syslog_print_all(), but uses different variable names and
+does not make use of the ringbuffer loop macros. Modify
+kmsg_dump_get_buffer() so that the implementation is as similar
+to syslog_print_all() as possible.
+
+At some point it would be nice to have this code factored into a
+helper function. But until then, the code should at least look
+similar enough so that it is obvious there is logic duplication
+implemented.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 57 ++++++++++++++++++++++++-------------------------
+ 1 file changed, 29 insertions(+), 28 deletions(-)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -3342,7 +3342,7 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
+ * read.
+ */
+ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+- char *buf, size_t size, size_t *len)
++ char *buf, size_t size, size_t *len_out)
+ {
+ struct printk_info info;
+ unsigned int line_count;
+@@ -3350,12 +3350,10 @@ bool kmsg_dump_get_buffer(struct kmsg_du
+ unsigned long flags;
+ u64 seq;
+ u64 next_seq;
+- size_t l = 0;
++ size_t len = 0;
+ bool ret = false;
+ bool time = printk_time;
+
+- prb_rec_init_rd(&r, &info, buf, size);
+-
+ if (!dumper->active || !buf || !size)
+ goto out;
+
+@@ -3371,48 +3369,51 @@ bool kmsg_dump_get_buffer(struct kmsg_du
+ goto out;
+ }
+
+- /* calculate length of entire buffer */
+- seq = dumper->cur_seq;
+- while (prb_read_valid_info(prb, seq, &info, &line_count)) {
+- if (r.info->seq >= dumper->next_seq)
++ /*
++ * Find first record that fits, including all following records,
++ * into the user-provided buffer for this dump.
++ */
++
++ prb_for_each_info(dumper->cur_seq, prb, seq, &info, &line_count) {
++ if (info.seq >= dumper->next_seq)
+ break;
+- l += get_record_print_text_size(&info, line_count, true, time);
+- seq = r.info->seq + 1;
++ len += get_record_print_text_size(&info, line_count, true, time);
+ }
+
+- /* move first record forward until length fits into the buffer */
+- seq = dumper->cur_seq;
+- while (l >= size && prb_read_valid_info(prb, seq,
+- &info, &line_count)) {
+- if (r.info->seq >= dumper->next_seq)
++ /*
++ * Move first record forward until length fits into the buffer. This
++ * is a best effort attempt. If @dumper->next_seq is reached because
++ * the ringbuffer is wrapping too fast, just start filling the buffer
++ * from there.
++ */
++ prb_for_each_info(dumper->cur_seq, prb, seq, &info, &line_count) {
++ if (len <= size || info.seq >= dumper->next_seq)
+ break;
+- l -= get_record_print_text_size(&info, line_count, true, time);
+- seq = r.info->seq + 1;
++ len -= get_record_print_text_size(&info, line_count, true, time);
+ }
+
+- /* last message in next interation */
++ /* Keep track of the last message for the next interation. */
+ next_seq = seq;
+
+- /* actually read text into the buffer now */
+- l = 0;
+- while (prb_read_valid(prb, seq, &r)) {
++ prb_rec_init_rd(&r, &info, buf, size);
++
++ len = 0;
++ prb_for_each_record(seq, prb, seq, &r) {
+ if (r.info->seq >= dumper->next_seq)
+ break;
+
+- l += record_print_text(&r, syslog, time);
+-
+- /* adjust record to store to remaining buffer space */
+- prb_rec_init_rd(&r, &info, buf + l, size - l);
++ len += record_print_text(&r, syslog, time);
+
+- seq = r.info->seq + 1;
++ /* Adjust record to store to remaining buffer space. */
++ prb_rec_init_rd(&r, &info, buf + len, size - len);
+ }
+
+ dumper->next_seq = next_seq;
+ ret = true;
+ logbuf_unlock_irqrestore(flags);
+ out:
+- if (len)
+- *len = l;
++ if (len_out)
++ *len_out = len;
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
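
Stripped of the ringbuffer API, the sizing logic that kmsg_dump_get_buffer() now shares with syslog_print_all() boils down to a two-pass window computation: first sum the rendered length of all candidate records, then drop records from the front until the remainder fits the caller's buffer. A simplified sketch, where record_len() is a hypothetical callback rather than a kernel function:

/*
 * Sketch of the shared sizing pattern (not kernel code). Returns the
 * sequence number of the first record that will be copied out.
 */
static unsigned long long
first_fitting_record(unsigned long long first, unsigned long long next,
		     size_t bufsz,
		     size_t (*record_len)(unsigned long long seq))
{
	size_t len = 0;
	unsigned long long seq;

	/* Pass 1: total text length of all records in [first, next). */
	for (seq = first; seq < next; seq++)
		len += record_len(seq);

	/* Pass 2: advance the start until the rest fits into bufsz. */
	for (seq = first; seq < next && len > bufsz; seq++)
		len -= record_len(seq);

	return seq;
}

The second pass is best effort, as the comment in the patch notes: if the ringbuffer wraps too quickly, the loop simply stops at the dump's end sequence and fills from there.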
diff --git a/debian/patches-rt/0001-stop_machine-Add-function-and-caller-debug-info.patch b/debian/patches-rt/0001-stop_machine-Add-function-and-caller-debug-info.patch
index b571db69f..db3476e5c 100644
--- a/debian/patches-rt/0001-stop_machine-Add-function-and-caller-debug-info.patch
+++ b/debian/patches-rt/0001-stop_machine-Add-function-and-caller-debug-info.patch
@@ -1,7 +1,7 @@
From: Peter Zijlstra <peterz@infradead.org>
-Date: Mon, 5 Oct 2020 16:57:18 +0200
-Subject: [PATCH 01/17] stop_machine: Add function and caller debug info
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Date: Fri, 23 Oct 2020 12:11:59 +0200
+Subject: [PATCH 01/19] stop_machine: Add function and caller debug info
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Crashes in stop-machine are hard to connect to the calling code, add a
little something to help with that.
@@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
-@@ -24,6 +24,7 @@ typedef int (*cpu_stop_fn_t)(void *arg);
+@@ -24,6 +24,7 @@
struct cpu_stop_work {
struct list_head list; /* cpu_stopper->works */
cpu_stop_fn_t fn;
@@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void *arg;
struct cpu_stop_done *done;
};
-@@ -36,6 +37,8 @@ void stop_machine_park(int cpu);
+@@ -36,6 +37,8 @@
void stop_machine_unpark(int cpu);
void stop_machine_yield(const struct cpumask *cpumask);
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#else /* CONFIG_SMP */
#include <linux/workqueue.h>
-@@ -80,6 +83,8 @@ static inline bool stop_one_cpu_nowait(u
+@@ -80,6 +83,8 @@
return false;
}
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
-@@ -42,11 +42,23 @@ struct cpu_stopper {
+@@ -42,11 +42,23 @@
struct list_head works; /* list of pending works */
struct cpu_stop_work stop_work; /* for stop_cpus */
@@ -68,7 +68,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
static bool stop_cpus_in_progress;
-@@ -123,7 +135,7 @@ static bool cpu_stop_queue_work(unsigned
+@@ -123,7 +135,7 @@
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
struct cpu_stop_done done;
@@ -77,7 +77,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
cpu_stop_init_done(&done, 1);
if (!cpu_stop_queue_work(cpu, &work))
-@@ -331,7 +343,8 @@ int stop_two_cpus(unsigned int cpu1, uns
+@@ -331,7 +343,8 @@
work1 = work2 = (struct cpu_stop_work){
.fn = multi_cpu_stop,
.arg = &msdata,
@@ -87,7 +87,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
};
cpu_stop_init_done(&done, 2);
-@@ -367,7 +380,7 @@ int stop_two_cpus(unsigned int cpu1, uns
+@@ -367,7 +380,7 @@
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
struct cpu_stop_work *work_buf)
{
@@ -96,7 +96,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return cpu_stop_queue_work(cpu, work_buf);
}
-@@ -487,6 +500,8 @@ static void cpu_stopper_thread(unsigned
+@@ -487,6 +500,8 @@
int ret;
/* cpu stop callbacks must not sleep, make in_atomic() == T */
@@ -105,7 +105,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
preempt_count_inc();
ret = fn(arg);
if (done) {
-@@ -495,6 +510,8 @@ static void cpu_stopper_thread(unsigned
+@@ -495,6 +510,8 @@
cpu_stop_signal_done(done);
}
preempt_count_dec();
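
The debugging idea itself is compact enough to show in isolation: record _RET_IP_ when the work is queued, so that a later crash in the stopper thread can be tied back to the code that requested the stop. A sketch with hypothetical names — the real change is to struct cpu_stop_work in the diff above:

struct demo_stop_work {
	int		(*fn)(void *arg);
	unsigned long	caller;		/* return address of the enqueuer */
	void		*arg;
};

static void demo_queue_stop_work(struct demo_stop_work *w,
				 int (*fn)(void *), void *arg)
{
	w->fn = fn;
	w->arg = arg;
	w->caller = _RET_IP_;	/* who asked for this stop-machine call */
}

/*
 * On a crash while running w->fn(), a symbolized dump of both pointers
 * connects the crash to its origin, e.g.:
 *	pr_crit("fn=%ps queued from %pS\n", w->fn, (void *)w->caller);
 */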
diff --git a/debian/patches-rt/0001-time-sched_clock-Use-raw_read_seqcount_latch-during-.patch b/debian/patches-rt/0001-time-sched_clock-Use-raw_read_seqcount_latch-during-.patch
index 4fa202afe..96629a193 100644
--- a/debian/patches-rt/0001-time-sched_clock-Use-raw_read_seqcount_latch-during-.patch
+++ b/debian/patches-rt/0001-time-sched_clock-Use-raw_read_seqcount_latch-during-.patch
@@ -2,7 +2,7 @@ From: "Ahmed S. Darwish" <a.darwish@linutronix.de>
Date: Thu, 27 Aug 2020 13:40:37 +0200
Subject: [PATCH 01/13] time/sched_clock: Use raw_read_seqcount_latch() during
suspend
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
sched_clock uses seqcount_t latching to switch between two storage
places protected by the sequence counter. This allows it to have
diff --git a/debian/patches-rt/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch b/debian/patches-rt/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
index 89aa0d342..573d0d364 100644
--- a/debian/patches-rt/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
+++ b/debian/patches-rt/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
@@ -2,7 +2,7 @@ From: Peter Zijlstra <peterz@infradead.org>
Date: Mon, 28 May 2018 15:24:21 +0200
Subject: [PATCH 2/4] Split IRQ-off and zone->lock while freeing pages from PCP
list #2
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Split the IRQ-off section while accessing the PCP list from zone->lock
while freeing pages.
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -1292,8 +1292,8 @@ static inline void prefetch_buddy(struct
+@@ -1293,8 +1293,8 @@ static inline void prefetch_buddy(struct
* And clear the zone's pages_scanned counter, to hold off the "all pages are
* pinned" detection logic.
*/
@@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
bool isolated_pageblocks;
struct page *page, *tmp;
-@@ -1308,12 +1308,27 @@ static void free_pcppages_bulk(struct zo
+@@ -1309,12 +1309,27 @@ static void free_pcppages_bulk(struct zo
*/
list_for_each_entry_safe(page, tmp, head, lru) {
int mt = get_pcppage_migratetype(page);
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
__free_one_page(page, page_to_pfn(page), zone, 0, mt, true);
trace_mm_page_pcpu_drain(page, 0, mt);
}
-@@ -2893,7 +2908,7 @@ void drain_zone_pages(struct zone *zone,
+@@ -2894,7 +2909,7 @@ void drain_zone_pages(struct zone *zone,
local_irq_restore(flags);
if (to_drain > 0)
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#endif
-@@ -2923,7 +2938,7 @@ static void drain_pages_zone(unsigned in
+@@ -2924,7 +2939,7 @@ static void drain_pages_zone(unsigned in
local_irq_restore(flags);
if (count)
@@ -75,7 +75,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -3122,7 +3137,8 @@ static bool free_unref_page_prepare(stru
+@@ -3123,7 +3138,8 @@ static bool free_unref_page_prepare(stru
return true;
}
@@ -85,7 +85,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct zone *zone = page_zone(page);
struct per_cpu_pages *pcp;
-@@ -3151,10 +3167,8 @@ static void free_unref_page_commit(struc
+@@ -3152,10 +3168,8 @@ static void free_unref_page_commit(struc
pcp->count++;
if (pcp->count >= pcp->high) {
unsigned long batch = READ_ONCE(pcp->batch);
@@ -97,7 +97,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -3165,13 +3179,17 @@ void free_unref_page(struct page *page)
+@@ -3166,13 +3180,17 @@ void free_unref_page(struct page *page)
{
unsigned long flags;
unsigned long pfn = page_to_pfn(page);
@@ -116,7 +116,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -3182,6 +3200,11 @@ void free_unref_page_list(struct list_he
+@@ -3183,6 +3201,11 @@ void free_unref_page_list(struct list_he
struct page *page, *next;
unsigned long flags, pfn;
int batch_count = 0;
@@ -128,7 +128,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Prepare pages for freeing */
list_for_each_entry_safe(page, next, list, lru) {
-@@ -3194,10 +3217,12 @@ void free_unref_page_list(struct list_he
+@@ -3195,10 +3218,12 @@ void free_unref_page_list(struct list_he
local_irq_save(flags);
list_for_each_entry_safe(page, next, list, lru) {
unsigned long pfn = page_private(page);
@@ -142,7 +142,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Guard against excessive IRQ disabled times when we get
-@@ -3210,6 +3235,21 @@ void free_unref_page_list(struct list_he
+@@ -3211,6 +3236,21 @@ void free_unref_page_list(struct list_he
}
}
local_irq_restore(flags);
diff --git a/debian/patches-rt/0002-blk-mq-Always-complete-remote-completions-requests-i.patch b/debian/patches-rt/0002-blk-mq-Always-complete-remote-completions-requests-i.patch
new file mode 100644
index 000000000..a0f9c953c
--- /dev/null
+++ b/debian/patches-rt/0002-blk-mq-Always-complete-remote-completions-requests-i.patch
@@ -0,0 +1,39 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 28 Oct 2020 11:07:09 +0100
+Subject: [PATCH 2/3] blk-mq: Always complete remote completions requests in
+ softirq
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+Controllers with multiple queues have their IRQ handlers pinned to a
+CPU. The core shouldn't need to complete the request on a remote CPU.
+
+Remove this case and always raise the softirq to complete the request.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ block/blk-mq.c | 14 +-------------
+ 1 file changed, 1 insertion(+), 13 deletions(-)
+
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -626,19 +626,7 @@ static void __blk_mq_complete_request_re
+ {
+ struct request *rq = data;
+
+- /*
+- * For most of single queue controllers, there is only one irq vector
+- * for handling I/O completion, and the only irq's affinity is set
+- * to all possible CPUs. On most of ARCHs, this affinity means the irq
+- * is handled on one specific CPU.
+- *
+- * So complete I/O requests in softirq context in case of single queue
+- * devices to avoid degrading I/O performance due to irqsoff latency.
+- */
+- if (rq->q->nr_hw_queues == 1)
+- blk_mq_trigger_softirq(rq);
+- else
+- rq->q->mq_ops->complete(rq);
++ blk_mq_trigger_softirq(rq);
+ }
+
+ static inline bool blk_mq_complete_need_ipi(struct request *rq)
diff --git a/debian/patches-rt/0002-locking-rtmutex-Remove-output-from-deadlock-detector.patch b/debian/patches-rt/0002-locking-rtmutex-Remove-output-from-deadlock-detector.patch
index 6784c14bb..66446b3ad 100644
--- a/debian/patches-rt/0002-locking-rtmutex-Remove-output-from-deadlock-detector.patch
+++ b/debian/patches-rt/0002-locking-rtmutex-Remove-output-from-deadlock-detector.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 29 Sep 2020 16:05:11 +0200
-Subject: [PATCH 02/23] locking/rtmutex: Remove output from deadlock detector.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Subject: [PATCH 02/22] locking/rtmutex: Remove output from deadlock detector.
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
In commit
f5694788ad8da ("rt_mutex: Add lockdep annotations")
diff --git a/debian/patches-rt/0002-mm-swap-Do-not-abuse-the-seqcount_t-latching-API.patch b/debian/patches-rt/0002-mm-swap-Do-not-abuse-the-seqcount_t-latching-API.patch
index fd1c07272..56514c1f5 100644
--- a/debian/patches-rt/0002-mm-swap-Do-not-abuse-the-seqcount_t-latching-API.patch
+++ b/debian/patches-rt/0002-mm-swap-Do-not-abuse-the-seqcount_t-latching-API.patch
@@ -1,7 +1,7 @@
From: "Ahmed S. Darwish" <a.darwish@linutronix.de>
Date: Thu, 27 Aug 2020 13:40:38 +0200
Subject: [PATCH 02/13] mm/swap: Do not abuse the seqcount_t latching API
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Commit eef1a429f234 ("mm/swap.c: piggyback lru_add_drain_all() calls")
implemented an optimization mechanism to exit the to-be-started LRU
diff --git a/debian/patches-rt/0002-printk-add-lockless-ringbuffer.patch b/debian/patches-rt/0002-printk-add-lockless-ringbuffer.patch
new file mode 100644
index 000000000..993b102df
--- /dev/null
+++ b/debian/patches-rt/0002-printk-add-lockless-ringbuffer.patch
@@ -0,0 +1,2148 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Thu, 9 Jul 2020 15:29:42 +0206
+Subject: [PATCH 02/25] printk: add lockless ringbuffer
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+Introduce a multi-reader multi-writer lockless ringbuffer for storing
+the kernel log messages. Readers and writers may use their API from
+any context (including scheduler and NMI). This ringbuffer will make
+it possible to decouple printk() callers from any context, locking,
+or console constraints. It also makes it possible for readers to have
+full access to the ringbuffer contents at any time and from any context
+(for example, from a panic situation).
+
+The printk_ringbuffer is made up of 3 internal ringbuffers:
+
+desc_ring:
+A ring of descriptors. A descriptor contains all record meta data
+(sequence number, timestamp, loglevel, etc.) as well as internal state
+information about the record and logical positions specifying where in
+the other ringbuffers the text and dictionary strings are located.
+
+text_data_ring:
+A ring of data blocks. A data block consists of an unsigned long
+integer (ID) that maps to a desc_ring index followed by the text
+string of the record.
+
+dict_data_ring:
+A ring of data blocks. A data block consists of an unsigned long
+integer (ID) that maps to a desc_ring index followed by the dictionary
+string of the record.
+
+The internal state information of a descriptor is the key element to
+allow readers and writers to locklessly synchronize access to the data.
+
+Co-developed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Reviewed-by: Paul E. McKenney <paulmck@kernel.org>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20200709132344.760-3-john.ogness@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/Makefile | 1
+ kernel/printk/printk_ringbuffer.c | 1687 ++++++++++++++++++++++++++++++++++++++
+ kernel/printk/printk_ringbuffer.h | 399 ++++++++
+ 3 files changed, 2087 insertions(+)
+ create mode 100644 kernel/printk/printk_ringbuffer.c
+ create mode 100644 kernel/printk/printk_ringbuffer.h
+
+--- a/kernel/printk/Makefile
++++ b/kernel/printk/Makefile
+@@ -2,3 +2,4 @@
+ obj-y = printk.o
+ obj-$(CONFIG_PRINTK) += printk_safe.o
+ obj-$(CONFIG_A11Y_BRAILLE_CONSOLE) += braille.o
++obj-$(CONFIG_PRINTK) += printk_ringbuffer.o
+--- /dev/null
++++ b/kernel/printk/printk_ringbuffer.c
+@@ -0,0 +1,1687 @@
++// SPDX-License-Identifier: GPL-2.0
++
++#include <linux/kernel.h>
++#include <linux/irqflags.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/bug.h>
++#include "printk_ringbuffer.h"
++
++/**
++ * DOC: printk_ringbuffer overview
++ *
++ * Data Structure
++ * --------------
++ * The printk_ringbuffer is made up of 3 internal ringbuffers:
++ *
++ * desc_ring
++ * A ring of descriptors. A descriptor contains all record meta data
++ * (sequence number, timestamp, loglevel, etc.) as well as internal state
++ * information about the record and logical positions specifying where in
++ * the other ringbuffers the text and dictionary strings are located.
++ *
++ * text_data_ring
++ * A ring of data blocks. A data block consists of an unsigned long
++ * integer (ID) that maps to a desc_ring index followed by the text
++ * string of the record.
++ *
++ * dict_data_ring
++ * A ring of data blocks. A data block consists of an unsigned long
++ * integer (ID) that maps to a desc_ring index followed by the dictionary
++ * string of the record.
++ *
++ * The internal state information of a descriptor is the key element to allow
++ * readers and writers to locklessly synchronize access to the data.
++ *
++ * Implementation
++ * --------------
++ *
++ * Descriptor Ring
++ * ~~~~~~~~~~~~~~~
++ * The descriptor ring is an array of descriptors. A descriptor contains all
++ * the meta data of a printk record as well as blk_lpos structs pointing to
++ * associated text and dictionary data blocks (see "Data Rings" below). Each
++ * descriptor is assigned an ID that maps directly to index values of the
++ * descriptor array and has a state. The ID and the state are bitwise combined
++ * into a single descriptor field named @state_var, allowing ID and state to
++ * be synchronously and atomically updated.
++ *
++ * Descriptors have three states:
++ *
++ * reserved
++ * A writer is modifying the record.
++ *
++ * committed
++ * The record and all its data are complete and available for reading.
++ *
++ * reusable
++ * The record exists, but its text and/or dictionary data may no longer
++ * be available.
++ *
++ * Querying the @state_var of a record requires providing the ID of the
++ * descriptor to query. This can yield a possible fourth (pseudo) state:
++ *
++ * miss
++ * The descriptor being queried has an unexpected ID.
++ *
++ * The descriptor ring has a @tail_id that contains the ID of the oldest
++ * descriptor and @head_id that contains the ID of the newest descriptor.
++ *
++ * When a new descriptor should be created (and the ring is full), the tail
++ * descriptor is invalidated by first transitioning to the reusable state and
++ * then invalidating all tail data blocks up to and including the data blocks
++ * associated with the tail descriptor (for text and dictionary rings). Then
++ * @tail_id is advanced, followed by advancing @head_id. And finally the
++ * @state_var of the new descriptor is initialized to the new ID and reserved
++ * state.
++ *
++ * The @tail_id can only be advanced if the new @tail_id would be in the
++ * committed or reusable queried state. This ensures that a valid
++ * sequence number for the tail is always available.
++ *
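++ * As an illustrative sketch (using the mask names from this patch; the
++ * exact bit layout is defined in printk_ringbuffer.h), the three states
++ * pack into @state_var as::
++ *
++ *   reserved:  state_var == id
++ *   committed: state_var == id | DESC_COMMITTED_MASK
++ *   reusable:  state_var == id | DESC_COMMITTED_MASK | DESC_REUSE_MASK
++ *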
++ * Data Rings
++ * ~~~~~~~~~~
++ * The two data rings (text and dictionary) function identically. They exist
++ * separately so that their buffer sizes can be individually set and they do
++ * not affect one another.
++ *
++ * Data rings are byte arrays composed of data blocks. Data blocks are
++ * referenced by blk_lpos structs that point to the logical position of the
++ * beginning of a data block and the beginning of the next adjacent data
++ * block. Logical positions are mapped directly to index values of the byte
++ * array ringbuffer.
++ *
++ * Each data block consists of an ID followed by the writer data. The ID is
++ * the identifier of a descriptor that is associated with the data block. A
++ * given data block is considered valid if all of the following conditions
++ * are met:
++ *
++ * 1) The descriptor associated with the data block is in the committed
++ * queried state.
++ *
++ * 2) The blk_lpos struct within the descriptor associated with the data
++ * block references back to the same data block.
++ *
++ * 3) The data block is within the head/tail logical position range.
++ *
++ * If the writer data of a data block would extend beyond the end of the
++ * byte array, only the ID of the data block is stored at the logical
++ * position and the full data block (ID and writer data) is stored at the
++ * beginning of the byte array. The referencing blk_lpos will point to the
++ * ID before the wrap and the next data block will be at the logical
++ * position adjacent to the full data block after the wrap.
++ *
++ * Data rings have a @tail_lpos that points to the beginning of the oldest
++ * data block and a @head_lpos that points to the logical position of the
++ * next (not yet existing) data block.
++ *
++ * When a new data block should be created (and the ring is full), tail data
++ * blocks will first be invalidated by putting their associated descriptors
++ * into the reusable state and then pushing the @tail_lpos forward beyond
++ * them. Then the @head_lpos is pushed forward and is associated with a new
++ * descriptor. If a data block is not valid, the @tail_lpos cannot be
++ * advanced beyond it.
++ *
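++ * As a worked example (illustrative sizes): with size_bits = 12 (a
++ * 4 KiB ring), the logical position 0x5010 maps to array index 0x010
++ * (the lpos masked by the ring size) and encodes that the array has
++ * wrapped 5 times (the lpos shifted right by size_bits).
++ *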
++ * Usage
++ * -----
++ * Here are some simple examples demonstrating writers and readers. For the
++ * examples a global ringbuffer (test_rb) is available (which is not the
++ * actual ringbuffer used by printk)::
++ *
++ * DEFINE_PRINTKRB(test_rb, 15, 5, 3);
++ *
++ * This ringbuffer allows up to 32768 records (2 ^ 15) and has a size of
++ * 1 MiB (2 ^ (15 + 5)) for text data and 256 KiB (2 ^ (15 + 3)) for
++ * dictionary data.
++ *
++ * Sample writer code::
++ *
++ * const char *dictstr = "dictionary text";
++ * const char *textstr = "message text";
++ * struct prb_reserved_entry e;
++ * struct printk_record r;
++ *
++ * // specify how much to allocate
++ * prb_rec_init_wr(&r, strlen(textstr) + 1, strlen(dictstr) + 1);
++ *
++ * if (prb_reserve(&e, &test_rb, &r)) {
++ * snprintf(r.text_buf, r.text_buf_size, "%s", textstr);
++ *
++ * // dictionary allocation may have failed
++ * if (r.dict_buf)
++ * snprintf(r.dict_buf, r.dict_buf_size, "%s", dictstr);
++ *
++ * r.info->ts_nsec = local_clock();
++ *
++ * prb_commit(&e);
++ * }
++ *
++ * Sample reader code::
++ *
++ * struct printk_info info;
++ * struct printk_record r;
++ * char text_buf[32];
++ * char dict_buf[32];
++ * u64 seq;
++ *
++ * prb_rec_init_rd(&r, &info, &text_buf[0], sizeof(text_buf),
++ * &dict_buf[0], sizeof(dict_buf));
++ *
++ * prb_for_each_record(0, &test_rb, &seq, &r) {
++ * if (info.seq != seq)
++ * pr_warn("lost %llu records\n", info.seq - seq);
++ *
++ * if (info.text_len > r.text_buf_size) {
++ * pr_warn("record %llu text truncated\n", info.seq);
++ * text_buf[r.text_buf_size - 1] = 0;
++ * }
++ *
++ * if (info.dict_len > r.dict_buf_size) {
++ * pr_warn("record %llu dict truncated\n", info.seq);
++ * dict_buf[r.dict_buf_size - 1] = 0;
++ * }
++ *
++ * pr_info("%llu: %llu: %s;%s\n", info.seq, info.ts_nsec,
++ * &text_buf[0], info.dict_len ? &dict_buf[0] : "");
++ * }
++ *
++ * Note that additional less convenient reader functions are available to
++ * allow complex record access.
++ *
++ * ABA Issues
++ * ~~~~~~~~~~
++ * To help avoid ABA issues, descriptors are referenced by IDs (array index
++ * values combined with tagged bits counting array wraps) and data blocks are
++ * referenced by logical positions (array index values combined with tagged
++ * bits counting array wraps). However, on 32-bit systems the number of
++ * tagged bits is relatively small such that an ABA incident is (at least
++ * theoretically) possible. For example, if 4 million maximally sized (1KiB)
++ * printk messages were to occur in NMI context on a 32-bit system, the
++ * interrupted context would not be able to recognize that the 32-bit integer
++ * completely wrapped and thus represents a different data block than the one
++ * the interrupted context expects.
++ *
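++ * (As a worked check of that figure: 2^32 bytes of logical position
++ * space divided by 1 KiB per message is 2^22, i.e. roughly 4 million
++ * messages.)
++ *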
++ * To help combat this possibility, additional state checking is performed
++ * (such as using cmpxchg() even though set() would suffice). These extra
++ * checks are commented as such and will hopefully catch any ABA issue that
++ * a 32-bit system might experience.
++ *
++ * Memory Barriers
++ * ~~~~~~~~~~~~~~~
++ * Multiple memory barriers are used. To simplify proving correctness and
++ * generating litmus tests, lines of code related to memory barriers
++ * (loads, stores, and the associated memory barriers) are labeled::
++ *
++ * LMM(function:letter)
++ *
++ * Comments reference the labels using only the "function:letter" part.
++ *
++ * The memory barrier pairs and their ordering are:
++ *
++ * desc_reserve:D / desc_reserve:B
++ * push descriptor tail (id), then push descriptor head (id)
++ *
++ * desc_reserve:D / data_push_tail:B
++ * push data tail (lpos), then set new descriptor reserved (state)
++ *
++ * desc_reserve:D / desc_push_tail:C
++ * push descriptor tail (id), then set new descriptor reserved (state)
++ *
++ * desc_reserve:D / prb_first_seq:C
++ * push descriptor tail (id), then set new descriptor reserved (state)
++ *
++ * desc_reserve:F / desc_read:D
++ * set new descriptor id and reserved (state), then allow writer changes
++ *
++ * data_alloc:A / desc_read:D
++ * set old descriptor reusable (state), then modify new data block area
++ *
++ * data_alloc:A / data_push_tail:B
++ * push data tail (lpos), then modify new data block area
++ *
++ * prb_commit:B / desc_read:B
++ * store writer changes, then set new descriptor committed (state)
++ *
++ * data_push_tail:D / data_push_tail:A
++ * set descriptor reusable (state), then push data tail (lpos)
++ *
++ * desc_push_tail:B / desc_reserve:D
++ * set descriptor reusable (state), then push descriptor tail (id)
++ */
++
++#define DATA_SIZE(data_ring) _DATA_SIZE((data_ring)->size_bits)
++#define DATA_SIZE_MASK(data_ring) (DATA_SIZE(data_ring) - 1)
++
++#define DESCS_COUNT(desc_ring) _DESCS_COUNT((desc_ring)->count_bits)
++#define DESCS_COUNT_MASK(desc_ring) (DESCS_COUNT(desc_ring) - 1)
++
++/* Determine the data array index from a logical position. */
++#define DATA_INDEX(data_ring, lpos) ((lpos) & DATA_SIZE_MASK(data_ring))
++
++/* Determine the desc array index from an ID or sequence number. */
++#define DESC_INDEX(desc_ring, n) ((n) & DESCS_COUNT_MASK(desc_ring))
++
++/* Determine how many times the data array has wrapped. */
++#define DATA_WRAPS(data_ring, lpos) ((lpos) >> (data_ring)->size_bits)
++
++/* Get the logical position at index 0 of the current wrap. */
++#define DATA_THIS_WRAP_START_LPOS(data_ring, lpos) \
++((lpos) & ~DATA_SIZE_MASK(data_ring))
++
++/* Get the ID for the same index of the previous wrap as the given ID. */
++#define DESC_ID_PREV_WRAP(desc_ring, id) \
++DESC_ID((id) - DESCS_COUNT(desc_ring))
++
++/*
++ * A data block: mapped directly to the beginning of the data block area
++ * specified as a logical position within the data ring.
++ *
++ * @id: the ID of the associated descriptor
++ * @data: the writer data
++ *
++ * Note that the size of a data block is only known by its associated
++ * descriptor.
++ */
++struct prb_data_block {
++ unsigned long id;
++ char data[0];
++};
++
++/*
++ * Return the descriptor associated with @n. @n can be either a
++ * descriptor ID or a sequence number.
++ */
++static struct prb_desc *to_desc(struct prb_desc_ring *desc_ring, u64 n)
++{
++ return &desc_ring->descs[DESC_INDEX(desc_ring, n)];
++}
++
++static struct prb_data_block *to_block(struct prb_data_ring *data_ring,
++ unsigned long begin_lpos)
++{
++ return (void *)&data_ring->data[DATA_INDEX(data_ring, begin_lpos)];
++}
++
++/*
++ * Increase the data size to account for data block meta data plus any
++ * padding so that the adjacent data block is aligned on the ID size.
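++ *
++ * For example (illustrative, assuming a 64-bit system): a 13-byte
++ * request grows to 13 + sizeof(*db) = 21 bytes and is then padded
++ * to 24.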
++ */
++static unsigned int to_blk_size(unsigned int size)
++{
++ struct prb_data_block *db = NULL;
++
++ size += sizeof(*db);
++ size = ALIGN(size, sizeof(db->id));
++ return size;
++}
++
++/*
++ * Sanity checker for reserve size. The ringbuffer code assumes that a data
++ * block does not exceed the maximum possible size that could fit within the
++ * ringbuffer. This function provides that basic size check so that the
++ * assumption is safe.
++ *
++ * Writers are also not allowed to write 0-sized (data-less) records. Such
++ * records are used only internally by the ringbuffer.
++ */
++static bool data_check_size(struct prb_data_ring *data_ring, unsigned int size)
++{
++ struct prb_data_block *db = NULL;
++
++ /*
++ * Writers are not allowed to write data-less records. Such records
++ * are used only internally by the ringbuffer to denote records where
++ * their data failed to allocate or have been lost.
++ */
++ if (size == 0)
++ return false;
++
++ /*
++ * Ensure the alignment padded size could possibly fit in the data
++ * array. The largest possible data block must still leave room for
++ * at least the ID of the next block.
++ */
++ size = to_blk_size(size);
++ if (size > DATA_SIZE(data_ring) - sizeof(db->id))
++ return false;
++
++ return true;
++}
++
++/* The possible responses of a descriptor state-query. */
++enum desc_state {
++ desc_miss, /* ID mismatch */
++ desc_reserved, /* reserved, in use by writer */
++ desc_committed, /* committed, writer is done */
++ desc_reusable, /* free, not yet used by any writer */
++};
++
++/* Query the state of a descriptor. */
++static enum desc_state get_desc_state(unsigned long id,
++ unsigned long state_val)
++{
++ if (id != DESC_ID(state_val))
++ return desc_miss;
++
++ if (state_val & DESC_REUSE_MASK)
++ return desc_reusable;
++
++ if (state_val & DESC_COMMITTED_MASK)
++ return desc_committed;
++
++ return desc_reserved;
++}
++
++/*
++ * Get a copy of a specified descriptor and its queried state. A descriptor
++ * that is not in the committed or reusable state must be considered garbage
++ * by the reader.
++ */
++static enum desc_state desc_read(struct prb_desc_ring *desc_ring,
++ unsigned long id, struct prb_desc *desc_out)
++{
++ struct prb_desc *desc = to_desc(desc_ring, id);
++ atomic_long_t *state_var = &desc->state_var;
++ enum desc_state d_state;
++ unsigned long state_val;
++
++ /* Check the descriptor state. */
++ state_val = atomic_long_read(state_var); /* LMM(desc_read:A) */
++ d_state = get_desc_state(id, state_val);
++ if (d_state != desc_committed && d_state != desc_reusable)
++ return d_state;
++
++ /*
++ * Guarantee the state is loaded before copying the descriptor
++ * content. This avoids copying obsolete descriptor content that might
++ * not apply to the descriptor state. This pairs with prb_commit:B.
++ *
++ * Memory barrier involvement:
++ *
++ * If desc_read:A reads from prb_commit:B, then desc_read:C reads
++ * from prb_commit:A.
++ *
++ * Relies on:
++ *
++ * WMB from prb_commit:A to prb_commit:B
++ * matching
++ * RMB from desc_read:A to desc_read:C
++ */
++ smp_rmb(); /* LMM(desc_read:B) */
++
++ /*
++ * Copy the descriptor data. The data is not valid until the
++ * state has been re-checked.
++ */
++ memcpy(desc_out, desc, sizeof(*desc_out)); /* LMM(desc_read:C) */
++
++ /*
++ * 1. Guarantee the descriptor content is loaded before re-checking
++ * the state. This avoids reading an obsolete descriptor state
++ * that may not apply to the copied content. This pairs with
++ * desc_reserve:F.
++ *
++ * Memory barrier involvement:
++ *
++ * If desc_read:C reads from desc_reserve:G, then desc_read:E
++ * reads from desc_reserve:F.
++ *
++ * Relies on:
++ *
++ * WMB from desc_reserve:F to desc_reserve:G
++ * matching
++ * RMB from desc_read:C to desc_read:E
++ *
++ * 2. Guarantee the record data is loaded before re-checking the
++ * state. This avoids reading an obsolete descriptor state that may
++ * not apply to the copied data. This pairs with data_alloc:A.
++ *
++ * Memory barrier involvement:
++ *
++ * If copy_data:A reads from data_alloc:B, then desc_read:E
++ * reads from desc_make_reusable:A.
++ *
++ * Relies on:
++ *
++ * MB from desc_make_reusable:A to data_alloc:B
++ * matching
++ * RMB from desc_read:C to desc_read:E
++ *
++ * Note: desc_make_reusable:A and data_alloc:B can be different
++ * CPUs. However, the data_alloc:B CPU (which performs the
++ * full memory barrier) must have previously seen
++ * desc_make_reusable:A.
++ */
++ smp_rmb(); /* LMM(desc_read:D) */
++
++ /* Re-check the descriptor state. */
++ state_val = atomic_long_read(state_var); /* LMM(desc_read:E) */
++ return get_desc_state(id, state_val);
++}
++
++/*
++ * Take a specified descriptor out of the committed state by attempting
++ * the transition from committed to reusable. Either this context or some
++ * other context will have been successful.
++ */
++static void desc_make_reusable(struct prb_desc_ring *desc_ring,
++ unsigned long id)
++{
++ unsigned long val_committed = id | DESC_COMMITTED_MASK;
++ unsigned long val_reusable = val_committed | DESC_REUSE_MASK;
++ struct prb_desc *desc = to_desc(desc_ring, id);
++ atomic_long_t *state_var = &desc->state_var;
++
++ atomic_long_cmpxchg_relaxed(state_var, val_committed,
++ val_reusable); /* LMM(desc_make_reusable:A) */
++}
++
++/*
++ * Given a data ring (text or dict), put the associated descriptor of each
++ * data block from @lpos_begin until @lpos_end into the reusable state.
++ *
++ * If there is any problem making the associated descriptor reusable, either
++ * the descriptor has not yet been committed or another writer context has
++ * already pushed the tail lpos past the problematic data block. Regardless,
++ * on error the caller can re-load the tail lpos to determine the situation.
++ */
++static bool data_make_reusable(struct printk_ringbuffer *rb,
++ struct prb_data_ring *data_ring,
++ unsigned long lpos_begin,
++ unsigned long lpos_end,
++ unsigned long *lpos_out)
++{
++ struct prb_desc_ring *desc_ring = &rb->desc_ring;
++ struct prb_data_blk_lpos *blk_lpos;
++ struct prb_data_block *blk;
++ enum desc_state d_state;
++ struct prb_desc desc;
++ unsigned long id;
++
++ /*
++ * Using the provided @data_ring, point @blk_lpos to the correct
++ * blk_lpos within the local copy of the descriptor.
++ */
++ if (data_ring == &rb->text_data_ring)
++ blk_lpos = &desc.text_blk_lpos;
++ else
++ blk_lpos = &desc.dict_blk_lpos;
++
++ /* Loop until @lpos_begin has advanced to or beyond @lpos_end. */
++ while ((lpos_end - lpos_begin) - 1 < DATA_SIZE(data_ring)) {
++ blk = to_block(data_ring, lpos_begin);
++
++ /*
++ * Load the block ID from the data block. This is a data race
++ * against a writer that may have newly reserved this data
++ * area. If the loaded value matches a valid descriptor ID,
++ * the blk_lpos of that descriptor will be checked to make
++ * sure it points back to this data block. If the check fails,
++ * the data area has been recycled by another writer.
++ */
++ id = blk->id; /* LMM(data_make_reusable:A) */
++
++ d_state = desc_read(desc_ring, id, &desc); /* LMM(data_make_reusable:B) */
++
++ switch (d_state) {
++ case desc_miss:
++ return false;
++ case desc_reserved:
++ return false;
++ case desc_committed:
++ /*
++ * This data block is invalid if the descriptor
++ * does not point back to it.
++ */
++ if (blk_lpos->begin != lpos_begin)
++ return false;
++ desc_make_reusable(desc_ring, id);
++ break;
++ case desc_reusable:
++ /*
++ * This data block is invalid if the descriptor
++ * does not point back to it.
++ */
++ if (blk_lpos->begin != lpos_begin)
++ return false;
++ break;
++ }
++
++ /* Advance @lpos_begin to the next data block. */
++ lpos_begin = blk_lpos->next;
++ }
++
++ *lpos_out = lpos_begin;
++ return true;
++}
++
++/*
++ * Advance the data ring tail to at least @lpos. This function puts
++ * descriptors into the reusable state if the tail is pushed beyond
++ * their associated data block.
++ */
++static bool data_push_tail(struct printk_ringbuffer *rb,
++ struct prb_data_ring *data_ring,
++ unsigned long lpos)
++{
++ unsigned long tail_lpos_new;
++ unsigned long tail_lpos;
++ unsigned long next_lpos;
++
++ /* If @lpos is not valid, there is nothing to do. */
++ if (lpos == INVALID_LPOS)
++ return true;
++
++ /*
++ * Any descriptor states that have transitioned to reusable due to the
++ * data tail being pushed to this loaded value will be visible to this
++ * CPU. This pairs with data_push_tail:D.
++ *
++ * Memory barrier involvement:
++ *
++ * If data_push_tail:A reads from data_push_tail:D, then this CPU can
++ * see desc_make_reusable:A.
++ *
++ * Relies on:
++ *
++ * MB from desc_make_reusable:A to data_push_tail:D
++ * matches
++ * READFROM from data_push_tail:D to data_push_tail:A
++ * thus
++ * READFROM from desc_make_reusable:A to this CPU
++ */
++ tail_lpos = atomic_long_read(&data_ring->tail_lpos); /* LMM(data_push_tail:A) */
++
++ /*
++ * Loop until the tail lpos is at or beyond @lpos. This condition
++ * may already be satisfied, resulting in no full memory barrier
++ * from data_push_tail:D being performed. However, since this CPU
++ * sees the new tail lpos, any descriptor states that transitioned to
++ * the reusable state must already be visible.
++ */
++ while ((lpos - tail_lpos) - 1 < DATA_SIZE(data_ring)) {
++ /*
++ * Make all descriptors reusable that are associated with
++ * data blocks before @lpos.
++ */
++ if (!data_make_reusable(rb, data_ring, tail_lpos, lpos,
++ &next_lpos)) {
++ /*
++ * 1. Guarantee the block ID loaded in
++ * data_make_reusable() is performed before
++ * reloading the tail lpos. The failed
++ * data_make_reusable() may be due to a newly
++ * recycled data area causing the tail lpos to
++ * have been previously pushed. This pairs with
++ * data_alloc:A.
++ *
++ * Memory barrier involvement:
++ *
++ * If data_make_reusable:A reads from data_alloc:B,
++ * then data_push_tail:C reads from
++ * data_push_tail:D.
++ *
++ * Relies on:
++ *
++ * MB from data_push_tail:D to data_alloc:B
++ * matching
++ * RMB from data_make_reusable:A to
++ * data_push_tail:C
++ *
++ * Note: data_push_tail:D and data_alloc:B can be
++ * different CPUs. However, the data_alloc:B
++ * CPU (which performs the full memory
++ * barrier) must have previously seen
++ * data_push_tail:D.
++ *
++ * 2. Guarantee the descriptor state loaded in
++ * data_make_reusable() is performed before
++ * reloading the tail lpos. The failed
++ * data_make_reusable() may be due to a newly
++ * recycled descriptor causing the tail lpos to
++ * have been previously pushed. This pairs with
++ * desc_reserve:D.
++ *
++ * Memory barrier involvement:
++ *
++ * If data_make_reusable:B reads from
++ * desc_reserve:F, then data_push_tail:C reads
++ * from data_push_tail:D.
++ *
++ * Relies on:
++ *
++ * MB from data_push_tail:D to desc_reserve:F
++ * matching
++ * RMB from data_make_reusable:B to
++ * data_push_tail:C
++ *
++ * Note: data_push_tail:D and desc_reserve:F can
++ * be different CPUs. However, the
++ * desc_reserve:F CPU (which performs the
++ * full memory barrier) must have previously
++ * seen data_push_tail:D.
++ */
++ smp_rmb(); /* LMM(data_push_tail:B) */
++
++ tail_lpos_new = atomic_long_read(&data_ring->tail_lpos
++ ); /* LMM(data_push_tail:C) */
++ if (tail_lpos_new == tail_lpos)
++ return false;
++
++ /* Another CPU pushed the tail. Try again. */
++ tail_lpos = tail_lpos_new;
++ continue;
++ }
++
++ /*
++ * Guarantee any descriptor states that have transitioned to
++ * reusable are stored before pushing the tail lpos. A full
++ * memory barrier is needed since other CPUs may have made
++ * the descriptor states reusable. This pairs with
++ * data_push_tail:A.
++ */
++ if (atomic_long_try_cmpxchg(&data_ring->tail_lpos, &tail_lpos,
++ next_lpos)) { /* LMM(data_push_tail:D) */
++ break;
++ }
++ }
++
++ return true;
++}
++
++/*
++ * Advance the desc ring tail. This function advances the tail by one
++ * descriptor, thus invalidating the oldest descriptor. Before advancing
++ * the tail, the tail descriptor is made reusable and all data blocks up to
++ * and including the descriptor's data block are invalidated (i.e. the data
++ * ring tail is pushed past the data block of the descriptor being made
++ * reusable).
++ */
++static bool desc_push_tail(struct printk_ringbuffer *rb,
++ unsigned long tail_id)
++{
++ struct prb_desc_ring *desc_ring = &rb->desc_ring;
++ enum desc_state d_state;
++ struct prb_desc desc;
++
++ d_state = desc_read(desc_ring, tail_id, &desc);
++
++ switch (d_state) {
++ case desc_miss:
++ /*
++ * If the ID is exactly 1 wrap behind the expected, it is
++ * in the process of being reserved by another writer and
++ * must be considered reserved.
++ */
++ if (DESC_ID(atomic_long_read(&desc.state_var)) ==
++ DESC_ID_PREV_WRAP(desc_ring, tail_id)) {
++ return false;
++ }
++
++ /*
++ * The ID has changed. Another writer must have pushed the
++ * tail and recycled the descriptor already. Success is
++ * returned because the caller is only interested in the
++ * specified tail being pushed, which it was.
++ */
++ return true;
++ case desc_reserved:
++ return false;
++ case desc_committed:
++ desc_make_reusable(desc_ring, tail_id);
++ break;
++ case desc_reusable:
++ break;
++ }
++
++ /*
++ * Data blocks must be invalidated before their associated
++ * descriptor can be made available for recycling. Invalidating
++ * them later is not possible because there is no way to trust
++ * data blocks once their associated descriptor is gone.
++ */
++
++ if (!data_push_tail(rb, &rb->text_data_ring, desc.text_blk_lpos.next))
++ return false;
++ if (!data_push_tail(rb, &rb->dict_data_ring, desc.dict_blk_lpos.next))
++ return false;
++
++ /*
++ * Check the next descriptor after @tail_id before pushing the tail
++ * to it because the tail must always be in a committed or reusable
++ * state. The implementation of prb_first_seq() relies on this.
++ *
++ * A successful read implies that the next descriptor is less than or
++ * equal to @head_id so there is no risk of pushing the tail past the
++ * head.
++ */
++ d_state = desc_read(desc_ring, DESC_ID(tail_id + 1), &desc); /* LMM(desc_push_tail:A) */
++
++ if (d_state == desc_committed || d_state == desc_reusable) {
++ /*
++ * Guarantee any descriptor states that have transitioned to
++ * reusable are stored before pushing the tail ID. This allows
++ * verifying the recycled descriptor state. A full memory
++ * barrier is needed since other CPUs may have made the
++ * descriptor states reusable. This pairs with desc_reserve:D.
++ */
++ atomic_long_cmpxchg(&desc_ring->tail_id, tail_id,
++ DESC_ID(tail_id + 1)); /* LMM(desc_push_tail:B) */
++ } else {
++ /*
++ * Guarantee the last state load from desc_read() is before
++ * reloading @tail_id in order to see a new tail ID in the
++ * case that the descriptor has been recycled. This pairs
++ * with desc_reserve:D.
++ *
++ * Memory barrier involvement:
++ *
++ * If desc_push_tail:A reads from desc_reserve:F, then
++ * desc_push_tail:D reads from desc_push_tail:B.
++ *
++ * Relies on:
++ *
++ * MB from desc_push_tail:B to desc_reserve:F
++ * matching
++ * RMB from desc_push_tail:A to desc_push_tail:D
++ *
++ * Note: desc_push_tail:B and desc_reserve:F can be different
++ * CPUs. However, the desc_reserve:F CPU (which performs
++ * the full memory barrier) must have previously seen
++ * desc_push_tail:B.
++ */
++ smp_rmb(); /* LMM(desc_push_tail:C) */
++
++ /*
++ * Re-check the tail ID. The descriptor following @tail_id is
++ * not in an allowed tail state. But if the tail has since
++ * been moved by another CPU, then it does not matter.
++ */
++ if (atomic_long_read(&desc_ring->tail_id) == tail_id) /* LMM(desc_push_tail:D) */
++ return false;
++ }
++
++ return true;
++}
++
++/* Reserve a new descriptor, invalidating the oldest if necessary. */
++static bool desc_reserve(struct printk_ringbuffer *rb, unsigned long *id_out)
++{
++ struct prb_desc_ring *desc_ring = &rb->desc_ring;
++ unsigned long prev_state_val;
++ unsigned long id_prev_wrap;
++ struct prb_desc *desc;
++ unsigned long head_id;
++ unsigned long id;
++
++ head_id = atomic_long_read(&desc_ring->head_id); /* LMM(desc_reserve:A) */
++
++ do {
++ desc = to_desc(desc_ring, head_id);
++
++ id = DESC_ID(head_id + 1);
++ id_prev_wrap = DESC_ID_PREV_WRAP(desc_ring, id);
++
++ /*
++ * Guarantee the head ID is read before reading the tail ID.
++ * Since the tail ID is updated before the head ID, this
++ * guarantees that @id_prev_wrap is never ahead of the tail
++ * ID. This pairs with desc_reserve:D.
++ *
++ * Memory barrier involvement:
++ *
++ * If desc_reserve:A reads from desc_reserve:D, then
++ * desc_reserve:C reads from desc_push_tail:B.
++ *
++ * Relies on:
++ *
++ * MB from desc_push_tail:B to desc_reserve:D
++ * matching
++ * RMB from desc_reserve:A to desc_reserve:C
++ *
++ * Note: desc_push_tail:B and desc_reserve:D can be different
++ * CPUs. However, the desc_reserve:D CPU (which performs
++ * the full memory barrier) must have previously seen
++ * desc_push_tail:B.
++ */
++ smp_rmb(); /* LMM(desc_reserve:B) */
++
++ if (id_prev_wrap == atomic_long_read(&desc_ring->tail_id
++ )) { /* LMM(desc_reserve:C) */
++ /*
++ * Make space for the new descriptor by
++ * advancing the tail.
++ */
++ if (!desc_push_tail(rb, id_prev_wrap))
++ return false;
++ }
++
++ /*
++ * 1. Guarantee the tail ID is read before validating the
++ * recycled descriptor state. A read memory barrier is
++ * sufficient for this. This pairs with desc_push_tail:B.
++ *
++ * Memory barrier involvement:
++ *
++ * If desc_reserve:C reads from desc_push_tail:B, then
++ * desc_reserve:E reads from desc_make_reusable:A.
++ *
++ * Relies on:
++ *
++ * MB from desc_make_reusable:A to desc_push_tail:B
++ * matching
++ * RMB from desc_reserve:C to desc_reserve:E
++ *
++ * Note: desc_make_reusable:A and desc_push_tail:B can be
++ * different CPUs. However, the desc_push_tail:B CPU
++ * (which performs the full memory barrier) must have
++ * previously seen desc_make_reusable:A.
++ *
++ * 2. Guarantee the tail ID is stored before storing the head
++ * ID. This pairs with desc_reserve:B.
++ *
++ * 3. Guarantee any data ring tail changes are stored before
++ * recycling the descriptor. Data ring tail changes can
++ * happen via desc_push_tail()->data_push_tail(). A full
++ * memory barrier is needed since another CPU may have
++ * pushed the data ring tails. This pairs with
++ * data_push_tail:B.
++ *
++ * 4. Guarantee a new tail ID is stored before recycling the
++ * descriptor. A full memory barrier is needed since
++ * another CPU may have pushed the tail ID. This pairs
++ * with desc_push_tail:C and this also pairs with
++ * prb_first_seq:C.
++ */
++ } while (!atomic_long_try_cmpxchg(&desc_ring->head_id, &head_id,
++ id)); /* LMM(desc_reserve:D) */
++
++ desc = to_desc(desc_ring, id);
++
++ /*
++ * If the descriptor has been recycled, verify the old state val.
++ * See "ABA Issues" about why this verification is performed.
++ */
++ prev_state_val = atomic_long_read(&desc->state_var); /* LMM(desc_reserve:E) */
++ if (prev_state_val &&
++ prev_state_val != (id_prev_wrap | DESC_COMMITTED_MASK | DESC_REUSE_MASK)) {
++ WARN_ON_ONCE(1);
++ return false;
++ }
++
++ /*
++ * Assign the descriptor a new ID and set its state to reserved.
++ * See "ABA Issues" about why cmpxchg() instead of set() is used.
++ *
++ * Guarantee the new descriptor ID and state is stored before making
++ * any other changes. A write memory barrier is sufficient for this.
++ * This pairs with desc_read:D.
++ */
++ if (!atomic_long_try_cmpxchg(&desc->state_var, &prev_state_val,
++ id | 0)) { /* LMM(desc_reserve:F) */
++ WARN_ON_ONCE(1);
++ return false;
++ }
++
++ /* Now data in @desc can be modified: LMM(desc_reserve:G) */
++
++ *id_out = id;
++ return true;
++}
++
++/* Determine the end of a data block. */
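++/*
++ * Illustrative example (assuming size_bits = 12, i.e. a 4 KiB ring): a
++ * block of size 0x20 starting at lpos 0xff8 would cross the array end,
++ * so its data is stored at the start of the next wrap and 0x1020 is
++ * returned as the end of the block.
++ */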
++static unsigned long get_next_lpos(struct prb_data_ring *data_ring,
++ unsigned long lpos, unsigned int size)
++{
++ unsigned long begin_lpos;
++ unsigned long next_lpos;
++
++ begin_lpos = lpos;
++ next_lpos = lpos + size;
++
++ /* First check if the data block does not wrap. */
++ if (DATA_WRAPS(data_ring, begin_lpos) == DATA_WRAPS(data_ring, next_lpos))
++ return next_lpos;
++
++ /* Wrapping data blocks store their data at the beginning. */
++ return (DATA_THIS_WRAP_START_LPOS(data_ring, next_lpos) + size);
++}
++
++/*
++ * Allocate a new data block, invalidating the oldest data block(s)
++ * if necessary. This function also associates the data block with
++ * a specified descriptor.
++ */
++static char *data_alloc(struct printk_ringbuffer *rb,
++ struct prb_data_ring *data_ring, unsigned int size,
++ struct prb_data_blk_lpos *blk_lpos, unsigned long id)
++{
++ struct prb_data_block *blk;
++ unsigned long begin_lpos;
++ unsigned long next_lpos;
++
++ if (size == 0) {
++ /* Specify a data-less block. */
++ blk_lpos->begin = INVALID_LPOS;
++ blk_lpos->next = INVALID_LPOS;
++ return NULL;
++ }
++
++ size = to_blk_size(size);
++
++ begin_lpos = atomic_long_read(&data_ring->head_lpos);
++
++ do {
++ next_lpos = get_next_lpos(data_ring, begin_lpos, size);
++
++ if (!data_push_tail(rb, data_ring, next_lpos - DATA_SIZE(data_ring))) {
++ /* Failed to allocate, specify a data-less block. */
++ blk_lpos->begin = INVALID_LPOS;
++ blk_lpos->next = INVALID_LPOS;
++ return NULL;
++ }
++
++ /*
++ * 1. Guarantee any descriptor states that have transitioned
++ * to reusable are stored before modifying the newly
++ * allocated data area. A full memory barrier is needed
++ * since other CPUs may have made the descriptor states
++ * reusable. See data_push_tail:A about why the reusable
++ * states are visible. This pairs with desc_read:D.
++ *
++ * 2. Guarantee any updated tail lpos is stored before
++ * modifying the newly allocated data area. Another CPU may
++ * be in data_make_reusable() and is reading a block ID
++ * from this area. data_make_reusable() can handle reading
++ * a garbage block ID value, but then it must be able to
++ * load a new tail lpos. A full memory barrier is needed
++ * since other CPUs may have updated the tail lpos. This
++ * pairs with data_push_tail:B.
++ */
++ } while (!atomic_long_try_cmpxchg(&data_ring->head_lpos, &begin_lpos,
++ next_lpos)); /* LMM(data_alloc:A) */
++
++ blk = to_block(data_ring, begin_lpos);
++ blk->id = id; /* LMM(data_alloc:B) */
++
++ if (DATA_WRAPS(data_ring, begin_lpos) != DATA_WRAPS(data_ring, next_lpos)) {
++ /* Wrapping data blocks store their data at the beginning. */
++ blk = to_block(data_ring, 0);
++
++ /*
++ * Store the ID on the wrapped block for consistency.
++ * The printk_ringbuffer does not actually use it.
++ */
++ blk->id = id;
++ }
++
++ blk_lpos->begin = begin_lpos;
++ blk_lpos->next = next_lpos;
++
++ return &blk->data[0];
++}
++
++/* Return the number of bytes used by a data block. */
++static unsigned int space_used(struct prb_data_ring *data_ring,
++ struct prb_data_blk_lpos *blk_lpos)
++{
++ if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next)) {
++ /* Data block does not wrap. */
++ return (DATA_INDEX(data_ring, blk_lpos->next) -
++ DATA_INDEX(data_ring, blk_lpos->begin));
++ }
++
++ /*
++ * For wrapping data blocks, the trailing (wasted) space is
++ * also counted.
++ */
++ return (DATA_INDEX(data_ring, blk_lpos->next) +
++ DATA_SIZE(data_ring) - DATA_INDEX(data_ring, blk_lpos->begin));
++}
++
++/**
++ * prb_reserve() - Reserve space in the ringbuffer.
++ *
++ * @e: The entry structure to setup.
++ * @rb: The ringbuffer to reserve data in.
++ * @r: The record structure to allocate buffers for.
++ *
++ * This is the public function available to writers to reserve data.
++ *
++ * The writer specifies the text and dict sizes to reserve by setting the
++ * @text_buf_size and @dict_buf_size fields of @r, respectively. Dictionaries
++ * are optional, so @dict_buf_size is allowed to be 0. To ensure proper
++ * initialization of @r, prb_rec_init_wr() should be used.
++ *
++ * Context: Any context. Disables local interrupts on success.
++ * Return: true if at least text data could be allocated, otherwise false.
++ *
++ * On success, the fields @info, @text_buf, @dict_buf of @r will be set by
++ * this function and should be filled in by the writer before committing. Also
++ * on success, prb_record_text_space() can be used on @e to query the actual
++ * space used for the text data block.
++ *
++ * If the function fails to reserve dictionary space (but all else succeeded),
++ * it will still report success. In that case @dict_buf is set to NULL and
++ * @dict_buf_size is set to 0. Writers must check this before writing to
++ * dictionary space.
++ *
++ * @info->text_len and @info->dict_len will already be set to @text_buf_size
++ * and @dict_buf_size, respectively. If dictionary space reservation fails,
++ * @info->dict_len is set to 0.
++ */
++bool prb_reserve(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
++ struct printk_record *r)
++{
++ struct prb_desc_ring *desc_ring = &rb->desc_ring;
++ struct prb_desc *d;
++ unsigned long id;
++
++ if (!data_check_size(&rb->text_data_ring, r->text_buf_size))
++ goto fail;
++
++ /* Records are allowed to not have dictionaries. */
++ if (r->dict_buf_size) {
++ if (!data_check_size(&rb->dict_data_ring, r->dict_buf_size))
++ goto fail;
++ }
++
++ /*
++ * Descriptors in the reserved state act as blockers to all further
++ * reservations once the desc_ring has fully wrapped. Disable
++ * interrupts during the reserve/commit window in order to minimize
++ * the likelihood of this happening.
++ */
++ local_irq_save(e->irqflags);
++
++ if (!desc_reserve(rb, &id)) {
++ /* Descriptor reservation failures are tracked. */
++ atomic_long_inc(&rb->fail);
++ local_irq_restore(e->irqflags);
++ goto fail;
++ }
++
++ d = to_desc(desc_ring, id);
++
++ /*
++ * Set the @e fields here so that prb_commit() can be used if
++ * text data allocation fails.
++ */
++ e->rb = rb;
++ e->id = id;
++
++ /*
++ * Initialize the sequence number if it has "never been set".
++ * Otherwise just increment it by a full wrap.
++ *
++ * @seq is considered "never been set" if it has a value of 0,
++ * _except_ for @descs[0], which was specially set up by the ringbuffer
++ * initializer and therefore is always considered as set.
++ *
++ * See the "Bootstrap" comment block in printk_ringbuffer.h for
++ * details about how the initializer bootstraps the descriptors.
++ */
++ if (d->info.seq == 0 && DESC_INDEX(desc_ring, id) != 0)
++ d->info.seq = DESC_INDEX(desc_ring, id);
++ else
++ d->info.seq += DESCS_COUNT(desc_ring);
++
++ r->text_buf = data_alloc(rb, &rb->text_data_ring, r->text_buf_size,
++ &d->text_blk_lpos, id);
++ /* If text data allocation fails, a data-less record is committed. */
++ if (r->text_buf_size && !r->text_buf) {
++ d->info.text_len = 0;
++ d->info.dict_len = 0;
++ prb_commit(e);
++ /* prb_commit() re-enabled interrupts. */
++ goto fail;
++ }
++
++ r->dict_buf = data_alloc(rb, &rb->dict_data_ring, r->dict_buf_size,
++ &d->dict_blk_lpos, id);
++ /*
++ * If dict data allocation fails, the caller can still commit
++ * text. But dictionary information will not be available.
++ */
++ if (r->dict_buf_size && !r->dict_buf)
++ r->dict_buf_size = 0;
++
++ r->info = &d->info;
++
++ /* Set default values for the sizes. */
++ d->info.text_len = r->text_buf_size;
++ d->info.dict_len = r->dict_buf_size;
++
++ /* Record full text space used by record. */
++ e->text_space = space_used(&rb->text_data_ring, &d->text_blk_lpos);
++
++ return true;
++fail:
++ /* Make it clear to the caller that the reserve failed. */
++ memset(r, 0, sizeof(*r));
++ return false;
++}
++
++/**
++ * prb_commit() - Commit (previously reserved) data to the ringbuffer.
++ *
++ * @e: The entry containing the reserved data information.
++ *
++ * This is the public function available to writers to commit data.
++ *
++ * Context: Any context. Enables local interrupts.
++ */
++void prb_commit(struct prb_reserved_entry *e)
++{
++ struct prb_desc_ring *desc_ring = &e->rb->desc_ring;
++ struct prb_desc *d = to_desc(desc_ring, e->id);
++ unsigned long prev_state_val = e->id | 0;
++
++ /* Now the writer has finished all writing: LMM(prb_commit:A) */
++
++ /*
++ * Set the descriptor as committed. See "ABA Issues" about why
++ * cmpxchg() instead of set() is used.
++ *
++ * Guarantee all record data is stored before the descriptor state
++ * is stored as committed. A write memory barrier is sufficient for
++ * this. This pairs with desc_read:B.
++ */
++ if (!atomic_long_try_cmpxchg(&d->state_var, &prev_state_val,
++ e->id | DESC_COMMITTED_MASK)) { /* LMM(prb_commit:B) */
++ WARN_ON_ONCE(1);
++ }
++
++ /* Restore interrupts, the reserve/commit window is finished. */
++ local_irq_restore(e->irqflags);
++}
++
++/*
++ * Given @blk_lpos, return a pointer to the writer data from the data block
++ * and calculate the size of the data part. A NULL pointer is returned if
++ * @blk_lpos specifies values that could never be legal.
++ *
++ * This function (used by readers) performs strict validation on the lpos
++ * values to possibly detect bugs in the writer code. A WARN_ON_ONCE() is
++ * triggered if an internal error is detected.
++ */
++static char *get_data(struct prb_data_ring *data_ring,
++ struct prb_data_blk_lpos *blk_lpos,
++ unsigned int *data_size)
++{
++ struct prb_data_block *db;
++
++ /* Data-less data block description. */
++ if (blk_lpos->begin == INVALID_LPOS &&
++ blk_lpos->next == INVALID_LPOS) {
++ return NULL;
++ }
++
++ /* Regular data block: @begin less than @next and in same wrap. */
++ if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next) &&
++ blk_lpos->begin < blk_lpos->next) {
++ db = to_block(data_ring, blk_lpos->begin);
++ *data_size = blk_lpos->next - blk_lpos->begin;
++
++ /* Wrapping data block: @begin is one wrap behind @next. */
++ } else if (DATA_WRAPS(data_ring, blk_lpos->begin + DATA_SIZE(data_ring)) ==
++ DATA_WRAPS(data_ring, blk_lpos->next)) {
++ db = to_block(data_ring, 0);
++ *data_size = DATA_INDEX(data_ring, blk_lpos->next);
++
++ /* Illegal block description. */
++ } else {
++ WARN_ON_ONCE(1);
++ return NULL;
++ }
++
++ /* A valid data block will always be aligned to the ID size. */
++ if (WARN_ON_ONCE(blk_lpos->begin != ALIGN(blk_lpos->begin, sizeof(db->id))) ||
++ WARN_ON_ONCE(blk_lpos->next != ALIGN(blk_lpos->next, sizeof(db->id)))) {
++ return NULL;
++ }
++
++ /* A valid data block will always have at least an ID. */
++ if (WARN_ON_ONCE(*data_size < sizeof(db->id)))
++ return NULL;
++
++ /* Subtract block ID space from size to reflect data size. */
++ *data_size -= sizeof(db->id);
++
++ return &db->data[0];
++}
++
++/*
++ * Count the number of lines in provided text. All text has at least 1 line
++ * (even if @text_size is 0). Each '\n' processed is counted as an additional
++ * line.
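++ *
++ * For example (illustrative): "a\nb" counts as 2 lines, "a\nb\n" as 3,
++ * and a 0-sized text as 1 line.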
++ */
++static unsigned int count_lines(char *text, unsigned int text_size)
++{
++ unsigned int next_size = text_size;
++ unsigned int line_count = 1;
++ char *next = text;
++
++ while (next_size) {
++ next = memchr(next, '\n', next_size);
++ if (!next)
++ break;
++ line_count++;
++ next++;
++ next_size = text_size - (next - text);
++ }
++
++ return line_count;
++}
++
++/*
++ * Given @blk_lpos, copy an expected @len of data into the provided buffer.
++ * If @line_count is provided, count the number of lines in the data.
++ *
++ * This function (used by readers) performs strict validation on the data
++ * size to possibly detect bugs in the writer code. A WARN_ON_ONCE() is
++ * triggered if an internal error is detected.
++ */
++static bool copy_data(struct prb_data_ring *data_ring,
++ struct prb_data_blk_lpos *blk_lpos, u16 len, char *buf,
++ unsigned int buf_size, unsigned int *line_count)
++{
++ unsigned int data_size;
++ char *data;
++
++ /* Caller might not want any data. */
++ if ((!buf || !buf_size) && !line_count)
++ return true;
++
++ data = get_data(data_ring, blk_lpos, &data_size);
++ if (!data)
++ return false;
++
++ /*
++ * Actual cannot be less than expected. It can be more than expected
++ * because of the trailing alignment padding.
++ */
++ if (WARN_ON_ONCE(data_size < (unsigned int)len)) {
++ pr_warn_once("wrong data size (%u, expecting %hu) for data: %.*s\n",
++ data_size, len, data_size, data);
++ return false;
++ }
++
++ /* Caller interested in the line count? */
++ if (line_count)
++ *line_count = count_lines(data, data_size);
++
++ /* Caller interested in the data content? */
++ if (!buf || !buf_size)
++ return true;
++
++ data_size = min_t(u16, buf_size, len);
++
++ if (!WARN_ON_ONCE(!data_size))
++ memcpy(&buf[0], data, data_size); /* LMM(copy_data:A) */
++ return true;
++}
++
++/*
++ * This is an extended version of desc_read(). It gets a copy of a specified
++ * descriptor. However, it also verifies that the record is committed and has
++ * the sequence number @seq. On success, 0 is returned.
++ *
++ * Error return values:
++ * -EINVAL: A committed record with sequence number @seq does not exist.
++ * -ENOENT: A committed record with sequence number @seq exists, but its data
++ * is not available. This is a valid record, so readers should
++ * continue with the next record.
++ */
++static int desc_read_committed_seq(struct prb_desc_ring *desc_ring,
++ unsigned long id, u64 seq,
++ struct prb_desc *desc_out)
++{
++ struct prb_data_blk_lpos *blk_lpos = &desc_out->text_blk_lpos;
++ enum desc_state d_state;
++
++ d_state = desc_read(desc_ring, id, desc_out);
++
++ /*
++ * An unexpected @id (desc_miss) or @seq mismatch means the record
++ * does not exist. A descriptor in the reserved state means the
++ * record does not yet exist for the reader.
++ */
++ if (d_state == desc_miss ||
++ d_state == desc_reserved ||
++ desc_out->info.seq != seq) {
++ return -EINVAL;
++ }
++
++ /*
++ * A descriptor in the reusable state may no longer have its data
++ * available; report it as a data-less record. Or the record may
++ * actually be a data-less record.
++ */
++ if (d_state == desc_reusable ||
++ (blk_lpos->begin == INVALID_LPOS && blk_lpos->next == INVALID_LPOS)) {
++ return -ENOENT;
++ }
++
++ return 0;
++}
++
++/*
++ * Copy the ringbuffer data from the record with @seq to the provided
++ * @r buffer. On success, 0 is returned.
++ *
++ * See desc_read_committed_seq() for error return values.
++ */
++static int prb_read(struct printk_ringbuffer *rb, u64 seq,
++ struct printk_record *r, unsigned int *line_count)
++{
++ struct prb_desc_ring *desc_ring = &rb->desc_ring;
++ struct prb_desc *rdesc = to_desc(desc_ring, seq);
++ atomic_long_t *state_var = &rdesc->state_var;
++ struct prb_desc desc;
++ unsigned long id;
++ int err;
++
++ /* Extract the ID, used to specify the descriptor to read. */
++ id = DESC_ID(atomic_long_read(state_var));
++
++ /* Get a local copy of the correct descriptor (if available). */
++ err = desc_read_committed_seq(desc_ring, id, seq, &desc);
++
++ /*
++ * If @r is NULL, the caller is only interested in the availability
++ * of the record.
++ */
++ if (err || !r)
++ return err;
++
++ /* If requested, copy meta data. */
++ if (r->info)
++ memcpy(r->info, &desc.info, sizeof(*(r->info)));
++
++ /* Copy text data. If it fails, this is a data-less record. */
++ if (!copy_data(&rb->text_data_ring, &desc.text_blk_lpos, desc.info.text_len,
++ r->text_buf, r->text_buf_size, line_count)) {
++ return -ENOENT;
++ }
++
++ /*
++ * Copy dict data. Although this should not fail, dict data is not
++ * important. So if it fails, modify the copied meta data to report
++ * that there is no dict data, thus silently dropping the dict data.
++ */
++ if (!copy_data(&rb->dict_data_ring, &desc.dict_blk_lpos, desc.info.dict_len,
++ r->dict_buf, r->dict_buf_size, NULL)) {
++ if (r->info)
++ r->info->dict_len = 0;
++ }
++
++ /* Ensure the record is still committed and has the same @seq. */
++ return desc_read_committed_seq(desc_ring, id, seq, &desc);
++}
++
++/* Get the sequence number of the tail descriptor. */
++static u64 prb_first_seq(struct printk_ringbuffer *rb)
++{
++ struct prb_desc_ring *desc_ring = &rb->desc_ring;
++ enum desc_state d_state;
++ struct prb_desc desc;
++ unsigned long id;
++
++ for (;;) {
++ id = atomic_long_read(&rb->desc_ring.tail_id); /* LMM(prb_first_seq:A) */
++
++ d_state = desc_read(desc_ring, id, &desc); /* LMM(prb_first_seq:B) */
++
++ /*
++ * This loop will not be infinite because the tail is
++ * _always_ in the committed or reusable state.
++ */
++ if (d_state == desc_committed || d_state == desc_reusable)
++ break;
++
++ /*
++ * Guarantee the last state load from desc_read() is before
++ * reloading @tail_id in order to see a new tail in the case
++ * that the descriptor has been recycled. This pairs with
++ * desc_reserve:D.
++ *
++ * Memory barrier involvement:
++ *
++ * If prb_first_seq:B reads from desc_reserve:F, then
++ * prb_first_seq:A reads from desc_push_tail:B.
++ *
++ * Relies on:
++ *
++ * MB from desc_push_tail:B to desc_reserve:F
++ * matching
++ * RMB prb_first_seq:B to prb_first_seq:A
++ */
++ smp_rmb(); /* LMM(prb_first_seq:C) */
++ }
++
++ return desc.info.seq;
++}
++
++/*
++ * Non-blocking read of a record. Updates @seq to the last committed record
++ * (which may have no data).
++ *
++ * See the description of prb_read_valid() and prb_read_valid_info()
++ * for details.
++ */
++static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq,
++ struct printk_record *r, unsigned int *line_count)
++{
++ u64 tail_seq;
++ int err;
++
++ while ((err = prb_read(rb, *seq, r, line_count))) {
++ tail_seq = prb_first_seq(rb);
++
++ if (*seq < tail_seq) {
++ /*
++ * Behind the tail. Catch up and try again. This
++ * can happen for -ENOENT and -EINVAL cases.
++ */
++ *seq = tail_seq;
++
++ } else if (err == -ENOENT) {
++ /* Record exists, but no data available. Skip. */
++ (*seq)++;
++
++ } else {
++ /* Non-existent/non-committed record. Must stop. */
++ return false;
++ }
++ }
++
++ return true;
++}
++
++/**
++ * prb_read_valid() - Non-blocking read of a requested record or (if gone)
++ * the next available record.
++ *
++ * @rb: The ringbuffer to read from.
++ * @seq: The sequence number of the record to read.
++ * @r: A record data buffer to store the read record to.
++ *
++ * This is the public function available to readers to read a record.
++ *
++ * The reader provides the @info, @text_buf, @dict_buf buffers of @r to be
++ * filled in. Any of the buffer pointers can be set to NULL if the reader
++ * is not interested in that data. To ensure proper initialization of @r,
++ * prb_rec_init_rd() should be used.
++ *
++ * Context: Any context.
++ * Return: true if a record was read, otherwise false.
++ *
++ * On success, the reader must check r->info.seq to see which record was
++ * actually read. This allows the reader to detect dropped records.
++ *
++ * Failure means @seq refers to a not yet written record.
++ */
++bool prb_read_valid(struct printk_ringbuffer *rb, u64 seq,
++ struct printk_record *r)
++{
++ return _prb_read_valid(rb, &seq, r, NULL);
++}
++
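++/*
++ * A minimal reader sketch (illustration only; @rb, the buffer size and
++ * the hypothetical consume() helper are assumptions, not part of this
++ * patch):
++ *
++ *  struct printk_info info;
++ *  struct printk_record r;
++ *  char text_buf[64];
++ *  u64 seq = 0;
++ *
++ *  prb_rec_init_rd(&r, &info, &text_buf[0], sizeof(text_buf), NULL, 0);
++ *
++ *  if (prb_read_valid(rb, seq, &r)) {
++ *      // info.seq may exceed @seq if older records were overwritten
++ *      consume(info.seq, &text_buf[0], info.text_len);
++ *  }
++ */
++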
++/**
++ * prb_read_valid_info() - Non-blocking read of meta data for a requested
++ * record or (if gone) the next available record.
++ *
++ * @rb: The ringbuffer to read from.
++ * @seq: The sequence number of the record to read.
++ * @info: A buffer to store the read record meta data to.
++ * @line_count: A buffer to store the number of lines in the record text.
++ *
++ * This is the public function available to readers to read only the
++ * meta data of a record.
++ *
++ * The reader provides the @info, @line_count buffers to be filled in.
++ * Either of the buffer pointers can be set to NULL if the reader is not
++ * interested in that data.
++ *
++ * Context: Any context.
++ * Return: true if a record's meta data was read, otherwise false.
++ *
++ * On success, the reader must check info->seq to see which record meta data
++ * was actually read. This allows the reader to detect dropped records.
++ *
++ * Failure means @seq refers to a not yet written record.
++ */
++bool prb_read_valid_info(struct printk_ringbuffer *rb, u64 seq,
++ struct printk_info *info, unsigned int *line_count)
++{
++ struct printk_record r;
++
++ prb_rec_init_rd(&r, info, NULL, 0, NULL, 0);
++
++ return _prb_read_valid(rb, &seq, &r, line_count);
++}
++
++/**
++ * prb_first_valid_seq() - Get the sequence number of the oldest available
++ * record.
++ *
++ * @rb: The ringbuffer to get the sequence number from.
++ *
++ * This is the public function available to readers to see what the
++ * first/oldest valid sequence number is.
++ *
++ * This provides readers a starting point to begin iterating the ringbuffer.
++ *
++ * Context: Any context.
++ * Return: The sequence number of the first/oldest record or, if the
++ * ringbuffer is empty, 0.
++ */
++u64 prb_first_valid_seq(struct printk_ringbuffer *rb)
++{
++ u64 seq = 0;
++
++ if (!_prb_read_valid(rb, &seq, NULL, NULL))
++ return 0;
++
++ return seq;
++}
++
++/**
++ * prb_next_seq() - Get the sequence number after the last available record.
++ *
++ * @rb: The ringbuffer to get the sequence number from.
++ *
++ * This is the public function available to readers to see what the next
++ * newest sequence number available to readers will be.
++ *
++ * This provides readers a sequence number to jump to if all currently
++ * available records should be skipped.
++ *
++ * Context: Any context.
++ * Return: The sequence number of the next newest (not yet available) record
++ * for readers.
++ */
++u64 prb_next_seq(struct printk_ringbuffer *rb)
++{
++ u64 seq = 0;
++
++ /* Search forward from the oldest descriptor. */
++ while (_prb_read_valid(rb, &seq, NULL, NULL))
++ seq++;
++
++ return seq;
++}
++
++/**
++ * prb_init() - Initialize a ringbuffer to use provided external buffers.
++ *
++ * @rb: The ringbuffer to initialize.
++ * @text_buf: The data buffer for text data.
++ * @textbits: The size of @text_buf as a power-of-2 value.
++ * @dict_buf: The data buffer for dictionary data.
++ * @dictbits: The size of @dict_buf as a power-of-2 value.
++ * @descs: The descriptor buffer for ringbuffer records.
++ * @descbits: The count of @descs items as a power-of-2 value.
++ *
++ * This is the public function available to writers to setup a ringbuffer
++ * during runtime using provided buffers.
++ *
++ * This must match the initialization of DEFINE_PRINTKRB().
++ *
++ * Context: Any context.
++ */
++void prb_init(struct printk_ringbuffer *rb,
++ char *text_buf, unsigned int textbits,
++ char *dict_buf, unsigned int dictbits,
++ struct prb_desc *descs, unsigned int descbits)
++{
++ memset(descs, 0, _DESCS_COUNT(descbits) * sizeof(descs[0]));
++
++ rb->desc_ring.count_bits = descbits;
++ rb->desc_ring.descs = descs;
++ atomic_long_set(&rb->desc_ring.head_id, DESC0_ID(descbits));
++ atomic_long_set(&rb->desc_ring.tail_id, DESC0_ID(descbits));
++
++ rb->text_data_ring.size_bits = textbits;
++ rb->text_data_ring.data = text_buf;
++ atomic_long_set(&rb->text_data_ring.head_lpos, BLK0_LPOS(textbits));
++ atomic_long_set(&rb->text_data_ring.tail_lpos, BLK0_LPOS(textbits));
++
++ rb->dict_data_ring.size_bits = dictbits;
++ rb->dict_data_ring.data = dict_buf;
++ atomic_long_set(&rb->dict_data_ring.head_lpos, BLK0_LPOS(dictbits));
++ atomic_long_set(&rb->dict_data_ring.tail_lpos, BLK0_LPOS(dictbits));
++
++ atomic_long_set(&rb->fail, 0);
++
++ descs[0].info.seq = -(u64)_DESCS_COUNT(descbits);
++
++ descs[_DESCS_COUNT(descbits) - 1].info.seq = 0;
++ atomic_long_set(&(descs[_DESCS_COUNT(descbits) - 1].state_var), DESC0_SV(descbits));
++ descs[_DESCS_COUNT(descbits) - 1].text_blk_lpos.begin = INVALID_LPOS;
++ descs[_DESCS_COUNT(descbits) - 1].text_blk_lpos.next = INVALID_LPOS;
++ descs[_DESCS_COUNT(descbits) - 1].dict_blk_lpos.begin = INVALID_LPOS;
++ descs[_DESCS_COUNT(descbits) - 1].dict_blk_lpos.next = INVALID_LPOS;
++}
++
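++/*
++ * A minimal runtime-setup sketch (illustration only; the buffer names and
++ * sizes are assumptions, and the alignment mirrors DEFINE_PRINTKRB). The
++ * sizes/counts are passed as base-2 exponents: here 4 KiB text and dict
++ * buffers and 32 descriptors:
++ *
++ *  static char text_buf[1 << 12] __aligned(__alignof__(unsigned long));
++ *  static char dict_buf[1 << 12] __aligned(__alignof__(unsigned long));
++ *  static struct prb_desc descs[1 << 5];
++ *  static struct printk_ringbuffer rb;
++ *
++ *  prb_init(&rb, &text_buf[0], 12, &dict_buf[0], 12, &descs[0], 5);
++ */
++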
++/**
++ * prb_record_text_space() - Query the full actual used ringbuffer space for
++ * the text data of a reserved entry.
++ *
++ * @e: The successfully reserved entry to query.
++ *
++ * This is the public function available to writers to see how much actual
++ * space is used in the ringbuffer to store the text data of the specified
++ * entry.
++ *
++ * This function is only valid if @e has been successfully reserved using
++ * prb_reserve().
++ *
++ * Context: Any context.
++ * Return: The size in bytes used by the text data of the associated record.
++ */
++unsigned int prb_record_text_space(struct prb_reserved_entry *e)
++{
++ return e->text_space;
++}
+--- /dev/null
++++ b/kernel/printk/printk_ringbuffer.h
+@@ -0,0 +1,399 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++
++#ifndef _KERNEL_PRINTK_RINGBUFFER_H
++#define _KERNEL_PRINTK_RINGBUFFER_H
++
++#include <linux/atomic.h>
++
++/*
++ * Meta information about each stored message.
++ *
++ * All fields are set and used by the printk code except for
++ * @seq, @text_len, @dict_len, which are set and/or modified
++ * by the ringbuffer code.
++ */
++struct printk_info {
++ u64 seq; /* sequence number */
++ u64 ts_nsec; /* timestamp in nanoseconds */
++ u16 text_len; /* length of text message */
++ u16 dict_len; /* length of dictionary message */
++ u8 facility; /* syslog facility */
++ u8 flags:5; /* internal record flags */
++ u8 level:3; /* syslog level */
++ u32 caller_id; /* thread id or processor id */
++};
++
++/*
++ * A structure providing the buffers, used by writers and readers.
++ *
++ * Writers:
++ * Using prb_rec_init_wr(), a writer sets @text_buf_size and @dict_buf_size
++ * before calling prb_reserve(). On success, prb_reserve() sets @info,
++ * @text_buf, @dict_buf to buffers reserved for that writer.
++ *
++ * Readers:
++ * Using prb_rec_init_rd(), a reader sets all fields before calling
++ * prb_read_valid(). Note that the reader provides the @info, @text_buf,
++ * @dict_buf buffers. On success, the struct pointed to by @info will be
++ * filled and the char arrays pointed to by @text_buf and @dict_buf will
++ * be filled with text and dict data.
++ */
++struct printk_record {
++ struct printk_info *info;
++ char *text_buf;
++ char *dict_buf;
++ unsigned int text_buf_size;
++ unsigned int dict_buf_size;
++};
++
++/* Specifies the logical position and span of a data block. */
++struct prb_data_blk_lpos {
++ unsigned long begin;
++ unsigned long next;
++};
++
++/*
++ * A descriptor: the complete meta-data for a record.
++ *
++ * @state_var: A bitwise combination of descriptor ID and descriptor state.
++ */
++struct prb_desc {
++ struct printk_info info;
++ atomic_long_t state_var;
++ struct prb_data_blk_lpos text_blk_lpos;
++ struct prb_data_blk_lpos dict_blk_lpos;
++};
++
++/* A ringbuffer of "ID + data" elements. */
++struct prb_data_ring {
++ unsigned int size_bits;
++ char *data;
++ atomic_long_t head_lpos;
++ atomic_long_t tail_lpos;
++};
++
++/* A ringbuffer of "struct prb_desc" elements. */
++struct prb_desc_ring {
++ unsigned int count_bits;
++ struct prb_desc *descs;
++ atomic_long_t head_id;
++ atomic_long_t tail_id;
++};
++
++/*
++ * The high level structure representing the printk ringbuffer.
++ *
++ * @fail: Count of failed prb_reserve() calls where not even a data-less
++ * record was created.
++ */
++struct printk_ringbuffer {
++ struct prb_desc_ring desc_ring;
++ struct prb_data_ring text_data_ring;
++ struct prb_data_ring dict_data_ring;
++ atomic_long_t fail;
++};
++
++/*
++ * Used by writers as a reserve/commit handle.
++ *
++ * @rb: Ringbuffer where the entry is reserved.
++ * @irqflags: Saved irq flags to restore on entry commit.
++ * @id: ID of the reserved descriptor.
++ * @text_space: Total occupied buffer space in the text data ring, including
++ * ID, alignment padding, and wrapping data blocks.
++ *
++ * This structure is an opaque handle for writers. Its contents are only
++ * to be used by the ringbuffer implementation.
++ */
++struct prb_reserved_entry {
++ struct printk_ringbuffer *rb;
++ unsigned long irqflags;
++ unsigned long id;
++ unsigned int text_space;
++};
++
++#define _DATA_SIZE(sz_bits) (1UL << (sz_bits))
++#define _DESCS_COUNT(ct_bits) (1U << (ct_bits))
++#define DESC_SV_BITS (sizeof(unsigned long) * 8)
++#define DESC_COMMITTED_MASK (1UL << (DESC_SV_BITS - 1))
++#define DESC_REUSE_MASK (1UL << (DESC_SV_BITS - 2))
++#define DESC_FLAGS_MASK (DESC_COMMITTED_MASK | DESC_REUSE_MASK)
++#define DESC_ID_MASK (~DESC_FLAGS_MASK)
++#define DESC_ID(sv) ((sv) & DESC_ID_MASK)
++#define INVALID_LPOS 1
++
++#define INVALID_BLK_LPOS \
++{ \
++ .begin = INVALID_LPOS, \
++ .next = INVALID_LPOS, \
++}
++
++/*
++ * Descriptor Bootstrap
++ *
++ * The descriptor array is minimally initialized to allow immediate usage
++ * by readers and writers. The requirements that the descriptor array
++ * initialization must satisfy:
++ *
++ * Req1
++ * The tail must point to an existing (committed or reusable) descriptor.
++ * This is required by the implementation of prb_first_seq().
++ *
++ * Req2
++ * Readers must see that the ringbuffer is initially empty.
++ *
++ * Req3
++ * The first record reserved by a writer is assigned sequence number 0.
++ *
++ * To satisfy Req1, the tail initially points to a descriptor that is
++ * minimally initialized (having no data block, i.e. data-less with the
++ * data block's lpos @begin and @next values set to INVALID_LPOS).
++ *
++ * To satisfy Req2, the initial tail descriptor is initialized to the
++ * reusable state. Readers recognize reusable descriptors as existing
++ * records, but skip over them.
++ *
++ * To satisfy Req3, the last descriptor in the array is used as the initial
++ * head (and tail) descriptor. This allows the first record reserved by a
++ * writer (head + 1) to be the first descriptor in the array. (Only the first
++ * descriptor in the array could have a valid sequence number of 0.)
++ *
++ * The first time a descriptor is reserved, it is assigned a sequence number
++ * with the value of the array index. A "first time reserved" descriptor can
++ * be recognized because it has a sequence number of 0 but does not have an
++ * index of 0. (Only the first descriptor in the array could have a valid
++ * sequence number of 0.) After the first reservation, all future reservations
++ * (recycling) simply involve incrementing the sequence number by the array
++ * count.
++ *
++ * Hack #1
++ * Only the first descriptor in the array is allowed to have the sequence
++ * number 0. In this case it is not possible to recognize if it is being
++ * reserved the first time (set to index value) or has been reserved
++ * previously (increment by the array count). This is handled by _always_
++ * incrementing the sequence number by the array count when reserving the
++ * first descriptor in the array. In order to satisfy Req3, the sequence
++ * number of the first descriptor in the array is initialized to minus
++ * the array count. Then, upon the first reservation, it is incremented
++ * to 0, thus satisfying Req3.
++ *
++ * Hack #2
++ * prb_first_seq() can be called at any time by readers to retrieve the
++ * sequence number of the tail descriptor. However, due to Req2 and Req3,
++ * initially there are no records to report the sequence number of
++ * (sequence numbers are u64 and there is nothing less than 0). To handle
++ * this, the sequence number of the initial tail descriptor is initialized
++ * to 0. Technically this is incorrect, because there is no record with
++ * sequence number 0 (yet) and the tail descriptor is not the first
++ * descriptor in the array. But it allows prb_read_valid() to correctly
++ * report the existence of a record for _any_ given sequence number at all
++ * times. Bootstrapping is complete when the tail is pushed the first
++ * time, thus finally pointing to the first descriptor reserved by a
++ * writer, which has the assigned sequence number 0.
++ */
++
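++/*
++ * A worked example of the bootstrap values (illustration only), assuming
++ * a 4-descriptor array (descbits = 2):
++ *
++ *  descs[0].info.seq = -4  (incremented by the array count to 0 on its
++ *                           first reservation, satisfying Req3)
++ *  descs[3].info.seq = 0   (the initial reusable, data-less head/tail,
++ *                           satisfying Req1 and Req2)
++ *
++ * Writers then reserve descs[0]..descs[3] with sequence numbers 0..3
++ * (descs[3] is recognized as first-time reserved because its index is
++ * not 0), and recycling descs[0] assigns it sequence number 0 + 4 = 4.
++ */
++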
++/*
++ * Initiating Logical Value Overflows
++ *
++ * Both logical position (lpos) and ID values can be mapped to array indexes
++ * but may experience overflows during the lifetime of the system. To ensure
++ * that printk_ringbuffer can handle the overflows for these types, initial
++ * values are chosen that map to the correct initial array indexes, but will
++ * result in overflows soon.
++ *
++ * BLK0_LPOS
++ * The initial @head_lpos and @tail_lpos for data rings. It is at index
++ * 0 and the lpos value is such that it will overflow on the first wrap.
++ *
++ * DESC0_ID
++ * The initial @head_id and @tail_id for the desc ring. It is at the last
++ * index of the descriptor array (see Req3 above) and the ID value is such
++ * that it will overflow on the second wrap.
++ */
++#define BLK0_LPOS(sz_bits) (-(_DATA_SIZE(sz_bits)))
++#define DESC0_ID(ct_bits) DESC_ID(-(_DESCS_COUNT(ct_bits) + 1))
++#define DESC0_SV(ct_bits) (DESC_COMMITTED_MASK | DESC_REUSE_MASK | DESC0_ID(ct_bits))
++
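++/*
++ * A worked example (illustration only): with sz_bits = 12 on a 64-bit
++ * system, BLK0_LPOS(12) = -4096 = 0xfffffffffffff000, which maps to data
++ * array index 0 and overflows to 0 once the first 4 KiB are logged.
++ * Likewise, DESC0_ID(2) = DESC_ID(-5) maps to the last index of a
++ * 4-descriptor array; reservations then use IDs -4..-1 (the first wrap)
++ * followed by 0, overflowing during the second wrap.
++ */
++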
++/*
++ * Define a ringbuffer with an external text data buffer. The same as
++ * DEFINE_PRINTKRB() but requires specifying an external buffer for the
++ * text data.
++ *
++ * Note: The specified external buffer must be of the size:
++ * 2 ^ (descbits + avgtextbits)
++ */
++#define _DEFINE_PRINTKRB(name, descbits, avgtextbits, avgdictbits, text_buf) \
++static char _##name##_dict[1U << ((avgdictbits) + (descbits))] \
++ __aligned(__alignof__(unsigned long)); \
++static struct prb_desc _##name##_descs[_DESCS_COUNT(descbits)] = { \
++ /* this will be the first record reserved by a writer */ \
++ [0] = { \
++ .info = { \
++ /* will be incremented to 0 on the first reservation */ \
++ .seq = -(u64)_DESCS_COUNT(descbits), \
++ }, \
++ }, \
++ /* the initial head and tail */ \
++ [_DESCS_COUNT(descbits) - 1] = { \
++ .info = { \
++ /* reports the first seq value during the bootstrap phase */ \
++ .seq = 0, \
++ }, \
++ /* reusable */ \
++ .state_var = ATOMIC_INIT(DESC0_SV(descbits)), \
++ /* no associated data block */ \
++ .text_blk_lpos = INVALID_BLK_LPOS, \
++ .dict_blk_lpos = INVALID_BLK_LPOS, \
++ }, \
++}; \
++static struct printk_ringbuffer name = { \
++ .desc_ring = { \
++ .count_bits = descbits, \
++ .descs = &_##name##_descs[0], \
++ .head_id = ATOMIC_INIT(DESC0_ID(descbits)), \
++ .tail_id = ATOMIC_INIT(DESC0_ID(descbits)), \
++ }, \
++ .text_data_ring = { \
++ .size_bits = (avgtextbits) + (descbits), \
++ .data = text_buf, \
++ .head_lpos = ATOMIC_LONG_INIT(BLK0_LPOS((avgtextbits) + (descbits))), \
++ .tail_lpos = ATOMIC_LONG_INIT(BLK0_LPOS((avgtextbits) + (descbits))), \
++ }, \
++ .dict_data_ring = { \
++ .size_bits = (avgdictbits) + (descbits), \
++ .data = &_##name##_dict[0], \
++ .head_lpos = ATOMIC_LONG_INIT(BLK0_LPOS((avgdictbits) + (descbits))), \
++ .tail_lpos = ATOMIC_LONG_INIT(BLK0_LPOS((avgdictbits) + (descbits))), \
++ }, \
++ .fail = ATOMIC_LONG_INIT(0), \
++}
++
++/**
++ * DEFINE_PRINTKRB() - Define a ringbuffer.
++ *
++ * @name: The name of the ringbuffer variable.
++ * @descbits: The number of descriptors as a power-of-2 value.
++ * @avgtextbits: The average text data size per record as a power-of-2 value.
++ * @avgdictbits: The average dictionary data size per record as a
++ * power-of-2 value.
++ *
++ * This is a macro for defining a ringbuffer and all internal structures
++ * such that it is ready for immediate use. See _DEFINE_PRINTKRB() for a
++ * variant where the text data buffer can be specified externally.
++ */
++#define DEFINE_PRINTKRB(name, descbits, avgtextbits, avgdictbits) \
++static char _##name##_text[1U << ((avgtextbits) + (descbits))] \
++ __aligned(__alignof__(unsigned long)); \
++_DEFINE_PRINTKRB(name, descbits, avgtextbits, avgdictbits, &_##name##_text[0])
++
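++/*
++ * A definition sketch (illustration only; the name and sizes are
++ * assumptions): 2^7 = 128 descriptors, an average of 2^5 = 32 bytes of
++ * text and 2^4 = 16 bytes of dict data per record, i.e. a 4 KiB text
++ * buffer and a 2 KiB dict buffer:
++ *
++ *  DEFINE_PRINTKRB(test_rb, 7, 5, 4);
++ */
++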
++/* Writer Interface */
++
++/**
++ * prb_rec_init_wr() - Initialize a buffer for writing records.
++ *
++ * @r: The record to initialize.
++ * @text_buf_size: The needed text buffer size.
++ * @dict_buf_size: The needed dictionary buffer size.
++ *
++ * Initialize all the fields that a writer is interested in. If
++ * @dict_buf_size is 0, a dictionary buffer will not be reserved.
++ * @text_buf_size must be greater than 0.
++ *
++ * Note that although @dict_buf_size may be initialized to non-zero,
++ * its value must be rechecked after a successful call to prb_reserve()
++ * to verify a dictionary buffer was actually reserved. Dictionary buffer
++ * reservation is allowed to fail.
++ */
++static inline void prb_rec_init_wr(struct printk_record *r,
++ unsigned int text_buf_size,
++ unsigned int dict_buf_size)
++{
++ r->info = NULL;
++ r->text_buf = NULL;
++ r->dict_buf = NULL;
++ r->text_buf_size = text_buf_size;
++ r->dict_buf_size = dict_buf_size;
++}
++
++bool prb_reserve(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
++ struct printk_record *r);
++void prb_commit(struct prb_reserved_entry *e);
++
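++/*
++ * A minimal writer sketch (illustration only; @rb and the message are
++ * assumptions). The reservation is sized to hold the text plus its
++ * terminator:
++ *
++ *  struct prb_reserved_entry e;
++ *  struct printk_record r;
++ *
++ *  prb_rec_init_wr(&r, strlen("hello") + 1, 0);
++ *
++ *  if (prb_reserve(&e, rb, &r)) {
++ *      snprintf(r.text_buf, r.text_buf_size, "hello");
++ *      prb_commit(&e);
++ *  }
++ */
++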
++void prb_init(struct printk_ringbuffer *rb,
++ char *text_buf, unsigned int text_buf_size,
++ char *dict_buf, unsigned int dict_buf_size,
++ struct prb_desc *descs, unsigned int descs_count_bits);
++unsigned int prb_record_text_space(struct prb_reserved_entry *e);
++
++/* Reader Interface */
++
++/**
++ * prb_rec_init_rd() - Initialize a buffer for reading records.
++ *
++ * @r: The record to initialize.
++ * @info: A buffer to store record meta-data.
++ * @text_buf: A buffer to store text data.
++ * @text_buf_size: The size of @text_buf.
++ * @dict_buf: A buffer to store dictionary data.
++ * @dict_buf_size: The size of @dict_buf.
++ *
++ * Initialize all the fields that a reader is interested in. All arguments
++ * (except @r) are optional. Only record data for arguments that are
++ * non-NULL or non-zero will be read.
++ */
++static inline void prb_rec_init_rd(struct printk_record *r,
++ struct printk_info *info,
++ char *text_buf, unsigned int text_buf_size,
++ char *dict_buf, unsigned int dict_buf_size)
++{
++ r->info = info;
++ r->text_buf = text_buf;
++ r->dict_buf = dict_buf;
++ r->text_buf_size = text_buf_size;
++ r->dict_buf_size = dict_buf_size;
++}
++
++/**
++ * prb_for_each_record() - Iterate over the records of a ringbuffer.
++ *
++ * @from: The sequence number to begin with.
++ * @rb: The ringbuffer to iterate over.
++ * @s: A u64 to store the sequence number on each iteration.
++ * @r: A printk_record to store the record on each iteration.
++ *
++ * This is a macro for conveniently iterating over a ringbuffer.
++ * Note that @s may not be the sequence number of the record on each
++ * iteration. For the sequence number, @r->info->seq should be checked.
++ *
++ * Context: Any context.
++ */
++#define prb_for_each_record(from, rb, s, r) \
++for ((s) = from; prb_read_valid(rb, s, r); (s) = (r)->info->seq + 1)
++
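++/*
++ * An iteration sketch (illustration only; @rb and the hypothetical
++ * handle_record() helper are assumptions), visiting every currently
++ * available record:
++ *
++ *  struct printk_info info;
++ *  struct printk_record r;
++ *  char text[128];
++ *  u64 seq;
++ *
++ *  prb_rec_init_rd(&r, &info, &text[0], sizeof(text), NULL, 0);
++ *
++ *  prb_for_each_record(0, rb, seq, &r)
++ *      handle_record(info.seq, &text[0], info.text_len);
++ */
++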
++/**
++ * prb_for_each_info() - Iterate over the meta data of a ringbuffer.
++ *
++ * @from: The sequence number to begin with.
++ * @rb: The ringbuffer to iterate over.
++ * @s: A u64 to store the sequence number on each iteration.
++ * @i: A printk_info to store the record meta data on each iteration.
++ * @lc: An unsigned int to store the text line count of each record.
++ *
++ * This is a macro for conveniently iterating over a ringbuffer.
++ * Note that @s may not be the sequence number of the record on each
++ * iteration. For the sequence number, @i->seq should be checked.
++ *
++ * Context: Any context.
++ */
++#define prb_for_each_info(from, rb, s, i, lc) \
++for ((s) = from; prb_read_valid_info(rb, s, i, lc); (s) = (i)->seq + 1)
++
++bool prb_read_valid(struct printk_ringbuffer *rb, u64 seq,
++ struct printk_record *r);
++bool prb_read_valid_info(struct printk_ringbuffer *rb, u64 seq,
++ struct printk_info *info, unsigned int *line_count);
++
++u64 prb_first_valid_seq(struct printk_ringbuffer *rb);
++u64 prb_next_seq(struct printk_ringbuffer *rb);
++
++#endif /* _KERNEL_PRINTK_RINGBUFFER_H */
diff --git a/debian/patches-rt/0002-printk-use-buffer-pools-for-sprint-buffers.patch b/debian/patches-rt/0002-printk-use-buffer-pools-for-sprint-buffers.patch
new file mode 100644
index 000000000..f96e9ea9e
--- /dev/null
+++ b/debian/patches-rt/0002-printk-use-buffer-pools-for-sprint-buffers.patch
@@ -0,0 +1,194 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Tue, 13 Oct 2020 22:57:55 +0200
+Subject: [PATCH 02/15] printk: use buffer pools for sprint buffers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+vprintk_store() is using a single static buffer as a temporary
+sprint buffer for the message text. This will not work once
+@logbuf_lock is removed. Replace the single static buffer with
+per-cpu and global pools.
+
+Each per-cpu pool is large enough to support a worst case of 2
+contexts (non-NMI and NMI).
+
+To support printk() recursion and printk() calls before per-cpu
+variables are ready, an extra/fallback global pool of 2 contexts is
+available.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 141 +++++++++++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 137 insertions(+), 4 deletions(-)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1704,6 +1704,129 @@ SYSCALL_DEFINE3(syslog, int, type, char
+ }
+
+ /*
++ * The per-cpu sprint buffers are used with interrupts disabled, so each CPU
++ * only requires 2 buffers: for non-NMI and NMI contexts. Recursive printk()
++ * calls are handled by the global sprint buffers.
++ */
++#define SPRINT_CTX_DEPTH 2
++
++/* Static sprint buffers for early boot (only 1 CPU) and recursion. */
++static DECLARE_BITMAP(sprint_global_buffer_map, SPRINT_CTX_DEPTH);
++static char sprint_global_buffer[SPRINT_CTX_DEPTH][PREFIX_MAX + LOG_LINE_MAX];
++
++struct sprint_buffers {
++ char buf[SPRINT_CTX_DEPTH][PREFIX_MAX + LOG_LINE_MAX];
++ atomic_t index;
++};
++
++static DEFINE_PER_CPU(struct sprint_buffers, percpu_sprint_buffers);
++
++/*
++ * Acquire an unused buffer, returning its index. If no buffer is
++ * available, @count is returned.
++ */
++static int _get_sprint_buf(unsigned long *map, int count)
++{
++ int index;
++
++ do {
++ index = find_first_zero_bit(map, count);
++ if (index == count)
++ break;
++ /*
++ * Guarantee map changes are ordered for the other CPUs.
++ * Pairs with clear_bit() in _put_sprint_buf().
++ */
++ } while (test_and_set_bit(index, map));
++
++ return index;
++}
++
++/* Mark the buffer @index as unused. */
++static void _put_sprint_buf(unsigned long *map, unsigned int count, unsigned int index)
++{
++ /*
++ * Guarantee map changes are ordered for the other CPUs.
++ * Pairs with test_and_set_bit() in _get_sprint_buf().
++ */
++ clear_bit(index, map);
++}
++
++/*
++ * Get a buffer sized PREFIX_MAX+LOG_LINE_MAX for sprinting. On success, @id
++ * is set and interrupts are disabled. @id is used to put back the buffer.
++ *
++ * @id is non-negative for per-cpu buffers, negative for global buffers.
++ */
++static char *get_sprint_buf(int *id, unsigned long *flags)
++{
++ struct sprint_buffers *bufs;
++ unsigned int index;
++ unsigned int cpu;
++
++ local_irq_save(*flags);
++ cpu = get_cpu();
++
++ if (printk_percpu_data_ready()) {
++
++ /*
++ * First try with per-cpu pool. Note that the last
++ * buffer is reserved for NMI context.
++ */
++ bufs = per_cpu_ptr(&percpu_sprint_buffers, cpu);
++ index = atomic_read(&bufs->index);
++ if (index < (SPRINT_CTX_DEPTH - 1) ||
++ (in_nmi() && index < SPRINT_CTX_DEPTH)) {
++ atomic_set(&bufs->index, index + 1);
++ *id = cpu;
++ return &bufs->buf[index][0];
++ }
++ }
++
++ /*
++ * Fallback to global pool.
++ *
++ * The global pool will only ever be used if per-cpu data is not ready
++ * yet or printk recurses. Recursion will not occur unless printk is
++ * having internal issues.
++ */
++ index = _get_sprint_buf(sprint_global_buffer_map, SPRINT_CTX_DEPTH);
++ if (index != SPRINT_CTX_DEPTH) {
++ /* Convert to global buffer representation. */
++ *id = -index - 1;
++ return &sprint_global_buffer[index][0];
++ }
++
++ /* Failed to get a buffer. */
++ put_cpu();
++ local_irq_restore(*flags);
++ return NULL;
++}
++
+/* Put back a sprint buffer and restore interrupts. */
++static void put_sprint_buf(int id, unsigned long flags)
++{
++ struct sprint_buffers *bufs;
++ unsigned int index;
++ unsigned int cpu;
++
++ if (id >= 0) {
++ cpu = id;
++ bufs = per_cpu_ptr(&percpu_sprint_buffers, cpu);
++ index = atomic_read(&bufs->index);
++ atomic_set(&bufs->index, index - 1);
++ } else {
++ /* Convert from global buffer representation. */
++ index = -id - 1;
++ _put_sprint_buf(sprint_global_buffer_map,
++ SPRINT_CTX_DEPTH, index);
++ }
++
++ put_cpu();
++ local_irq_restore(flags);
++}
++
++/*
+ * Special console_lock variants that help to reduce the risk of soft-lockups.
+ * They allow to pass console_lock to another printk() call using a busy wait.
+ */
+@@ -1941,16 +2064,23 @@ int vprintk_store(int facility, int leve
+ const struct dev_printk_info *dev_info,
+ const char *fmt, va_list args)
+ {
+- static char textbuf[LOG_LINE_MAX];
+- char *text = textbuf;
+ size_t text_len;
+ enum log_flags lflags = 0;
++ unsigned long irqflags;
++ int sprint_id;
++ char *text;
++ int ret;
++
++ /* No buffer is available if printk has recursed too much. */
++ text = get_sprint_buf(&sprint_id, &irqflags);
++ if (!text)
++ return 0;
+
+ /*
+ * The printf needs to come first; we need the syslog
+ * prefix which might be passed-in as a parameter.
+ */
+- text_len = vscnprintf(text, sizeof(textbuf), fmt, args);
++ text_len = vscnprintf(text, LOG_LINE_MAX, fmt, args);
+
+ /* mark and strip a trailing newline */
+ if (text_len && text[text_len-1] == '\n') {
+@@ -1983,7 +2113,10 @@ int vprintk_store(int facility, int leve
+ if (dev_info)
+ lflags |= LOG_NEWLINE;
+
+- return log_output(facility, level, lflags, dev_info, text, text_len);
++ ret = log_output(facility, level, lflags, dev_info, text, text_len);
++
++ put_sprint_buf(sprint_id, irqflags);
++ return ret;
+ }
+
+ asmlinkage int vprintk_emit(int facility, int level,
diff --git a/debian/patches-rt/0002-sched-Fix-balance_callback.patch b/debian/patches-rt/0002-sched-Fix-balance_callback.patch
index e35806b2b..babf0e34d 100644
--- a/debian/patches-rt/0002-sched-Fix-balance_callback.patch
+++ b/debian/patches-rt/0002-sched-Fix-balance_callback.patch
@@ -1,7 +1,7 @@
From: Peter Zijlstra <peterz@infradead.org>
-Date: Mon, 5 Oct 2020 16:57:19 +0200
-Subject: [PATCH 02/17] sched: Fix balance_callback()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Date: Fri, 23 Oct 2020 12:12:00 +0200
+Subject: [PATCH 02/19] sched: Fix balance_callback()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
The intent of balance_callback() has always been to delay executing
balancing operations until the end of the current rq->lock section.
diff --git a/debian/patches-rt/0003-Revert-printk-lock-unlock-console-only-for-new-logbu.patch b/debian/patches-rt/0003-Revert-printk-lock-unlock-console-only-for-new-logbu.patch
new file mode 100644
index 000000000..a26c0d171
--- /dev/null
+++ b/debian/patches-rt/0003-Revert-printk-lock-unlock-console-only-for-new-logbu.patch
@@ -0,0 +1,61 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Thu, 9 Jul 2020 15:29:43 +0206
+Subject: [PATCH 03/25] Revert "printk: lock/unlock console only for new logbuf
+ entries"
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+This reverts commit 3ac37a93fa9217e576bebfd4ba3e80edaaeb2289.
+
+This optimization will not apply once the transition to a lockless
+printk is complete. Rather than porting this optimization through
+the transition only to remove it anyway, just revert it now to
+simplify the transition.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Acked-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20200709132344.760-4-john.ogness@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 10 +++-------
+ 1 file changed, 3 insertions(+), 7 deletions(-)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1989,9 +1989,8 @@ asmlinkage int vprintk_emit(int facility
+ const char *fmt, va_list args)
+ {
+ int printed_len;
+- bool in_sched = false, pending_output;
++ bool in_sched = false;
+ unsigned long flags;
+- u64 curr_log_seq;
+
+ /* Suppress unimportant messages after panic happens */
+ if (unlikely(suppress_printk))
+@@ -2007,13 +2006,11 @@ asmlinkage int vprintk_emit(int facility
+
+ /* This stops the holder of console_sem just where we want him */
+ logbuf_lock_irqsave(flags);
+- curr_log_seq = log_next_seq;
+ printed_len = vprintk_store(facility, level, dict, dictlen, fmt, args);
+- pending_output = (curr_log_seq != log_next_seq);
+ logbuf_unlock_irqrestore(flags);
+
+ /* If called from the scheduler, we can not call up(). */
+- if (!in_sched && pending_output) {
++ if (!in_sched) {
+ /*
+ * Disable preemption to avoid being preempted while holding
+ * console_sem which would prevent anyone from printing to
+@@ -2030,8 +2027,7 @@ asmlinkage int vprintk_emit(int facility
+ preempt_enable();
+ }
+
+- if (pending_output)
+- wake_up_klogd();
++ wake_up_klogd();
+ return printed_len;
+ }
+ EXPORT_SYMBOL(vprintk_emit);
diff --git a/debian/patches-rt/0003-blk-mq-Use-llist_head-for-blk_cpu_done.patch b/debian/patches-rt/0003-blk-mq-Use-llist_head-for-blk_cpu_done.patch
new file mode 100644
index 000000000..96b6e7e98
--- /dev/null
+++ b/debian/patches-rt/0003-blk-mq-Use-llist_head-for-blk_cpu_done.patch
@@ -0,0 +1,166 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 28 Oct 2020 11:08:21 +0100
+Subject: [PATCH 3/3] blk-mq: Use llist_head for blk_cpu_done
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+With llist_head it is possible to avoid the locking (the irq-off region)
+when items are added. This makes it possible to add items on a remote
+CPU.
+llist_add() returns true if the list was previously empty. This can be
+used to invoke the SMP function call / raise the softirq only if the
+first item was added (otherwise it is already pending).
+This simplifies the code a little and reduces the IRQ-off regions. With
+this change it is possible to reduce the SMP function call to a simple
+__raise_softirq_irqoff().
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ block/blk-mq.c | 78 +++++++++++++++----------------------------------
+ include/linux/blkdev.h | 2 -
+ 2 files changed, 26 insertions(+), 54 deletions(-)
+
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -41,7 +41,7 @@
+ #include "blk-mq-sched.h"
+ #include "blk-rq-qos.h"
+
+-static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
++static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
+
+ static void blk_mq_poll_stats_start(struct request_queue *q);
+ static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
+@@ -565,68 +565,32 @@ void blk_mq_end_request(struct request *
+ }
+ EXPORT_SYMBOL(blk_mq_end_request);
+
+-/*
+- * Softirq action handler - move entries to local list and loop over them
+- * while passing them to the queue registered handler.
+- */
+-static __latent_entropy void blk_done_softirq(struct softirq_action *h)
++static void blk_complete_reqs(struct llist_head *cpu_list)
+ {
+- struct list_head *cpu_list, local_list;
++ struct llist_node *entry;
++ struct request *rq, *rq_next;
+
+- local_irq_disable();
+- cpu_list = this_cpu_ptr(&blk_cpu_done);
+- list_replace_init(cpu_list, &local_list);
+- local_irq_enable();
++ entry = llist_del_all(cpu_list);
++ entry = llist_reverse_order(entry);
+
+- while (!list_empty(&local_list)) {
+- struct request *rq;
+-
+- rq = list_entry(local_list.next, struct request, ipi_list);
+- list_del_init(&rq->ipi_list);
++ llist_for_each_entry_safe(rq, rq_next, entry, ipi_list)
+ rq->q->mq_ops->complete(rq);
+- }
+ }
+
+-static void blk_mq_trigger_softirq(struct request *rq)
++static __latent_entropy void blk_done_softirq(struct softirq_action *h)
+ {
+- struct list_head *list;
+- unsigned long flags;
+-
+- local_irq_save(flags);
+- list = this_cpu_ptr(&blk_cpu_done);
+- list_add_tail(&rq->ipi_list, list);
+-
+- /*
+- * If the list only contains our just added request, signal a raise of
+- * the softirq. If there are already entries there, someone already
+- * raised the irq but it hasn't run yet.
+- */
+- if (list->next == &rq->ipi_list)
+- raise_softirq_irqoff(BLOCK_SOFTIRQ);
+- local_irq_restore(flags);
++ blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
+ }
+
+ static int blk_softirq_cpu_dead(unsigned int cpu)
+ {
+- /*
+- * If a CPU goes away, splice its entries to the current CPU
+- * and trigger a run of the softirq
+- */
+- local_irq_disable();
+- list_splice_init(&per_cpu(blk_cpu_done, cpu),
+- this_cpu_ptr(&blk_cpu_done));
+- raise_softirq_irqoff(BLOCK_SOFTIRQ);
+- local_irq_enable();
+-
++ blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
+ return 0;
+ }
+
+-
+ static void __blk_mq_complete_request_remote(void *data)
+ {
+- struct request *rq = data;
+-
+- blk_mq_trigger_softirq(rq);
++ __raise_softirq_irqoff(BLOCK_SOFTIRQ);
+ }
+
+ static inline bool blk_mq_complete_need_ipi(struct request *rq)
+@@ -657,6 +621,7 @@ static inline bool blk_mq_complete_need_
+
+ bool blk_mq_complete_request_remote(struct request *rq)
+ {
++ struct llist_head *cpu_list;
+ WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
+
+ /*
+@@ -667,14 +632,21 @@ bool blk_mq_complete_request_remote(stru
+ return false;
+
+ if (blk_mq_complete_need_ipi(rq)) {
+- rq->csd.func = __blk_mq_complete_request_remote;
+- rq->csd.info = rq;
+- rq->csd.flags = 0;
+- smp_call_function_single_async(rq->mq_ctx->cpu, &rq->csd);
++ unsigned int cpu;
++
++ cpu = rq->mq_ctx->cpu;
++ cpu_list = &per_cpu(blk_cpu_done, cpu);
++ if (llist_add(&rq->ipi_list, cpu_list)) {
++ rq->csd.func = __blk_mq_complete_request_remote;
++ rq->csd.flags = 0;
++ smp_call_function_single_async(cpu, &rq->csd);
++ }
+ } else {
+ if (rq->q->nr_hw_queues > 1)
+ return false;
+- blk_mq_trigger_softirq(rq);
++ cpu_list = this_cpu_ptr(&blk_cpu_done);
++ if (llist_add(&rq->ipi_list, cpu_list))
++ raise_softirq(BLOCK_SOFTIRQ);
+ }
+
+ return true;
+@@ -3877,7 +3849,7 @@ static int __init blk_mq_init(void)
+ int i;
+
+ for_each_possible_cpu(i)
+- INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
++ init_llist_head(&per_cpu(blk_cpu_done, i));
+ open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
+
+ cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -154,7 +154,7 @@ struct request {
+ */
+ union {
+ struct hlist_node hash; /* merge hash */
+- struct list_head ipi_list;
++ struct llist_node ipi_list;
+ };
+
+ /*
diff --git a/debian/patches-rt/0003-locking-rtmutex-Move-rt_mutex_init-outside-of-CONFIG.patch b/debian/patches-rt/0003-locking-rtmutex-Move-rt_mutex_init-outside-of-CONFIG.patch
index 69c4caccb..28f0f0040 100644
--- a/debian/patches-rt/0003-locking-rtmutex-Move-rt_mutex_init-outside-of-CONFIG.patch
+++ b/debian/patches-rt/0003-locking-rtmutex-Move-rt_mutex_init-outside-of-CONFIG.patch
@@ -1,8 +1,8 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 29 Sep 2020 16:32:49 +0200
-Subject: [PATCH 03/23] locking/rtmutex: Move rt_mutex_init() outside of
+Subject: [PATCH 03/22] locking/rtmutex: Move rt_mutex_init() outside of
CONFIG_DEBUG_RT_MUTEXES
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
rt_mutex_init() only initializes lockdep if CONFIG_DEBUG_RT_MUTEXES is
enabled. The static initializer (DEFINE_RT_MUTEX) does not have such a
diff --git a/debian/patches-rt/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch b/debian/patches-rt/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch
index 2845f669e..91f7a6523 100644
--- a/debian/patches-rt/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch
+++ b/debian/patches-rt/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 28 May 2018 15:24:22 +0200
Subject: [PATCH 3/4] mm/SLxB: change list_lock to raw_spinlock_t
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
The list_lock is used with IRQs off on RT. Make it a raw_spinlock_t
otherwise the interrupts won't be disabled on -RT. The locking rules remain
diff --git a/debian/patches-rt/0003-printk-change-clear_seq-to-atomic64_t.patch b/debian/patches-rt/0003-printk-change-clear_seq-to-atomic64_t.patch
new file mode 100644
index 000000000..263e84ef2
--- /dev/null
+++ b/debian/patches-rt/0003-printk-change-clear_seq-to-atomic64_t.patch
@@ -0,0 +1,110 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Tue, 13 Oct 2020 23:19:35 +0200
+Subject: [PATCH 03/15] printk: change @clear_seq to atomic64_t
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+Currently @clear_seq access is protected by @logbuf_lock. Once
+@logbuf_lock is removed some other form of synchronization will be
+required. Change the type of @clear_seq to atomic64_t to provide the
+synchronization.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 22 ++++++++++++++--------
+ 1 file changed, 14 insertions(+), 8 deletions(-)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -403,7 +403,7 @@ static u64 exclusive_console_stop_seq;
+ static unsigned long console_dropped;
+
+ /* the next printk record to read after the last 'clear' command */
+-static u64 clear_seq;
++static atomic64_t clear_seq = ATOMIC64_INIT(0);
+
+ #ifdef CONFIG_PRINTK_CALLER
+ #define PREFIX_MAX 48
+@@ -843,7 +843,7 @@ static loff_t devkmsg_llseek(struct file
+ * like issued by 'dmesg -c'. Reading /dev/kmsg itself
+ * changes no global state, and does not clear anything.
+ */
+- user->seq = clear_seq;
++ user->seq = atomic64_read(&clear_seq);
+ break;
+ case SEEK_END:
+ /* after the last record */
+@@ -960,6 +960,9 @@ void log_buf_vmcoreinfo_setup(void)
+ * parse it and detect any changes to structure down the line.
+ */
+
++ VMCOREINFO_SIZE(atomic64_t);
++ VMCOREINFO_TYPE_OFFSET(atomic64_t, counter);
++
+ VMCOREINFO_STRUCT_SIZE(printk_ringbuffer);
+ VMCOREINFO_OFFSET(printk_ringbuffer, desc_ring);
+ VMCOREINFO_OFFSET(printk_ringbuffer, text_data_ring);
+@@ -1522,6 +1525,7 @@ static int syslog_print_all(char __user
+ struct printk_info info;
+ unsigned int line_count;
+ struct printk_record r;
++ u64 clr_seq;
+ char *text;
+ int len = 0;
+ u64 seq;
+@@ -1533,15 +1537,17 @@ static int syslog_print_all(char __user
+
+ time = printk_time;
+ logbuf_lock_irq();
++ clr_seq = atomic64_read(&clear_seq);
++
+ /*
+ * Find first record that fits, including all following records,
+ * into the user-provided buffer for this dump.
+ */
+- prb_for_each_info(clear_seq, prb, seq, &info, &line_count)
++ prb_for_each_info(clr_seq, prb, seq, &info, &line_count)
+ len += get_record_print_text_size(&info, line_count, true, time);
+
+ /* move first record forward until length fits into the buffer */
+- prb_for_each_info(clear_seq, prb, seq, &info, &line_count) {
++ prb_for_each_info(clr_seq, prb, seq, &info, &line_count) {
+ if (len <= size)
+ break;
+ len -= get_record_print_text_size(&info, line_count, true, time);
+@@ -1572,7 +1578,7 @@ static int syslog_print_all(char __user
+ }
+
+ if (clear)
+- clear_seq = seq;
++ atomic64_set(&clear_seq, seq);
+ logbuf_unlock_irq();
+
+ kfree(text);
+@@ -1582,7 +1588,7 @@ static int syslog_print_all(char __user
+ static void syslog_clear(void)
+ {
+ logbuf_lock_irq();
+- clear_seq = prb_next_seq(prb);
++ atomic64_set(&clear_seq, prb_next_seq(prb));
+ logbuf_unlock_irq();
+ }
+
+@@ -3355,7 +3361,7 @@ void kmsg_dump(enum kmsg_dump_reason rea
+ dumper->active = true;
+
+ logbuf_lock_irqsave(flags);
+- dumper->cur_seq = clear_seq;
++ dumper->cur_seq = atomic64_read(&clear_seq);
+ dumper->next_seq = prb_next_seq(prb);
+ logbuf_unlock_irqrestore(flags);
+
+@@ -3563,7 +3569,7 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
+ */
+ void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper)
+ {
+- dumper->cur_seq = clear_seq;
++ dumper->cur_seq = atomic64_read(&clear_seq);
+ dumper->next_seq = prb_next_seq(prb);
+ }
+
diff --git a/debian/patches-rt/0003-printk-rb-define-ring-buffer-struct-and-initializer.patch b/debian/patches-rt/0003-printk-rb-define-ring-buffer-struct-and-initializer.patch
deleted file mode 100644
index 8c1925ed1..000000000
--- a/debian/patches-rt/0003-printk-rb-define-ring-buffer-struct-and-initializer.patch
+++ /dev/null
@@ -1,58 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Tue, 12 Feb 2019 15:29:41 +0100
-Subject: [PATCH 03/25] printk-rb: define ring buffer struct and initializer
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-See Documentation/printk-ringbuffer.txt for details about the
-initializer arguments.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/printk_ringbuffer.h | 28 ++++++++++++++++++++++++++++
- 1 file changed, 28 insertions(+)
-
---- a/include/linux/printk_ringbuffer.h
-+++ b/include/linux/printk_ringbuffer.h
-@@ -10,6 +10,20 @@ struct prb_cpulock {
- unsigned long __percpu *irqflags;
- };
-
-+struct printk_ringbuffer {
-+ void *buffer;
-+ unsigned int size_bits;
-+
-+ u64 seq;
-+
-+ atomic_long_t tail;
-+ atomic_long_t head;
-+ atomic_long_t reserve;
-+
-+ struct prb_cpulock *cpulock;
-+ atomic_t ctx;
-+};
-+
- #define DECLARE_STATIC_PRINTKRB_CPULOCK(name) \
- static DEFINE_PER_CPU(unsigned long, _##name##_percpu_irqflags); \
- static struct prb_cpulock name = { \
-@@ -17,6 +31,20 @@ static struct prb_cpulock name = { \
- .irqflags = &_##name##_percpu_irqflags, \
- }
-
-+#define DECLARE_STATIC_PRINTKRB(name, szbits, cpulockptr) \
-+static char _##name##_buffer[1 << (szbits)] \
-+ __aligned(__alignof__(long)); \
-+static struct printk_ringbuffer name = { \
-+ .buffer = &_##name##_buffer[0], \
-+ .size_bits = szbits, \
-+ .seq = 0, \
-+ .tail = ATOMIC_LONG_INIT(-111 * sizeof(long)), \
-+ .head = ATOMIC_LONG_INIT(-111 * sizeof(long)), \
-+ .reserve = ATOMIC_LONG_INIT(-111 * sizeof(long)), \
-+ .cpulock = cpulockptr, \
-+ .ctx = ATOMIC_INIT(0), \
-+}
-+
- /* utility functions */
- void prb_lock(struct prb_cpulock *cpu_lock, unsigned int *cpu_store);
- void prb_unlock(struct prb_cpulock *cpu_lock, unsigned int cpu_store);
diff --git a/debian/patches-rt/0003-sched-hotplug-Ensure-only-per-cpu-kthreads-run-durin.patch b/debian/patches-rt/0003-sched-hotplug-Ensure-only-per-cpu-kthreads-run-durin.patch
index 179a94cd8..6f3a2771b 100644
--- a/debian/patches-rt/0003-sched-hotplug-Ensure-only-per-cpu-kthreads-run-durin.patch
+++ b/debian/patches-rt/0003-sched-hotplug-Ensure-only-per-cpu-kthreads-run-durin.patch
@@ -1,8 +1,8 @@
From: Peter Zijlstra <peterz@infradead.org>
-Date: Mon, 5 Oct 2020 16:57:20 +0200
-Subject: [PATCH 03/17] sched/hotplug: Ensure only per-cpu kthreads run during
+Date: Fri, 23 Oct 2020 12:12:01 +0200
+Subject: [PATCH 03/19] sched/hotplug: Ensure only per-cpu kthreads run during
hotplug
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
In preparation for migrate_disable(), make sure only per-cpu kthreads
are allowed to run on !active CPUs.
@@ -22,9 +22,9 @@ not affected.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- kernel/sched/core.c | 118 ++++++++++++++++++++++++++++++++++++++++++++++++++-
+ kernel/sched/core.c | 114 ++++++++++++++++++++++++++++++++++++++++++++++++++-
kernel/sched/sched.h | 7 ++-
- 2 files changed, 122 insertions(+), 3 deletions(-)
+ 2 files changed, 118 insertions(+), 3 deletions(-)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -40,30 +40,29 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return head;
}
-@@ -3535,6 +3537,22 @@ static inline void balance_callbacks(str
+@@ -3535,6 +3537,21 @@ static inline void balance_callbacks(str
}
}
-+static bool balance_push(struct rq *rq);
++static void balance_push(struct rq *rq);
+
+static inline void balance_switch(struct rq *rq)
+{
-+ if (unlikely(rq->balance_flags)) {
-+ /*
-+ * Run the balance_callbacks, except on hotplug
-+ * when we need to push the current task away.
-+ */
-+ if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
-+ !(rq->balance_flags & BALANCE_PUSH) ||
-+ !balance_push(rq))
-+ __balance_callbacks(rq);
++ if (likely(!rq->balance_flags))
++ return;
++
++ if (rq->balance_flags & BALANCE_PUSH) {
++ balance_push(rq);
++ return;
+ }
++
++ __balance_callbacks(rq);
+}
+
#else
static inline void __balance_callbacks(struct rq *rq)
-@@ -3550,6 +3568,10 @@ static inline void balance_callbacks(str
+@@ -3550,6 +3567,10 @@ static inline void balance_callbacks(str
{
}
@@ -74,7 +73,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
static inline void
-@@ -3577,7 +3599,7 @@ static inline void finish_lock_switch(st
+@@ -3577,7 +3598,7 @@ static inline void finish_lock_switch(st
* prev into current:
*/
spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
@@ -83,7 +82,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
raw_spin_unlock_irq(&rq->lock);
}
-@@ -6833,6 +6855,93 @@ static void migrate_tasks(struct rq *dea
+@@ -6833,6 +6854,90 @@ static void migrate_tasks(struct rq *dea
rq->stop = stop;
}
@@ -118,7 +117,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+/*
+ * Ensure we only run per-cpu kthreads once the CPU goes !active.
+ */
-+static bool balance_push(struct rq *rq)
++static void balance_push(struct rq *rq)
+{
+ struct task_struct *push_task = rq->curr;
+
@@ -130,7 +129,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ * required to complete the hotplug process.
+ */
+ if (is_per_cpu_kthread(push_task))
-+ return false;
++ return;
+
+ get_task_struct(push_task);
+ /*
@@ -146,8 +145,6 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ * which is_per_cpu_kthread() and will push this task away.
+ */
+ raw_spin_lock(&rq->lock);
-+
-+ return true;
+}
+
+static void balance_push_set(int cpu, bool on)
@@ -165,19 +162,18 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+#else
+
-+static inline bool balance_push(struct rq *rq)
++static inline void balance_push(struct rq *rq)
+{
-+ return false;
+}
+
-+static void balance_push_set(int cpu, bool on)
++static inline void balance_push_set(int cpu, bool on)
+{
+}
+
#endif /* CONFIG_HOTPLUG_CPU */
void set_rq_online(struct rq *rq)
-@@ -6918,6 +7027,8 @@ int sched_cpu_activate(unsigned int cpu)
+@@ -6918,6 +7023,8 @@ int sched_cpu_activate(unsigned int cpu)
struct rq *rq = cpu_rq(cpu);
struct rq_flags rf;
@@ -186,7 +182,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_SCHED_SMT
/*
* When going up, increment the number of cores with SMT present.
-@@ -6965,6 +7076,8 @@ int sched_cpu_deactivate(unsigned int cp
+@@ -6965,6 +7072,8 @@ int sched_cpu_deactivate(unsigned int cp
*/
synchronize_rcu();
@@ -195,7 +191,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_SCHED_SMT
/*
* When going down, decrement the number of cores with SMT present.
-@@ -6978,6 +7091,7 @@ int sched_cpu_deactivate(unsigned int cp
+@@ -6978,6 +7087,7 @@ int sched_cpu_deactivate(unsigned int cp
ret = cpuset_cpu_inactive(cpu);
if (ret) {
diff --git a/debian/patches-rt/0003-seqlock-Introduce-seqcount_latch_t.patch b/debian/patches-rt/0003-seqlock-Introduce-seqcount_latch_t.patch
index 64bdb571b..d0f5ec44c 100644
--- a/debian/patches-rt/0003-seqlock-Introduce-seqcount_latch_t.patch
+++ b/debian/patches-rt/0003-seqlock-Introduce-seqcount_latch_t.patch
@@ -1,7 +1,7 @@
From: "Ahmed S. Darwish" <a.darwish@linutronix.de>
Date: Thu, 27 Aug 2020 13:40:39 +0200
Subject: [PATCH 03/13] seqlock: Introduce seqcount_latch_t
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Latch sequence counters are a multiversion concurrency control mechanism
where the seqcount_t counter even/odd value is used to switch between
diff --git a/debian/patches-rt/0004-locking-rtmutex-Remove-rt_mutex_timed_lock.patch b/debian/patches-rt/0004-locking-rtmutex-Remove-rt_mutex_timed_lock.patch
index 5a5c94102..7328a497f 100644
--- a/debian/patches-rt/0004-locking-rtmutex-Remove-rt_mutex_timed_lock.patch
+++ b/debian/patches-rt/0004-locking-rtmutex-Remove-rt_mutex_timed_lock.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 7 Oct 2020 12:11:33 +0200
-Subject: [PATCH 04/23] locking/rtmutex: Remove rt_mutex_timed_lock()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Subject: [PATCH 04/22] locking/rtmutex: Remove rt_mutex_timed_lock()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
rt_mutex_timed_lock() has no callers since commit
c051b21f71d1f ("rtmutex: Confine deadlock logic to futex")
diff --git a/debian/patches-rt/0004-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch b/debian/patches-rt/0004-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch
index 9257af0a1..38b261155 100644
--- a/debian/patches-rt/0004-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch
+++ b/debian/patches-rt/0004-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch
@@ -2,7 +2,7 @@ From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 21 Jun 2018 17:29:19 +0200
Subject: [PATCH 4/4] mm/SLUB: delay giving back empty slubs to IRQ enabled
regions
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
__free_slab() is invoked with disabled interrupts which increases the
irq-off time while __free_pages() is doing the work.
diff --git a/debian/patches-rt/0004-printk-rb-add-writer-interface.patch b/debian/patches-rt/0004-printk-rb-add-writer-interface.patch
deleted file mode 100644
index e84b884ed..000000000
--- a/debian/patches-rt/0004-printk-rb-add-writer-interface.patch
+++ /dev/null
@@ -1,234 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Tue, 12 Feb 2019 15:29:42 +0100
-Subject: [PATCH 04/25] printk-rb: add writer interface
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-Add the writer functions prb_reserve() and prb_commit(). These make
-use of processor-reentrant spin locks to limit the number of possible
-interruption scenarios for the writers.
-
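
For context, a minimal sketch of how a writer used this since-removed interface (write_message() is a hypothetical helper; it assumes a ring buffer set up via the DECLARE_STATIC_PRINTKRB macros shown in the header hunk below, and error handling is reduced to the NULL check):

	static void write_message(struct printk_ringbuffer *rb,
				  const char *text, unsigned int len)
	{
		struct prb_handle h;
		char *buf;

		/* reserve len bytes; takes the processor-reentrant cpulock */
		buf = prb_reserve(&h, rb, len);
		if (!buf)
			return; /* reservation failed; the lock is already dropped */

		memcpy(buf, text, len);

		/* assign a sequence number, publish the entry, drop the cpulock */
		prb_commit(&h);
	}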
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/printk_ringbuffer.h | 17 +++
- lib/printk_ringbuffer.c | 172 ++++++++++++++++++++++++++++++++++++++
- 2 files changed, 189 insertions(+)
-
---- a/include/linux/printk_ringbuffer.h
-+++ b/include/linux/printk_ringbuffer.h
-@@ -24,6 +24,18 @@ struct printk_ringbuffer {
- atomic_t ctx;
- };
-
-+struct prb_entry {
-+ unsigned int size;
-+ u64 seq;
-+ char data[0];
-+};
-+
-+struct prb_handle {
-+ struct printk_ringbuffer *rb;
-+ unsigned int cpu;
-+ struct prb_entry *entry;
-+};
-+
- #define DECLARE_STATIC_PRINTKRB_CPULOCK(name) \
- static DEFINE_PER_CPU(unsigned long, _##name##_percpu_irqflags); \
- static struct prb_cpulock name = { \
-@@ -45,6 +57,11 @@ static struct printk_ringbuffer name = {
- .ctx = ATOMIC_INIT(0), \
- }
-
-+/* writer interface */
-+char *prb_reserve(struct prb_handle *h, struct printk_ringbuffer *rb,
-+ unsigned int size);
-+void prb_commit(struct prb_handle *h);
-+
- /* utility functions */
- void prb_lock(struct prb_cpulock *cpu_lock, unsigned int *cpu_store);
- void prb_unlock(struct prb_cpulock *cpu_lock, unsigned int cpu_store);
---- a/lib/printk_ringbuffer.c
-+++ b/lib/printk_ringbuffer.c
-@@ -2,6 +2,14 @@
- #include <linux/smp.h>
- #include <linux/printk_ringbuffer.h>
-
-+#define PRB_SIZE(rb) (1 << rb->size_bits)
-+#define PRB_SIZE_BITMASK(rb) (PRB_SIZE(rb) - 1)
-+#define PRB_INDEX(rb, lpos) (lpos & PRB_SIZE_BITMASK(rb))
-+#define PRB_WRAPS(rb, lpos) (lpos >> rb->size_bits)
-+#define PRB_WRAP_LPOS(rb, lpos, xtra) \
-+ ((PRB_WRAPS(rb, lpos) + xtra) << rb->size_bits)
-+#define PRB_DATA_ALIGN sizeof(long)
-+
- static bool __prb_trylock(struct prb_cpulock *cpu_lock,
- unsigned int *cpu_store)
- {
-@@ -75,3 +83,167 @@ void prb_unlock(struct prb_cpulock *cpu_
-
- put_cpu();
- }
-+
-+static struct prb_entry *to_entry(struct printk_ringbuffer *rb,
-+ unsigned long lpos)
-+{
-+ char *buffer = rb->buffer;
-+ buffer += PRB_INDEX(rb, lpos);
-+ return (struct prb_entry *)buffer;
-+}
-+
-+static int calc_next(struct printk_ringbuffer *rb, unsigned long tail,
-+ unsigned long lpos, int size, unsigned long *calced_next)
-+{
-+ unsigned long next_lpos;
-+ int ret = 0;
-+again:
-+ next_lpos = lpos + size;
-+ if (next_lpos - tail > PRB_SIZE(rb))
-+ return -1;
-+
-+ if (PRB_WRAPS(rb, lpos) != PRB_WRAPS(rb, next_lpos)) {
-+ lpos = PRB_WRAP_LPOS(rb, next_lpos, 0);
-+ ret |= 1;
-+ goto again;
-+ }
-+
-+ *calced_next = next_lpos;
-+ return ret;
-+}
-+
-+static bool push_tail(struct printk_ringbuffer *rb, unsigned long tail)
-+{
-+ unsigned long new_tail;
-+ struct prb_entry *e;
-+ unsigned long head;
-+
-+ if (tail != atomic_long_read(&rb->tail))
-+ return true;
-+
-+ e = to_entry(rb, tail);
-+ if (e->size != -1)
-+ new_tail = tail + e->size;
-+ else
-+ new_tail = PRB_WRAP_LPOS(rb, tail, 1);
-+
-+ /* make sure the new tail does not overtake the head */
-+ head = atomic_long_read(&rb->head);
-+ if (head - new_tail > PRB_SIZE(rb))
-+ return false;
-+
-+ atomic_long_cmpxchg(&rb->tail, tail, new_tail);
-+ return true;
-+}
-+
-+/*
-+ * prb_commit: Commit a reserved entry to the ring buffer.
-+ * @h: An entry handle referencing the data entry to commit.
-+ *
-+ * Commit data that has been reserved using prb_reserve(). Once the data
-+ * block has been committed, it can be invalidated at any time. If a writer
-+ * is interested in using the data after committing, the writer should make
-+ * its own copy first or use the prb_iter_ reader functions to access the
-+ * data in the ring buffer.
-+ *
-+ * It is safe to call this function from any context and state.
-+ */
-+void prb_commit(struct prb_handle *h)
-+{
-+ struct printk_ringbuffer *rb = h->rb;
-+ struct prb_entry *e;
-+ unsigned long head;
-+ unsigned long res;
-+
-+ for (;;) {
-+ if (atomic_read(&rb->ctx) != 1) {
-+ /* the interrupted context will fixup head */
-+ atomic_dec(&rb->ctx);
-+ break;
-+ }
-+ /* assign sequence numbers before moving head */
-+ head = atomic_long_read(&rb->head);
-+ res = atomic_long_read(&rb->reserve);
-+ while (head != res) {
-+ e = to_entry(rb, head);
-+ if (e->size == -1) {
-+ head = PRB_WRAP_LPOS(rb, head, 1);
-+ continue;
-+ }
-+ e->seq = ++rb->seq;
-+ head += e->size;
-+ }
-+ atomic_long_set_release(&rb->head, res);
-+ atomic_dec(&rb->ctx);
-+
-+ if (atomic_long_read(&rb->reserve) == res)
-+ break;
-+ atomic_inc(&rb->ctx);
-+ }
-+
-+ prb_unlock(rb->cpulock, h->cpu);
-+}
-+
-+/*
-+ * prb_reserve: Reserve an entry within a ring buffer.
-+ * @h: An entry handle to be setup and reference an entry.
-+ * @rb: A ring buffer to reserve data within.
-+ * @size: The number of bytes to reserve.
-+ *
-+ * Reserve an entry of at least @size bytes to be used by the caller. If
-+ * successful, the data region of the entry belongs to the caller and cannot
-+ * be invalidated by any other task/context. For this reason, the caller
-+ * should call prb_commit() as quickly as possible in order to avoid preventing
-+ * other tasks/contexts from reserving data in the case that the ring buffer
-+ * has wrapped.
-+ *
-+ * It is safe to call this function from any context and state.
-+ *
-+ * Returns a pointer to the reserved entry (and @h is setup to reference that
-+ * entry) or NULL if it was not possible to reserve data.
-+ */
-+char *prb_reserve(struct prb_handle *h, struct printk_ringbuffer *rb,
-+ unsigned int size)
-+{
-+ unsigned long tail, res1, res2;
-+ int ret;
-+
-+ if (size == 0)
-+ return NULL;
-+ size += sizeof(struct prb_entry);
-+ size += PRB_DATA_ALIGN - 1;
-+ size &= ~(PRB_DATA_ALIGN - 1);
-+ if (size >= PRB_SIZE(rb))
-+ return NULL;
-+
-+ h->rb = rb;
-+ prb_lock(rb->cpulock, &h->cpu);
-+
-+ atomic_inc(&rb->ctx);
-+
-+ do {
-+ for (;;) {
-+ tail = atomic_long_read(&rb->tail);
-+ res1 = atomic_long_read(&rb->reserve);
-+ ret = calc_next(rb, tail, res1, size, &res2);
-+ if (ret >= 0)
-+ break;
-+ if (!push_tail(rb, tail)) {
-+ prb_commit(h);
-+ return NULL;
-+ }
-+ }
-+ } while (!atomic_long_try_cmpxchg_acquire(&rb->reserve, &res1, res2));
-+
-+ h->entry = to_entry(rb, res1);
-+
-+ if (ret) {
-+ /* handle wrap */
-+ h->entry->size = -1;
-+ h->entry = to_entry(rb, PRB_WRAP_LPOS(rb, res2, 0));
-+ }
-+
-+ h->entry->size = size;
-+
-+ return &h->entry->data[0];
-+}
diff --git a/debian/patches-rt/0004-printk-remove-logbuf_lock-add-syslog_lock.patch b/debian/patches-rt/0004-printk-remove-logbuf_lock-add-syslog_lock.patch
new file mode 100644
index 000000000..1c2361b81
--- /dev/null
+++ b/debian/patches-rt/0004-printk-remove-logbuf_lock-add-syslog_lock.patch
@@ -0,0 +1,573 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Wed, 14 Oct 2020 19:06:12 +0200
+Subject: [PATCH 04/15] printk: remove logbuf_lock, add syslog_lock
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+Since the ringbuffer is lockless, there is no need for it to be
+protected by @logbuf_lock. Remove @logbuf_lock.
+
+This means that printk_nmi_direct and printk_safe_flush_on_panic()
+no longer need to acquire any lock to run.
+
+The global variables @syslog_seq, @syslog_partial, @syslog_time
+were also protected by @logbuf_lock. Introduce @syslog_lock to
+protect these.
+
+@console_seq, @exclusive_console_stop_seq, @console_dropped are
+protected by @console_lock.
+
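
The resulting idiom for readers is to hold @syslog_lock only long enough to snapshot the sequence number, then work against the lockless ringbuffer. A minimal sketch of the pattern the patch applies in do_syslog() (taken from the hunk below):

	u64 seq;

	/* @syslog_seq is the only state that still needs a lock */
	syslog_lock_irq();
	seq = syslog_seq;
	syslog_unlock_irq();

	/* the ringbuffer itself can be read locklessly */
	error = wait_event_interruptible(log_wait,
					 prb_read_valid(prb, seq, NULL));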
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/internal.h | 4 -
+ kernel/printk/printk.c | 151 +++++++++++---------------------------------
+ kernel/printk/printk_safe.c | 18 -----
+ 3 files changed, 42 insertions(+), 131 deletions(-)
+
+--- a/kernel/printk/internal.h
++++ b/kernel/printk/internal.h
+@@ -12,8 +12,6 @@
+
+ #define PRINTK_NMI_CONTEXT_OFFSET 0x010000000
+
+-extern raw_spinlock_t logbuf_lock;
+-
+ __printf(4, 0)
+ int vprintk_store(int facility, int level,
+ const struct dev_printk_info *dev_info,
+@@ -59,7 +57,7 @@ void defer_console_output(void);
+ __printf(1, 0) int vprintk_func(const char *fmt, va_list args) { return 0; }
+
+ /*
+- * In !PRINTK builds we still export logbuf_lock spin_lock, console_sem
++ * In !PRINTK builds we still export console_sem
+ * semaphore and some of console functions (console_unlock()/etc.), so
+ * printk-safe must preserve the existing local IRQ guarantees.
+ */
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -355,48 +355,22 @@ enum log_flags {
+ LOG_CONT = 8, /* text is a fragment of a continuation line */
+ };
+
+-/*
+- * The logbuf_lock protects kmsg buffer, indices, counters. This can be taken
+- * within the scheduler's rq lock. It must be released before calling
+- * console_unlock() or anything else that might wake up a process.
+- */
+-DEFINE_RAW_SPINLOCK(logbuf_lock);
+-
+-/*
+- * Helper macros to lock/unlock logbuf_lock and switch between
+- * printk-safe/unsafe modes.
+- */
+-#define logbuf_lock_irq() \
+- do { \
+- printk_safe_enter_irq(); \
+- raw_spin_lock(&logbuf_lock); \
+- } while (0)
+-
+-#define logbuf_unlock_irq() \
+- do { \
+- raw_spin_unlock(&logbuf_lock); \
+- printk_safe_exit_irq(); \
+- } while (0)
+-
+-#define logbuf_lock_irqsave(flags) \
+- do { \
+- printk_safe_enter_irqsave(flags); \
+- raw_spin_lock(&logbuf_lock); \
+- } while (0)
+-
+-#define logbuf_unlock_irqrestore(flags) \
+- do { \
+- raw_spin_unlock(&logbuf_lock); \
+- printk_safe_exit_irqrestore(flags); \
+- } while (0)
++/* The syslog_lock protects syslog_* variables. */
++DEFINE_RAW_SPINLOCK(syslog_lock);
++#define syslog_lock_irq() raw_spin_lock_irq(&syslog_lock)
++#define syslog_unlock_irq() raw_spin_unlock_irq(&syslog_lock)
++#define syslog_lock_irqsave(flags) raw_spin_lock_irqsave(&syslog_lock, flags)
++#define syslog_unlock_irqrestore(flags) raw_spin_unlock_irqrestore(&syslog_lock, flags)
+
+ #ifdef CONFIG_PRINTK
+ DECLARE_WAIT_QUEUE_HEAD(log_wait);
++/* All 3 protected by @syslog_lock. */
+ /* the next printk record to read by syslog(READ) or /proc/kmsg */
+ static u64 syslog_seq;
+ static size_t syslog_partial;
+ static bool syslog_time;
+
++/* All 3 protected by @console_sem. */
+ /* the next printk record to write to the console */
+ static u64 console_seq;
+ static u64 exclusive_console_stop_seq;
+@@ -766,27 +740,22 @@ static ssize_t devkmsg_read(struct file
+ if (ret)
+ return ret;
+
+- logbuf_lock_irq();
+ if (!prb_read_valid(prb, user->seq, r)) {
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+- logbuf_unlock_irq();
+ goto out;
+ }
+
+- logbuf_unlock_irq();
+ ret = wait_event_interruptible(log_wait,
+ prb_read_valid(prb, user->seq, r));
+ if (ret)
+ goto out;
+- logbuf_lock_irq();
+ }
+
+ if (user->seq < prb_first_valid_seq(prb)) {
+ /* our last seen message is gone, return error and reset */
+ user->seq = prb_first_valid_seq(prb);
+ ret = -EPIPE;
+- logbuf_unlock_irq();
+ goto out;
+ }
+
+@@ -796,7 +765,6 @@ static ssize_t devkmsg_read(struct file
+ &r->info->dev_info);
+
+ user->seq = r->info->seq + 1;
+- logbuf_unlock_irq();
+
+ if (len > count) {
+ ret = -EINVAL;
+@@ -831,7 +799,6 @@ static loff_t devkmsg_llseek(struct file
+ if (offset)
+ return -ESPIPE;
+
+- logbuf_lock_irq();
+ switch (whence) {
+ case SEEK_SET:
+ /* the first record */
+@@ -852,7 +819,6 @@ static loff_t devkmsg_llseek(struct file
+ default:
+ ret = -EINVAL;
+ }
+- logbuf_unlock_irq();
+ return ret;
+ }
+
+@@ -866,7 +832,6 @@ static __poll_t devkmsg_poll(struct file
+
+ poll_wait(file, &log_wait, wait);
+
+- logbuf_lock_irq();
+ if (prb_read_valid(prb, user->seq, NULL)) {
+ /* return error when data has vanished underneath us */
+ if (user->seq < prb_first_valid_seq(prb))
+@@ -874,7 +839,6 @@ static __poll_t devkmsg_poll(struct file
+ else
+ ret = EPOLLIN|EPOLLRDNORM;
+ }
+- logbuf_unlock_irq();
+
+ return ret;
+ }
+@@ -907,9 +871,7 @@ static int devkmsg_open(struct inode *in
+ prb_rec_init_rd(&user->record, &user->info,
+ &user->text_buf[0], sizeof(user->text_buf));
+
+- logbuf_lock_irq();
+ user->seq = prb_first_valid_seq(prb);
+- logbuf_unlock_irq();
+
+ file->private_data = user;
+ return 0;
+@@ -1117,7 +1079,6 @@ void __init setup_log_buf(int early)
+ struct printk_record r;
+ size_t new_descs_size;
+ size_t new_infos_size;
+- unsigned long flags;
+ char *new_log_buf;
+ unsigned int free;
+ u64 seq;
+@@ -1175,8 +1136,6 @@ void __init setup_log_buf(int early)
+ new_descs, ilog2(new_descs_count),
+ new_infos);
+
+- logbuf_lock_irqsave(flags);
+-
+ log_buf_len = new_log_buf_len;
+ log_buf = new_log_buf;
+ new_log_buf_len = 0;
+@@ -1192,8 +1151,6 @@ void __init setup_log_buf(int early)
+ */
+ prb = &printk_rb_dynamic;
+
+- logbuf_unlock_irqrestore(flags);
+-
+ if (seq != prb_next_seq(&printk_rb_static)) {
+ pr_err("dropped %llu messages\n",
+ prb_next_seq(&printk_rb_static) - seq);
+@@ -1469,9 +1426,9 @@ static int syslog_print(char __user *buf
+ size_t n;
+ size_t skip;
+
+- logbuf_lock_irq();
++ syslog_lock_irq();
+ if (!prb_read_valid(prb, syslog_seq, &r)) {
+- logbuf_unlock_irq();
++ syslog_unlock_irq();
+ break;
+ }
+ if (r.info->seq != syslog_seq) {
+@@ -1500,7 +1457,7 @@ static int syslog_print(char __user *buf
+ syslog_partial += n;
+ } else
+ n = 0;
+- logbuf_unlock_irq();
++ syslog_unlock_irq();
+
+ if (!n)
+ break;
+@@ -1525,6 +1482,7 @@ static int syslog_print_all(char __user
+ struct printk_info info;
+ unsigned int line_count;
+ struct printk_record r;
++ u64 newest_seq;
+ u64 clr_seq;
+ char *text;
+ int len = 0;
+@@ -1536,19 +1494,30 @@ static int syslog_print_all(char __user
+ return -ENOMEM;
+
+ time = printk_time;
+- logbuf_lock_irq();
+ clr_seq = atomic64_read(&clear_seq);
+
+ /*
+ * Find first record that fits, including all following records,
+ * into the user-provided buffer for this dump.
+ */
++
+ prb_for_each_info(clr_seq, prb, seq, &info, &line_count)
+ len += get_record_print_text_size(&info, line_count, true, time);
+
+- /* move first record forward until length fits into the buffer */
++ /*
++	 * Keep track of the latest seq in case new records are coming in fast
++ * and overwriting the older records.
++ */
++ newest_seq = seq;
++
++ /*
++ * Move first record forward until length fits into the buffer. This
++ * is a best effort attempt. If @newest_seq is reached because the
++ * ringbuffer is wrapping too fast, just start filling the buffer
++ * from there.
++ */
+ prb_for_each_info(clr_seq, prb, seq, &info, &line_count) {
+- if (len <= size)
++ if (len <= size || info.seq > newest_seq)
+ break;
+ len -= get_record_print_text_size(&info, line_count, true, time);
+ }
+@@ -1566,12 +1535,10 @@ static int syslog_print_all(char __user
+ break;
+ }
+
+- logbuf_unlock_irq();
+ if (copy_to_user(buf + len, text, textlen))
+ len = -EFAULT;
+ else
+ len += textlen;
+- logbuf_lock_irq();
+
+ if (len < 0)
+ break;
+@@ -1579,7 +1546,6 @@ static int syslog_print_all(char __user
+
+ if (clear)
+ atomic64_set(&clear_seq, seq);
+- logbuf_unlock_irq();
+
+ kfree(text);
+ return len;
+@@ -1587,9 +1553,7 @@ static int syslog_print_all(char __user
+
+ static void syslog_clear(void)
+ {
+- logbuf_lock_irq();
+ atomic64_set(&clear_seq, prb_next_seq(prb));
+- logbuf_unlock_irq();
+ }
+
+ int do_syslog(int type, char __user *buf, int len, int source)
+@@ -1597,6 +1561,7 @@ int do_syslog(int type, char __user *buf
+ bool clear = false;
+ static int saved_console_loglevel = LOGLEVEL_DEFAULT;
+ int error;
++ u64 seq;
+
+ error = check_syslog_permissions(type, source);
+ if (error)
+@@ -1614,8 +1579,11 @@ int do_syslog(int type, char __user *buf
+ return 0;
+ if (!access_ok(buf, len))
+ return -EFAULT;
++ syslog_lock_irq();
++ seq = syslog_seq;
++ syslog_unlock_irq();
+ error = wait_event_interruptible(log_wait,
+- prb_read_valid(prb, syslog_seq, NULL));
++ prb_read_valid(prb, seq, NULL));
+ if (error)
+ return error;
+ error = syslog_print(buf, len);
+@@ -1663,7 +1631,7 @@ int do_syslog(int type, char __user *buf
+ break;
+ /* Number of chars in the log buffer */
+ case SYSLOG_ACTION_SIZE_UNREAD:
+- logbuf_lock_irq();
++ syslog_lock_irq();
+ if (syslog_seq < prb_first_valid_seq(prb)) {
+ /* messages are gone, move to first one */
+ syslog_seq = prb_first_valid_seq(prb);
+@@ -1690,7 +1658,7 @@ int do_syslog(int type, char __user *buf
+ }
+ error -= syslog_partial;
+ }
+- logbuf_unlock_irq();
++ syslog_unlock_irq();
+ break;
+ /* Size of the log buffer */
+ case SYSLOG_ACTION_SIZE_BUFFER:
+@@ -2065,7 +2033,6 @@ static size_t log_output(int facility, i
+ dev_info, text, text_len);
+ }
+
+-/* Must be called under logbuf_lock. */
+ int vprintk_store(int facility, int level,
+ const struct dev_printk_info *dev_info,
+ const char *fmt, va_list args)
+@@ -2131,7 +2098,6 @@ asmlinkage int vprintk_emit(int facility
+ {
+ int printed_len;
+ bool in_sched = false;
+- unsigned long flags;
+
+ /* Suppress unimportant messages after panic happens */
+ if (unlikely(suppress_printk))
+@@ -2146,9 +2112,7 @@ asmlinkage int vprintk_emit(int facility
+ printk_delay();
+
+ /* This stops the holder of console_sem just where we want him */
+- logbuf_lock_irqsave(flags);
+ printed_len = vprintk_store(facility, level, dev_info, fmt, args);
+- logbuf_unlock_irqrestore(flags);
+
+ /* If called from the scheduler, we can not call up(). */
+ if (!in_sched) {
+@@ -2580,7 +2544,6 @@ void console_unlock(void)
+ size_t len;
+
+ printk_safe_enter_irqsave(flags);
+- raw_spin_lock(&logbuf_lock);
+ skip:
+ if (!prb_read_valid(prb, console_seq, &r))
+ break;
+@@ -2624,7 +2587,6 @@ void console_unlock(void)
+ console_msg_format & MSG_FORMAT_SYSLOG,
+ printk_time);
+ console_seq++;
+- raw_spin_unlock(&logbuf_lock);
+
+ /*
+ * While actively printing out messages, if another printk()
+@@ -2651,8 +2613,6 @@ void console_unlock(void)
+
+ console_locked = 0;
+
+- raw_spin_unlock(&logbuf_lock);
+-
+ up_console_sem();
+
+ /*
+@@ -2661,9 +2621,7 @@ void console_unlock(void)
+ * there's a new owner and the console_unlock() from them will do the
+ * flush, no worries.
+ */
+- raw_spin_lock(&logbuf_lock);
+ retry = prb_read_valid(prb, console_seq, NULL);
+- raw_spin_unlock(&logbuf_lock);
+ printk_safe_exit_irqrestore(flags);
+
+ if (retry && console_trylock())
+@@ -2727,13 +2685,8 @@ void console_flush_on_panic(enum con_flu
+ console_trylock();
+ console_may_schedule = 0;
+
+- if (mode == CONSOLE_REPLAY_ALL) {
+- unsigned long flags;
+-
+- logbuf_lock_irqsave(flags);
++ if (mode == CONSOLE_REPLAY_ALL)
+ console_seq = prb_first_valid_seq(prb);
+- logbuf_unlock_irqrestore(flags);
+- }
+ console_unlock();
+ }
+
+@@ -2957,11 +2910,7 @@ void register_console(struct console *ne
+ nr_ext_console_drivers++;
+
+ if (newcon->flags & CON_PRINTBUFFER) {
+- /*
+- * console_unlock(); will print out the buffered messages
+- * for us.
+- */
+- logbuf_lock_irqsave(flags);
++ syslog_lock_irqsave(flags);
+ /*
+ * We're about to replay the log buffer. Only do this to the
+ * just-registered console to avoid excessive message spam to
+@@ -2974,7 +2923,7 @@ void register_console(struct console *ne
+ exclusive_console = newcon;
+ exclusive_console_stop_seq = console_seq;
+ console_seq = syslog_seq;
+- logbuf_unlock_irqrestore(flags);
++ syslog_unlock_irqrestore(flags);
+ }
+ console_unlock();
+ console_sysfs_notify();
+@@ -3340,7 +3289,6 @@ EXPORT_SYMBOL_GPL(kmsg_dump_reason_str);
+ void kmsg_dump(enum kmsg_dump_reason reason)
+ {
+ struct kmsg_dumper *dumper;
+- unsigned long flags;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(dumper, &dump_list, list) {
+@@ -3360,10 +3308,7 @@ void kmsg_dump(enum kmsg_dump_reason rea
+ /* initialize iterator with data about the stored records */
+ dumper->active = true;
+
+- logbuf_lock_irqsave(flags);
+- dumper->cur_seq = atomic64_read(&clear_seq);
+- dumper->next_seq = prb_next_seq(prb);
+- logbuf_unlock_irqrestore(flags);
++ kmsg_dump_rewind_nolock(dumper);
+
+ /* invoke dumper which will iterate over records */
+ dumper->dump(dumper, reason);
+@@ -3450,14 +3395,7 @@ bool kmsg_dump_get_line_nolock(struct km
+ bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
+ char *line, size_t size, size_t *len)
+ {
+- unsigned long flags;
+- bool ret;
+-
+- logbuf_lock_irqsave(flags);
+- ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len);
+- logbuf_unlock_irqrestore(flags);
+-
+- return ret;
++ return kmsg_dump_get_line_nolock(dumper, syslog, line, size, len);
+ }
+ EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
+
+@@ -3486,7 +3424,6 @@ bool kmsg_dump_get_buffer(struct kmsg_du
+ struct printk_info info;
+ unsigned int line_count;
+ struct printk_record r;
+- unsigned long flags;
+ u64 seq;
+ u64 next_seq;
+ size_t len = 0;
+@@ -3496,17 +3433,14 @@ bool kmsg_dump_get_buffer(struct kmsg_du
+ if (!dumper->active || !buf || !size)
+ goto out;
+
+- logbuf_lock_irqsave(flags);
+ if (dumper->cur_seq < prb_first_valid_seq(prb)) {
+ /* messages are gone, move to first available one */
+ dumper->cur_seq = prb_first_valid_seq(prb);
+ }
+
+ /* last entry */
+- if (dumper->cur_seq >= dumper->next_seq) {
+- logbuf_unlock_irqrestore(flags);
++ if (dumper->cur_seq >= dumper->next_seq)
+ goto out;
+- }
+
+ /*
+ * Find first record that fits, including all following records,
+@@ -3549,7 +3483,6 @@ bool kmsg_dump_get_buffer(struct kmsg_du
+
+ dumper->next_seq = next_seq;
+ ret = true;
+- logbuf_unlock_irqrestore(flags);
+ out:
+ if (len_out)
+ *len_out = len;
+@@ -3564,8 +3497,6 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
+ * Reset the dumper's iterator so that kmsg_dump_get_line() and
+ * kmsg_dump_get_buffer() can be called again and used multiple
+ * times within the same dumper.dump() callback.
+- *
+- * The function is similar to kmsg_dump_rewind(), but grabs no locks.
+ */
+ void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper)
+ {
+@@ -3583,11 +3514,7 @@ void kmsg_dump_rewind_nolock(struct kmsg
+ */
+ void kmsg_dump_rewind(struct kmsg_dumper *dumper)
+ {
+- unsigned long flags;
+-
+- logbuf_lock_irqsave(flags);
+ kmsg_dump_rewind_nolock(dumper);
+- logbuf_unlock_irqrestore(flags);
+ }
+ EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
+
+--- a/kernel/printk/printk_safe.c
++++ b/kernel/printk/printk_safe.c
+@@ -266,18 +266,6 @@ void printk_safe_flush(void)
+ */
+ void printk_safe_flush_on_panic(void)
+ {
+- /*
+- * Make sure that we could access the main ring buffer.
+- * Do not risk a double release when more CPUs are up.
+- */
+- if (raw_spin_is_locked(&logbuf_lock)) {
+- if (num_online_cpus() > 1)
+- return;
+-
+- debug_locks_off();
+- raw_spin_lock_init(&logbuf_lock);
+- }
+-
+ printk_safe_flush();
+ }
+
+@@ -371,17 +359,15 @@ void __printk_safe_exit(void)
+ * Try to use the main logbuf even in NMI. But avoid calling console
+ * drivers that might have their own locks.
+ */
+- if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK) &&
+- raw_spin_trylock(&logbuf_lock)) {
++ if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK)) {
+ int len;
+
+ len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
+- raw_spin_unlock(&logbuf_lock);
+ defer_console_output();
+ return len;
+ }
+
+- /* Use extra buffer in NMI when logbuf_lock is taken or in safe mode. */
++ /* Use extra buffer in NMI or in safe mode. */
+ if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
+ return vprintk_nmi(fmt, args);
+
diff --git a/debian/patches-rt/0004-printk-use-the-lockless-ringbuffer.patch b/debian/patches-rt/0004-printk-use-the-lockless-ringbuffer.patch
new file mode 100644
index 000000000..4c4d0649a
--- /dev/null
+++ b/debian/patches-rt/0004-printk-use-the-lockless-ringbuffer.patch
@@ -0,0 +1,1520 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Thu, 9 Jul 2020 15:29:44 +0206
+Subject: [PATCH 04/25] printk: use the lockless ringbuffer
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+Replace the existing ringbuffer usage and implementation with
+lockless ringbuffer usage. Even though the new ringbuffer does not
+require locking, all existing locking is left in place. Therefore,
+this change is purely replacing the underlying ringbuffer.
+
+Changes that exist due to the ringbuffer replacement:
+
+- The VMCOREINFO has been updated for the new structures.
+
+- Dictionary data is now stored in a separate data buffer from the
+ human-readable messages. The dictionary data buffer is set to the
+ same size as the message buffer. Therefore, the total required
+ memory for both dictionary and message data is
+ 2 * (2 ^ CONFIG_LOG_BUF_SHIFT) for the initial static buffers and
+ 2 * log_buf_len (the kernel parameter) for the dynamic buffers.
+
+- Record meta-data is now stored in a separate array of descriptors.
+ This is an additional 72 * (2 ^ (CONFIG_LOG_BUF_SHIFT - 5)) bytes
+ for the static array and 72 * (log_buf_len >> 5) bytes for the
+  dynamic array (a worked example follows below).
+
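
To make these numbers concrete, a worked example assuming CONFIG_LOG_BUF_SHIFT=17 (the shift value is an illustrative assumption, not part of the patch):

	message text:  2^17                     = 131072 bytes (128 KiB)
	dictionary:    2^17                     = 131072 bytes (128 KiB)
	descriptors:   72 * 2^(17-5) = 72 * 4096 = 294912 bytes (288 KiB)
	total static footprint                  = 557056 bytes (544 KiB)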
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20200709132344.760-5-john.ogness@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 946 +++++++++++++++++++++++++------------------------
+ 1 file changed, 496 insertions(+), 450 deletions(-)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -55,6 +55,7 @@
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/printk.h>
+
++#include "printk_ringbuffer.h"
+ #include "console_cmdline.h"
+ #include "braille.h"
+ #include "internal.h"
+@@ -294,30 +295,24 @@ enum con_msg_format_flags {
+ static int console_msg_format = MSG_FORMAT_DEFAULT;
+
+ /*
+- * The printk log buffer consists of a chain of concatenated variable
+- * length records. Every record starts with a record header, containing
+- * the overall length of the record.
+- *
+- * The heads to the first and last entry in the buffer, as well as the
+- * sequence numbers of these entries are maintained when messages are
+- * stored.
+- *
+- * If the heads indicate available messages, the length in the header
+- * tells the start next message. A length == 0 for the next message
+- * indicates a wrap-around to the beginning of the buffer.
+- *
+- * Every record carries the monotonic timestamp in microseconds, as well as
+- * the standard userspace syslog level and syslog facility. The usual
+- * kernel messages use LOG_KERN; userspace-injected messages always carry
+- * a matching syslog facility, by default LOG_USER. The origin of every
+- * message can be reliably determined that way.
+- *
+- * The human readable log message directly follows the message header. The
+- * length of the message text is stored in the header, the stored message
+- * is not terminated.
+- *
+- * Optionally, a message can carry a dictionary of properties (key/value pairs),
+- * to provide userspace with a machine-readable message context.
++ * The printk log buffer consists of a sequenced collection of records, each
++ * containing variable length message and dictionary text. Every record
++ * also contains its own meta-data (@info).
++ *
++ * Every record meta-data carries the timestamp in microseconds, as well as
++ * the standard userspace syslog level and syslog facility. The usual kernel
++ * messages use LOG_KERN; userspace-injected messages always carry a matching
++ * syslog facility, by default LOG_USER. The origin of every message can be
++ * reliably determined that way.
++ *
++ * The human readable log message of a record is available in @text, the
++ * length of the message text in @text_len. The stored message is not
++ * terminated.
++ *
++ * Optionally, a record can carry a dictionary of properties (key/value
++ * pairs), to provide userspace with a machine-readable message context. The
++ * length of the dictionary is available in @dict_len. The dictionary is not
++ * terminated.
+ *
+ * Examples for well-defined, commonly used property names are:
+ * DEVICE=b12:8 device identifier
+@@ -331,21 +326,19 @@ static int console_msg_format = MSG_FORM
+ * follows directly after a '=' character. Every property is terminated by
+ * a '\0' character. The last property is not terminated.
+ *
+- * Example of a message structure:
+- * 0000 ff 8f 00 00 00 00 00 00 monotonic time in nsec
+- * 0008 34 00 record is 52 bytes long
+- * 000a 0b 00 text is 11 bytes long
+- * 000c 1f 00 dictionary is 23 bytes long
+- * 000e 03 00 LOG_KERN (facility) LOG_ERR (level)
+- * 0010 69 74 27 73 20 61 20 6c "it's a l"
+- * 69 6e 65 "ine"
+- * 001b 44 45 56 49 43 "DEVIC"
+- * 45 3d 62 38 3a 32 00 44 "E=b8:2\0D"
+- * 52 49 56 45 52 3d 62 75 "RIVER=bu"
+- * 67 "g"
+- * 0032 00 00 00 padding to next message header
++ * Example of record values:
++ * record.text_buf = "it's a line" (unterminated)
++ * record.dict_buf = "DEVICE=b8:2\0DRIVER=bug" (unterminated)
++ * record.info.seq = 56
++ * record.info.ts_nsec = 36863
++ * record.info.text_len = 11
++ * record.info.dict_len = 22
++ * record.info.facility = 0 (LOG_KERN)
++ * record.info.flags = 0
++ * record.info.level = 3 (LOG_ERR)
++ * record.info.caller_id = 299 (task 299)
+ *
+- * The 'struct printk_log' buffer header must never be directly exported to
++ * The 'struct printk_info' buffer must never be directly exported to
+ * userspace, it is a kernel-private implementation detail that might
+ * need to be changed in the future, when the requirements change.
+ *
+@@ -365,23 +358,6 @@ enum log_flags {
+ LOG_CONT = 8, /* text is a fragment of a continuation line */
+ };
+
+-struct printk_log {
+- u64 ts_nsec; /* timestamp in nanoseconds */
+- u16 len; /* length of entire record */
+- u16 text_len; /* length of text buffer */
+- u16 dict_len; /* length of dictionary buffer */
+- u8 facility; /* syslog facility */
+- u8 flags:5; /* internal record flags */
+- u8 level:3; /* syslog level */
+-#ifdef CONFIG_PRINTK_CALLER
+- u32 caller_id; /* thread id or processor id */
+-#endif
+-}
+-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+-__packed __aligned(4)
+-#endif
+-;
+-
+ /*
+ * The logbuf_lock protects kmsg buffer, indices, counters. This can be taken
+ * within the scheduler's rq lock. It must be released before calling
+@@ -421,26 +397,16 @@ DEFINE_RAW_SPINLOCK(logbuf_lock);
+ DECLARE_WAIT_QUEUE_HEAD(log_wait);
+ /* the next printk record to read by syslog(READ) or /proc/kmsg */
+ static u64 syslog_seq;
+-static u32 syslog_idx;
+ static size_t syslog_partial;
+ static bool syslog_time;
+
+-/* index and sequence number of the first record stored in the buffer */
+-static u64 log_first_seq;
+-static u32 log_first_idx;
+-
+-/* index and sequence number of the next record to store in the buffer */
+-static u64 log_next_seq;
+-static u32 log_next_idx;
+-
+ /* the next printk record to write to the console */
+ static u64 console_seq;
+-static u32 console_idx;
+ static u64 exclusive_console_stop_seq;
++static unsigned long console_dropped;
+
+ /* the next printk record to read after the last 'clear' command */
+ static u64 clear_seq;
+-static u32 clear_idx;
+
+ #ifdef CONFIG_PRINTK_CALLER
+ #define PREFIX_MAX 48
+@@ -453,7 +419,7 @@ static u32 clear_idx;
+ #define LOG_FACILITY(v) ((v) >> 3 & 0xff)
+
+ /* record buffer */
+-#define LOG_ALIGN __alignof__(struct printk_log)
++#define LOG_ALIGN __alignof__(unsigned long)
+ #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
+ #define LOG_BUF_LEN_MAX (u32)(1 << 31)
+ static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
+@@ -461,6 +427,24 @@ static char *log_buf = __log_buf;
+ static u32 log_buf_len = __LOG_BUF_LEN;
+
+ /*
++ * Define the average message size. This only affects the number of
++ * descriptors that will be available. Underestimating is better than
++ * overestimating (too many available descriptors is better than not enough).
++ * The dictionary buffer will be the same size as the text buffer.
++ */
++#define PRB_AVGBITS 5 /* 32 character average length */
++
++#if CONFIG_LOG_BUF_SHIFT <= PRB_AVGBITS
++#error CONFIG_LOG_BUF_SHIFT value too small.
++#endif
++_DEFINE_PRINTKRB(printk_rb_static, CONFIG_LOG_BUF_SHIFT - PRB_AVGBITS,
++ PRB_AVGBITS, PRB_AVGBITS, &__log_buf[0]);
++
++static struct printk_ringbuffer printk_rb_dynamic;
++
++static struct printk_ringbuffer *prb = &printk_rb_static;
++
++/*
+ * We cannot access per-CPU data (e.g. per-CPU flush irq_work) before
+ * per_cpu_areas are initialised. This variable is set to true when
+ * it's safe to access per-CPU data.
+@@ -484,108 +468,6 @@ u32 log_buf_len_get(void)
+ return log_buf_len;
+ }
+
+-/* human readable text of the record */
+-static char *log_text(const struct printk_log *msg)
+-{
+- return (char *)msg + sizeof(struct printk_log);
+-}
+-
+-/* optional key/value pair dictionary attached to the record */
+-static char *log_dict(const struct printk_log *msg)
+-{
+- return (char *)msg + sizeof(struct printk_log) + msg->text_len;
+-}
+-
+-/* get record by index; idx must point to valid msg */
+-static struct printk_log *log_from_idx(u32 idx)
+-{
+- struct printk_log *msg = (struct printk_log *)(log_buf + idx);
+-
+- /*
+- * A length == 0 record is the end of buffer marker. Wrap around and
+- * read the message at the start of the buffer.
+- */
+- if (!msg->len)
+- return (struct printk_log *)log_buf;
+- return msg;
+-}
+-
+-/* get next record; idx must point to valid msg */
+-static u32 log_next(u32 idx)
+-{
+- struct printk_log *msg = (struct printk_log *)(log_buf + idx);
+-
+- /* length == 0 indicates the end of the buffer; wrap */
+- /*
+- * A length == 0 record is the end of buffer marker. Wrap around and
+- * read the message at the start of the buffer as *this* one, and
+- * return the one after that.
+- */
+- if (!msg->len) {
+- msg = (struct printk_log *)log_buf;
+- return msg->len;
+- }
+- return idx + msg->len;
+-}
+-
+-/*
+- * Check whether there is enough free space for the given message.
+- *
+- * The same values of first_idx and next_idx mean that the buffer
+- * is either empty or full.
+- *
+- * If the buffer is empty, we must respect the position of the indexes.
+- * They cannot be reset to the beginning of the buffer.
+- */
+-static int logbuf_has_space(u32 msg_size, bool empty)
+-{
+- u32 free;
+-
+- if (log_next_idx > log_first_idx || empty)
+- free = max(log_buf_len - log_next_idx, log_first_idx);
+- else
+- free = log_first_idx - log_next_idx;
+-
+- /*
+- * We need space also for an empty header that signalizes wrapping
+- * of the buffer.
+- */
+- return free >= msg_size + sizeof(struct printk_log);
+-}
+-
+-static int log_make_free_space(u32 msg_size)
+-{
+- while (log_first_seq < log_next_seq &&
+- !logbuf_has_space(msg_size, false)) {
+- /* drop old messages until we have enough contiguous space */
+- log_first_idx = log_next(log_first_idx);
+- log_first_seq++;
+- }
+-
+- if (clear_seq < log_first_seq) {
+- clear_seq = log_first_seq;
+- clear_idx = log_first_idx;
+- }
+-
+- /* sequence numbers are equal, so the log buffer is empty */
+- if (logbuf_has_space(msg_size, log_first_seq == log_next_seq))
+- return 0;
+-
+- return -ENOMEM;
+-}
+-
+-/* compute the message size including the padding bytes */
+-static u32 msg_used_size(u16 text_len, u16 dict_len, u32 *pad_len)
+-{
+- u32 size;
+-
+- size = sizeof(struct printk_log) + text_len + dict_len;
+- *pad_len = (-size) & (LOG_ALIGN - 1);
+- size += *pad_len;
+-
+- return size;
+-}
+-
+ /*
+ * Define how much of the log buffer we could take at maximum. The value
+ * must be greater than two. Note that only half of the buffer is available
+@@ -594,22 +476,23 @@ static u32 msg_used_size(u16 text_len, u
+ #define MAX_LOG_TAKE_PART 4
+ static const char trunc_msg[] = "<truncated>";
+
+-static u32 truncate_msg(u16 *text_len, u16 *trunc_msg_len,
+- u16 *dict_len, u32 *pad_len)
++static void truncate_msg(u16 *text_len, u16 *trunc_msg_len)
+ {
+ /*
+ * The message should not take the whole buffer. Otherwise, it might
+ * get removed too soon.
+ */
+ u32 max_text_len = log_buf_len / MAX_LOG_TAKE_PART;
++
+ if (*text_len > max_text_len)
+ *text_len = max_text_len;
+- /* enable the warning message */
++
++ /* enable the warning message (if there is room) */
+ *trunc_msg_len = strlen(trunc_msg);
+- /* disable the "dict" completely */
+- *dict_len = 0;
+- /* compute the size again, count also the warning message */
+- return msg_used_size(*text_len + *trunc_msg_len, 0, pad_len);
++ if (*text_len >= *trunc_msg_len)
++ *text_len -= *trunc_msg_len;
++ else
++ *trunc_msg_len = 0;
+ }
+
+ /* insert record into the buffer, discard old ones, update heads */
+@@ -618,60 +501,40 @@ static int log_store(u32 caller_id, int
+ const char *dict, u16 dict_len,
+ const char *text, u16 text_len)
+ {
+- struct printk_log *msg;
+- u32 size, pad_len;
++ struct prb_reserved_entry e;
++ struct printk_record r;
+ u16 trunc_msg_len = 0;
+
+- /* number of '\0' padding bytes to next message */
+- size = msg_used_size(text_len, dict_len, &pad_len);
++ prb_rec_init_wr(&r, text_len, dict_len);
+
+- if (log_make_free_space(size)) {
++ if (!prb_reserve(&e, prb, &r)) {
+ /* truncate the message if it is too long for empty buffer */
+- size = truncate_msg(&text_len, &trunc_msg_len,
+- &dict_len, &pad_len);
++ truncate_msg(&text_len, &trunc_msg_len);
++ prb_rec_init_wr(&r, text_len + trunc_msg_len, dict_len);
+ /* survive when the log buffer is too small for trunc_msg */
+- if (log_make_free_space(size))
++ if (!prb_reserve(&e, prb, &r))
+ return 0;
+ }
+
+- if (log_next_idx + size + sizeof(struct printk_log) > log_buf_len) {
+- /*
+- * This message + an additional empty header does not fit
+- * at the end of the buffer. Add an empty header with len == 0
+- * to signify a wrap around.
+- */
+- memset(log_buf + log_next_idx, 0, sizeof(struct printk_log));
+- log_next_idx = 0;
+- }
+-
+ /* fill message */
+- msg = (struct printk_log *)(log_buf + log_next_idx);
+- memcpy(log_text(msg), text, text_len);
+- msg->text_len = text_len;
+- if (trunc_msg_len) {
+- memcpy(log_text(msg) + text_len, trunc_msg, trunc_msg_len);
+- msg->text_len += trunc_msg_len;
+- }
+- memcpy(log_dict(msg), dict, dict_len);
+- msg->dict_len = dict_len;
+- msg->facility = facility;
+- msg->level = level & 7;
+- msg->flags = flags & 0x1f;
++ memcpy(&r.text_buf[0], text, text_len);
++ if (trunc_msg_len)
++ memcpy(&r.text_buf[text_len], trunc_msg, trunc_msg_len);
++ if (r.dict_buf)
++ memcpy(&r.dict_buf[0], dict, dict_len);
++ r.info->facility = facility;
++ r.info->level = level & 7;
++ r.info->flags = flags & 0x1f;
+ if (ts_nsec > 0)
+- msg->ts_nsec = ts_nsec;
++ r.info->ts_nsec = ts_nsec;
+ else
+- msg->ts_nsec = local_clock();
+-#ifdef CONFIG_PRINTK_CALLER
+- msg->caller_id = caller_id;
+-#endif
+- memset(log_dict(msg) + dict_len, 0, pad_len);
+- msg->len = size;
++ r.info->ts_nsec = local_clock();
++ r.info->caller_id = caller_id;
+
+ /* insert message */
+- log_next_idx += msg->len;
+- log_next_seq++;
++ prb_commit(&e);
+
+- return msg->text_len;
++ return (text_len + trunc_msg_len);
+ }
+
+ int dmesg_restrict = IS_ENABLED(CONFIG_SECURITY_DMESG_RESTRICT);
+@@ -723,13 +586,13 @@ static void append_char(char **pp, char
+ *(*pp)++ = c;
+ }
+
+-static ssize_t msg_print_ext_header(char *buf, size_t size,
+- struct printk_log *msg, u64 seq)
++static ssize_t info_print_ext_header(char *buf, size_t size,
++ struct printk_info *info)
+ {
+- u64 ts_usec = msg->ts_nsec;
++ u64 ts_usec = info->ts_nsec;
+ char caller[20];
+ #ifdef CONFIG_PRINTK_CALLER
+- u32 id = msg->caller_id;
++ u32 id = info->caller_id;
+
+ snprintf(caller, sizeof(caller), ",caller=%c%u",
+ id & 0x80000000 ? 'C' : 'T', id & ~0x80000000);
+@@ -740,8 +603,8 @@ static ssize_t msg_print_ext_header(char
+ do_div(ts_usec, 1000);
+
+ return scnprintf(buf, size, "%u,%llu,%llu,%c%s;",
+- (msg->facility << 3) | msg->level, seq, ts_usec,
+- msg->flags & LOG_CONT ? 'c' : '-', caller);
++ (info->facility << 3) | info->level, info->seq,
++ ts_usec, info->flags & LOG_CONT ? 'c' : '-', caller);
+ }
+
+ static ssize_t msg_print_ext_body(char *buf, size_t size,
+@@ -795,10 +658,14 @@ static ssize_t msg_print_ext_body(char *
+ /* /dev/kmsg - userspace message inject/listen interface */
+ struct devkmsg_user {
+ u64 seq;
+- u32 idx;
+ struct ratelimit_state rs;
+ struct mutex lock;
+ char buf[CONSOLE_EXT_LOG_MAX];
++
++ struct printk_info info;
++ char text_buf[CONSOLE_EXT_LOG_MAX];
++ char dict_buf[CONSOLE_EXT_LOG_MAX];
++ struct printk_record record;
+ };
+
+ static __printf(3, 4) __cold
+@@ -881,7 +748,7 @@ static ssize_t devkmsg_read(struct file
+ size_t count, loff_t *ppos)
+ {
+ struct devkmsg_user *user = file->private_data;
+- struct printk_log *msg;
++ struct printk_record *r = &user->record;
+ size_t len;
+ ssize_t ret;
+
+@@ -893,7 +760,7 @@ static ssize_t devkmsg_read(struct file
+ return ret;
+
+ logbuf_lock_irq();
+- while (user->seq == log_next_seq) {
++ if (!prb_read_valid(prb, user->seq, r)) {
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ logbuf_unlock_irq();
+@@ -902,30 +769,26 @@ static ssize_t devkmsg_read(struct file
+
+ logbuf_unlock_irq();
+ ret = wait_event_interruptible(log_wait,
+- user->seq != log_next_seq);
++ prb_read_valid(prb, user->seq, r));
+ if (ret)
+ goto out;
+ logbuf_lock_irq();
+ }
+
+- if (user->seq < log_first_seq) {
++ if (user->seq < prb_first_valid_seq(prb)) {
+ /* our last seen message is gone, return error and reset */
+- user->idx = log_first_idx;
+- user->seq = log_first_seq;
++ user->seq = prb_first_valid_seq(prb);
+ ret = -EPIPE;
+ logbuf_unlock_irq();
+ goto out;
+ }
+
+- msg = log_from_idx(user->idx);
+- len = msg_print_ext_header(user->buf, sizeof(user->buf),
+- msg, user->seq);
++ len = info_print_ext_header(user->buf, sizeof(user->buf), r->info);
+ len += msg_print_ext_body(user->buf + len, sizeof(user->buf) - len,
+- log_dict(msg), msg->dict_len,
+- log_text(msg), msg->text_len);
++ &r->dict_buf[0], r->info->dict_len,
++ &r->text_buf[0], r->info->text_len);
+
+- user->idx = log_next(user->idx);
+- user->seq++;
++ user->seq = r->info->seq + 1;
+ logbuf_unlock_irq();
+
+ if (len > count) {
+@@ -965,8 +828,7 @@ static loff_t devkmsg_llseek(struct file
+ switch (whence) {
+ case SEEK_SET:
+ /* the first record */
+- user->idx = log_first_idx;
+- user->seq = log_first_seq;
++ user->seq = prb_first_valid_seq(prb);
+ break;
+ case SEEK_DATA:
+ /*
+@@ -974,13 +836,11 @@ static loff_t devkmsg_llseek(struct file
+ * like issued by 'dmesg -c'. Reading /dev/kmsg itself
+ * changes no global state, and does not clear anything.
+ */
+- user->idx = clear_idx;
+ user->seq = clear_seq;
+ break;
+ case SEEK_END:
+ /* after the last record */
+- user->idx = log_next_idx;
+- user->seq = log_next_seq;
++ user->seq = prb_next_seq(prb);
+ break;
+ default:
+ ret = -EINVAL;
+@@ -1000,9 +860,9 @@ static __poll_t devkmsg_poll(struct file
+ poll_wait(file, &log_wait, wait);
+
+ logbuf_lock_irq();
+- if (user->seq < log_next_seq) {
++ if (prb_read_valid(prb, user->seq, NULL)) {
+ /* return error when data has vanished underneath us */
+- if (user->seq < log_first_seq)
++ if (user->seq < prb_first_valid_seq(prb))
+ ret = EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
+ else
+ ret = EPOLLIN|EPOLLRDNORM;
+@@ -1037,9 +897,12 @@ static int devkmsg_open(struct inode *in
+
+ mutex_init(&user->lock);
+
++ prb_rec_init_rd(&user->record, &user->info,
++ &user->text_buf[0], sizeof(user->text_buf),
++ &user->dict_buf[0], sizeof(user->dict_buf));
++
+ logbuf_lock_irq();
+- user->idx = log_first_idx;
+- user->seq = log_first_seq;
++ user->seq = prb_first_valid_seq(prb);
+ logbuf_unlock_irq();
+
+ file->private_data = user;
+@@ -1080,23 +943,52 @@ const struct file_operations kmsg_fops =
+ */
+ void log_buf_vmcoreinfo_setup(void)
+ {
+- VMCOREINFO_SYMBOL(log_buf);
+- VMCOREINFO_SYMBOL(log_buf_len);
+- VMCOREINFO_SYMBOL(log_first_idx);
+- VMCOREINFO_SYMBOL(clear_idx);
+- VMCOREINFO_SYMBOL(log_next_idx);
++ VMCOREINFO_SYMBOL(prb);
++ VMCOREINFO_SYMBOL(printk_rb_static);
++ VMCOREINFO_SYMBOL(clear_seq);
++
+ /*
+- * Export struct printk_log size and field offsets. User space tools can
++ * Export struct size and field offsets. User space tools can
+ * parse it and detect any changes to structure down the line.
+ */
+- VMCOREINFO_STRUCT_SIZE(printk_log);
+- VMCOREINFO_OFFSET(printk_log, ts_nsec);
+- VMCOREINFO_OFFSET(printk_log, len);
+- VMCOREINFO_OFFSET(printk_log, text_len);
+- VMCOREINFO_OFFSET(printk_log, dict_len);
+-#ifdef CONFIG_PRINTK_CALLER
+- VMCOREINFO_OFFSET(printk_log, caller_id);
+-#endif
++
++ VMCOREINFO_STRUCT_SIZE(printk_ringbuffer);
++ VMCOREINFO_OFFSET(printk_ringbuffer, desc_ring);
++ VMCOREINFO_OFFSET(printk_ringbuffer, text_data_ring);
++ VMCOREINFO_OFFSET(printk_ringbuffer, dict_data_ring);
++ VMCOREINFO_OFFSET(printk_ringbuffer, fail);
++
++ VMCOREINFO_STRUCT_SIZE(prb_desc_ring);
++ VMCOREINFO_OFFSET(prb_desc_ring, count_bits);
++ VMCOREINFO_OFFSET(prb_desc_ring, descs);
++ VMCOREINFO_OFFSET(prb_desc_ring, head_id);
++ VMCOREINFO_OFFSET(prb_desc_ring, tail_id);
++
++ VMCOREINFO_STRUCT_SIZE(prb_desc);
++ VMCOREINFO_OFFSET(prb_desc, info);
++ VMCOREINFO_OFFSET(prb_desc, state_var);
++ VMCOREINFO_OFFSET(prb_desc, text_blk_lpos);
++ VMCOREINFO_OFFSET(prb_desc, dict_blk_lpos);
++
++ VMCOREINFO_STRUCT_SIZE(prb_data_blk_lpos);
++ VMCOREINFO_OFFSET(prb_data_blk_lpos, begin);
++ VMCOREINFO_OFFSET(prb_data_blk_lpos, next);
++
++ VMCOREINFO_STRUCT_SIZE(printk_info);
++ VMCOREINFO_OFFSET(printk_info, seq);
++ VMCOREINFO_OFFSET(printk_info, ts_nsec);
++ VMCOREINFO_OFFSET(printk_info, text_len);
++ VMCOREINFO_OFFSET(printk_info, dict_len);
++ VMCOREINFO_OFFSET(printk_info, caller_id);
++
++ VMCOREINFO_STRUCT_SIZE(prb_data_ring);
++ VMCOREINFO_OFFSET(prb_data_ring, size_bits);
++ VMCOREINFO_OFFSET(prb_data_ring, data);
++ VMCOREINFO_OFFSET(prb_data_ring, head_lpos);
++ VMCOREINFO_OFFSET(prb_data_ring, tail_lpos);
++
++ VMCOREINFO_SIZE(atomic_long_t);
++ VMCOREINFO_TYPE_OFFSET(atomic_long_t, counter);
+ }
+ #endif
+
+@@ -1174,11 +1066,48 @@ static void __init set_percpu_data_ready
+ __printk_percpu_data_ready = true;
+ }
+
++static unsigned int __init add_to_rb(struct printk_ringbuffer *rb,
++ struct printk_record *r)
++{
++ struct prb_reserved_entry e;
++ struct printk_record dest_r;
++
++ prb_rec_init_wr(&dest_r, r->info->text_len, r->info->dict_len);
++
++ if (!prb_reserve(&e, rb, &dest_r))
++ return 0;
++
++ memcpy(&dest_r.text_buf[0], &r->text_buf[0], dest_r.text_buf_size);
++ if (dest_r.dict_buf) {
++ memcpy(&dest_r.dict_buf[0], &r->dict_buf[0],
++ dest_r.dict_buf_size);
++ }
++ dest_r.info->facility = r->info->facility;
++ dest_r.info->level = r->info->level;
++ dest_r.info->flags = r->info->flags;
++ dest_r.info->ts_nsec = r->info->ts_nsec;
++ dest_r.info->caller_id = r->info->caller_id;
++
++ prb_commit(&e);
++
++ return prb_record_text_space(&e);
++}
++
++static char setup_text_buf[CONSOLE_EXT_LOG_MAX] __initdata;
++static char setup_dict_buf[CONSOLE_EXT_LOG_MAX] __initdata;
++
+ void __init setup_log_buf(int early)
+ {
++ unsigned int new_descs_count;
++ struct prb_desc *new_descs;
++ struct printk_info info;
++ struct printk_record r;
++ size_t new_descs_size;
+ unsigned long flags;
++ char *new_dict_buf;
+ char *new_log_buf;
+ unsigned int free;
++ u64 seq;
+
+ /*
+ * Some archs call setup_log_buf() multiple times - first is very
+@@ -1197,21 +1126,70 @@ void __init setup_log_buf(int early)
+ if (!new_log_buf_len)
+ return;
+
++ new_descs_count = new_log_buf_len >> PRB_AVGBITS;
++ if (new_descs_count == 0) {
++ pr_err("new_log_buf_len: %lu too small\n", new_log_buf_len);
++ return;
++ }
++
+ new_log_buf = memblock_alloc(new_log_buf_len, LOG_ALIGN);
+ if (unlikely(!new_log_buf)) {
+- pr_err("log_buf_len: %lu bytes not available\n",
+- new_log_buf_len);
++ pr_err("log_buf_len: %lu text bytes not available\n",
++ new_log_buf_len);
++ return;
++ }
++
++ new_dict_buf = memblock_alloc(new_log_buf_len, LOG_ALIGN);
++ if (unlikely(!new_dict_buf)) {
++ pr_err("log_buf_len: %lu dict bytes not available\n",
++ new_log_buf_len);
++ memblock_free(__pa(new_log_buf), new_log_buf_len);
+ return;
+ }
+
++ new_descs_size = new_descs_count * sizeof(struct prb_desc);
++ new_descs = memblock_alloc(new_descs_size, LOG_ALIGN);
++ if (unlikely(!new_descs)) {
++ pr_err("log_buf_len: %zu desc bytes not available\n",
++ new_descs_size);
++ memblock_free(__pa(new_dict_buf), new_log_buf_len);
++ memblock_free(__pa(new_log_buf), new_log_buf_len);
++ return;
++ }
++
++ prb_rec_init_rd(&r, &info,
++ &setup_text_buf[0], sizeof(setup_text_buf),
++ &setup_dict_buf[0], sizeof(setup_dict_buf));
++
++ prb_init(&printk_rb_dynamic,
++ new_log_buf, ilog2(new_log_buf_len),
++ new_dict_buf, ilog2(new_log_buf_len),
++ new_descs, ilog2(new_descs_count));
++
+ logbuf_lock_irqsave(flags);
++
+ log_buf_len = new_log_buf_len;
+ log_buf = new_log_buf;
+ new_log_buf_len = 0;
+- free = __LOG_BUF_LEN - log_next_idx;
+- memcpy(log_buf, __log_buf, __LOG_BUF_LEN);
++
++ free = __LOG_BUF_LEN;
++ prb_for_each_record(0, &printk_rb_static, seq, &r)
++ free -= add_to_rb(&printk_rb_dynamic, &r);
++
++ /*
++ * This is early enough that everything is still running on the
++ * boot CPU and interrupts are disabled. So no new messages will
++ * appear during the transition to the dynamic buffer.
++ */
++ prb = &printk_rb_dynamic;
++
+ logbuf_unlock_irqrestore(flags);
+
++ if (seq != prb_next_seq(&printk_rb_static)) {
++ pr_err("dropped %llu messages\n",
++ prb_next_seq(&printk_rb_static) - seq);
++ }
++
+ pr_info("log_buf_len: %u bytes\n", log_buf_len);
+ pr_info("early log buf free: %u(%u%%)\n",
+ free, (free * 100) / __LOG_BUF_LEN);
+@@ -1321,18 +1299,18 @@ static size_t print_caller(u32 id, char
+ #define print_caller(id, buf) 0
+ #endif
+
+-static size_t print_prefix(const struct printk_log *msg, bool syslog,
+- bool time, char *buf)
++static size_t info_print_prefix(const struct printk_info *info, bool syslog,
++ bool time, char *buf)
+ {
+ size_t len = 0;
+
+ if (syslog)
+- len = print_syslog((msg->facility << 3) | msg->level, buf);
++ len = print_syslog((info->facility << 3) | info->level, buf);
+
+ if (time)
+- len += print_time(msg->ts_nsec, buf + len);
++ len += print_time(info->ts_nsec, buf + len);
+
+- len += print_caller(msg->caller_id, buf + len);
++ len += print_caller(info->caller_id, buf + len);
+
+ if (IS_ENABLED(CONFIG_PRINTK_CALLER) || time) {
+ buf[len++] = ' ';
+@@ -1342,72 +1320,143 @@ static size_t print_prefix(const struct
+ return len;
+ }
+
+-static size_t msg_print_text(const struct printk_log *msg, bool syslog,
+- bool time, char *buf, size_t size)
+-{
+- const char *text = log_text(msg);
+- size_t text_size = msg->text_len;
+- size_t len = 0;
++/*
++ * Prepare the record for printing. The text is shifted within the given
++ * buffer to avoid a need for another one. The following operations are
++ * done:
++ *
++ * - Add prefix for each line.
++ * - Add the trailing newline that has been removed in vprintk_store().
++ * - Drop truncated lines that no longer fit into the buffer.
++ *
++ * Return: The length of the updated/prepared text, including the added
++ * prefixes and the newline. The dropped line(s) are not counted.
++ */
++static size_t record_print_text(struct printk_record *r, bool syslog,
++ bool time)
++{
++ size_t text_len = r->info->text_len;
++ size_t buf_size = r->text_buf_size;
++ char *text = r->text_buf;
+ char prefix[PREFIX_MAX];
+- const size_t prefix_len = print_prefix(msg, syslog, time, prefix);
++ bool truncated = false;
++ size_t prefix_len;
++ size_t line_len;
++ size_t len = 0;
++ char *next;
+
+- do {
+- const char *next = memchr(text, '\n', text_size);
+- size_t text_len;
++ prefix_len = info_print_prefix(r->info, syslog, time, prefix);
+
++ /*
++ * @text_len: bytes of unprocessed text
++ * @line_len: bytes of current line _without_ newline
++ * @text: pointer to beginning of current line
++ * @len: number of bytes prepared in r->text_buf
++ */
++ for (;;) {
++ next = memchr(text, '\n', text_len);
+ if (next) {
+- text_len = next - text;
+- next++;
+- text_size -= next - text;
++ line_len = next - text;
+ } else {
+- text_len = text_size;
++ /* Drop truncated line(s). */
++ if (truncated)
++ break;
++ line_len = text_len;
+ }
+
+- if (buf) {
+- if (prefix_len + text_len + 1 >= size - len)
++ /*
++ * Truncate the text if there is not enough space to add the
++ * prefix and a trailing newline.
++ */
++ if (len + prefix_len + text_len + 1 > buf_size) {
++ /* Drop even the current line if no space. */
++ if (len + prefix_len + line_len + 1 > buf_size)
+ break;
+
+- memcpy(buf + len, prefix, prefix_len);
+- len += prefix_len;
+- memcpy(buf + len, text, text_len);
+- len += text_len;
+- buf[len++] = '\n';
+- } else {
+- /* SYSLOG_ACTION_* buffer size only calculation */
+- len += prefix_len + text_len + 1;
++ text_len = buf_size - len - prefix_len - 1;
++ truncated = true;
++ }
++
++ memmove(text + prefix_len, text, text_len);
++ memcpy(text, prefix, prefix_len);
++
++ len += prefix_len + line_len + 1;
++
++ if (text_len == line_len) {
++ /*
++ * Add the trailing newline removed in
++ * vprintk_store().
++ */
++ text[prefix_len + line_len] = '\n';
++ break;
+ }
+
+- text = next;
+- } while (text);
++ /*
++ * Advance beyond the added prefix and the related line with
++ * its newline.
++ */
++ text += prefix_len + line_len + 1;
++
++ /*
++ * The remaining text has only decreased by the line with its
++ * newline.
++ *
++ * Note that @text_len can become zero. It happens when @text
++ * ended with a newline (either due to truncation or the
++ * original string ending with "\n\n"). The loop is correctly
++ * repeated and (if not truncated) an empty line with a prefix
++ * will be prepared.
++ */
++ text_len -= line_len + 1;
++ }
+
+ return len;
+ }
+
++static size_t get_record_print_text_size(struct printk_info *info,
++ unsigned int line_count,
++ bool syslog, bool time)
++{
++ char prefix[PREFIX_MAX];
++ size_t prefix_len;
++
++ prefix_len = info_print_prefix(info, syslog, time, prefix);
++
++ /*
++ * Each line will be preceded with a prefix. The intermediate
++ * newlines are already within the text, but a final trailing
++ * newline will be added.
++ */
++ return ((prefix_len * line_count) + info->text_len + 1);
++}
++
+ static int syslog_print(char __user *buf, int size)
+ {
++ struct printk_info info;
++ struct printk_record r;
+ char *text;
+- struct printk_log *msg;
+ int len = 0;
+
+ text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
+ if (!text)
+ return -ENOMEM;
+
++ prb_rec_init_rd(&r, &info, text, LOG_LINE_MAX + PREFIX_MAX, NULL, 0);
++
+ while (size > 0) {
+ size_t n;
+ size_t skip;
+
+ logbuf_lock_irq();
+- if (syslog_seq < log_first_seq) {
+- /* messages are gone, move to first one */
+- syslog_seq = log_first_seq;
+- syslog_idx = log_first_idx;
+- syslog_partial = 0;
+- }
+- if (syslog_seq == log_next_seq) {
++ if (!prb_read_valid(prb, syslog_seq, &r)) {
+ logbuf_unlock_irq();
+ break;
+ }
++ if (r.info->seq != syslog_seq) {
++ /* message is gone, move to next valid one */
++ syslog_seq = r.info->seq;
++ syslog_partial = 0;
++ }
+
+ /*
+ * To keep reading/counting partial line consistent,
+@@ -1417,13 +1466,10 @@ static int syslog_print(char __user *buf
+ syslog_time = printk_time;
+
+ skip = syslog_partial;
+- msg = log_from_idx(syslog_idx);
+- n = msg_print_text(msg, true, syslog_time, text,
+- LOG_LINE_MAX + PREFIX_MAX);
++ n = record_print_text(&r, true, syslog_time);
+ if (n - syslog_partial <= size) {
+ /* message fits into buffer, move forward */
+- syslog_idx = log_next(syslog_idx);
+- syslog_seq++;
++ syslog_seq = r.info->seq + 1;
+ n -= syslog_partial;
+ syslog_partial = 0;
+ } else if (!len){
+@@ -1454,11 +1500,12 @@ static int syslog_print(char __user *buf
+
+ static int syslog_print_all(char __user *buf, int size, bool clear)
+ {
++ struct printk_info info;
++ unsigned int line_count;
++ struct printk_record r;
+ char *text;
+ int len = 0;
+- u64 next_seq;
+ u64 seq;
+- u32 idx;
+ bool time;
+
+ text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
+@@ -1471,38 +1518,28 @@ static int syslog_print_all(char __user
+ * Find first record that fits, including all following records,
+ * into the user-provided buffer for this dump.
+ */
+- seq = clear_seq;
+- idx = clear_idx;
+- while (seq < log_next_seq) {
+- struct printk_log *msg = log_from_idx(idx);
+-
+- len += msg_print_text(msg, true, time, NULL, 0);
+- idx = log_next(idx);
+- seq++;
+- }
++ prb_for_each_info(clear_seq, prb, seq, &info, &line_count)
++ len += get_record_print_text_size(&info, line_count, true, time);
+
+ /* move first record forward until length fits into the buffer */
+- seq = clear_seq;
+- idx = clear_idx;
+- while (len > size && seq < log_next_seq) {
+- struct printk_log *msg = log_from_idx(idx);
+-
+- len -= msg_print_text(msg, true, time, NULL, 0);
+- idx = log_next(idx);
+- seq++;
++ prb_for_each_info(clear_seq, prb, seq, &info, &line_count) {
++ if (len <= size)
++ break;
++ len -= get_record_print_text_size(&info, line_count, true, time);
+ }
+
+- /* last message fitting into this dump */
+- next_seq = log_next_seq;
++ prb_rec_init_rd(&r, &info, text, LOG_LINE_MAX + PREFIX_MAX, NULL, 0);
+
+ len = 0;
+- while (len >= 0 && seq < next_seq) {
+- struct printk_log *msg = log_from_idx(idx);
+- int textlen = msg_print_text(msg, true, time, text,
+- LOG_LINE_MAX + PREFIX_MAX);
++ prb_for_each_record(seq, prb, seq, &r) {
++ int textlen;
+
+- idx = log_next(idx);
+- seq++;
++ textlen = record_print_text(&r, true, time);
++
++ if (len + textlen > size) {
++ seq--;
++ break;
++ }
+
+ logbuf_unlock_irq();
+ if (copy_to_user(buf + len, text, textlen))
+@@ -1511,17 +1548,12 @@ static int syslog_print_all(char __user
+ len += textlen;
+ logbuf_lock_irq();
+
+- if (seq < log_first_seq) {
+- /* messages are gone, move to next one */
+- seq = log_first_seq;
+- idx = log_first_idx;
+- }
++ if (len < 0)
++ break;
+ }
+
+- if (clear) {
+- clear_seq = log_next_seq;
+- clear_idx = log_next_idx;
+- }
++ if (clear)
++ clear_seq = seq;
+ logbuf_unlock_irq();
+
+ kfree(text);
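The syslog_print_all() rewrite above keeps the old three-pass shape: measure everything from clear_seq, trim records off the front until the total fits, then render and copy. A hedged sketch of the trimming pass, with a hypothetical record_size() standing in for get_record_print_text_size():

/* Sketch: drop records from the front of [*first, last) until the
 * total rendered length fits into the caller's buffer of 'size'
 * bytes. record_size() is a hypothetical stand-in for
 * get_record_print_text_size().
 */
static size_t trim_to_fit(size_t len, size_t size, u64 *first, u64 last,
			  size_t (*record_size)(u64 seq))
{
	while (len > size && *first < last) {
		len -= record_size(*first);
		(*first)++;
	}
	return len;
}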
+@@ -1531,8 +1563,7 @@ static int syslog_print_all(char __user
+ static void syslog_clear(void)
+ {
+ logbuf_lock_irq();
+- clear_seq = log_next_seq;
+- clear_idx = log_next_idx;
++ clear_seq = prb_next_seq(prb);
+ logbuf_unlock_irq();
+ }
+
+@@ -1559,7 +1590,7 @@ int do_syslog(int type, char __user *buf
+ if (!access_ok(buf, len))
+ return -EFAULT;
+ error = wait_event_interruptible(log_wait,
+- syslog_seq != log_next_seq);
++ prb_read_valid(prb, syslog_seq, NULL));
+ if (error)
+ return error;
+ error = syslog_print(buf, len);
+@@ -1608,10 +1639,9 @@ int do_syslog(int type, char __user *buf
+ /* Number of chars in the log buffer */
+ case SYSLOG_ACTION_SIZE_UNREAD:
+ logbuf_lock_irq();
+- if (syslog_seq < log_first_seq) {
++ if (syslog_seq < prb_first_valid_seq(prb)) {
+ /* messages are gone, move to first one */
+- syslog_seq = log_first_seq;
+- syslog_idx = log_first_idx;
++ syslog_seq = prb_first_valid_seq(prb);
+ syslog_partial = 0;
+ }
+ if (source == SYSLOG_FROM_PROC) {
+@@ -1620,20 +1650,18 @@ int do_syslog(int type, char __user *buf
+ * for pending data, not the size; return the count of
+ * records, not the length.
+ */
+- error = log_next_seq - syslog_seq;
++ error = prb_next_seq(prb) - syslog_seq;
+ } else {
+- u64 seq = syslog_seq;
+- u32 idx = syslog_idx;
+ bool time = syslog_partial ? syslog_time : printk_time;
+-
+- while (seq < log_next_seq) {
+- struct printk_log *msg = log_from_idx(idx);
+-
+- error += msg_print_text(msg, true, time, NULL,
+- 0);
++ struct printk_info info;
++ unsigned int line_count;
++ u64 seq;
++
++ prb_for_each_info(syslog_seq, prb, seq, &info,
++ &line_count) {
++ error += get_record_print_text_size(&info, line_count,
++ true, time);
+ time = printk_time;
+- idx = log_next(idx);
+- seq++;
+ }
+ error -= syslog_partial;
+ }
+@@ -1804,10 +1832,22 @@ static int console_trylock_spinning(void
+ static void call_console_drivers(const char *ext_text, size_t ext_len,
+ const char *text, size_t len)
+ {
++ static char dropped_text[64];
++ size_t dropped_len = 0;
+ struct console *con;
+
+ trace_console_rcuidle(text, len);
+
++ if (!console_drivers)
++ return;
++
++ if (console_dropped) {
++ dropped_len = snprintf(dropped_text, sizeof(dropped_text),
++ "** %lu printk messages dropped **\n",
++ console_dropped);
++ console_dropped = 0;
++ }
++
+ for_each_console(con) {
+ if (exclusive_console && con != exclusive_console)
+ continue;
+@@ -1820,8 +1860,11 @@ static void call_console_drivers(const c
+ continue;
+ if (con->flags & CON_EXTENDED)
+ con->write(con, ext_text, ext_len);
+- else
++ else {
++ if (dropped_len)
++ con->write(con, dropped_text, dropped_len);
+ con->write(con, text, len);
++ }
+ }
+ }
+
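The dropped-message accounting above moves out of console_unlock() and into call_console_drivers(): the banner is formatted once per call and emitted ahead of the pending text on every non-extended console. A simplified sketch of that idea (the write callback and counter are stand-ins mirroring the hunk, not driver API):

/* Sketch: format a "messages dropped" banner once, then emit it
 * before the pending text on a plain-text console. 'dropped' mirrors
 * the console_dropped counter from the hunk above.
 */
static void emit_with_banner(void (*write)(const char *, size_t),
			     const char *text, size_t len,
			     unsigned long *dropped)
{
	static char banner[64];
	size_t blen = 0;

	if (*dropped) {
		blen = snprintf(banner, sizeof(banner),
				"** %lu printk messages dropped **\n",
				*dropped);
		*dropped = 0;
	}
	if (blen)
		write(banner, blen);
	write(text, len);
}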
+@@ -2084,21 +2127,24 @@ EXPORT_SYMBOL(printk);
+ #define PREFIX_MAX 0
+ #define printk_time false
+
++#define prb_read_valid(rb, seq, r) false
++#define prb_first_valid_seq(rb) 0
++
+ static u64 syslog_seq;
+-static u32 syslog_idx;
+ static u64 console_seq;
+-static u32 console_idx;
+ static u64 exclusive_console_stop_seq;
+-static u64 log_first_seq;
+-static u32 log_first_idx;
+-static u64 log_next_seq;
+-static char *log_text(const struct printk_log *msg) { return NULL; }
+-static char *log_dict(const struct printk_log *msg) { return NULL; }
+-static struct printk_log *log_from_idx(u32 idx) { return NULL; }
+-static u32 log_next(u32 idx) { return 0; }
+-static ssize_t msg_print_ext_header(char *buf, size_t size,
+- struct printk_log *msg,
+- u64 seq) { return 0; }
++static unsigned long console_dropped;
++
++static size_t record_print_text(const struct printk_record *r,
++ bool syslog, bool time)
++{
++ return 0;
++}
++static ssize_t info_print_ext_header(char *buf, size_t size,
++ struct printk_info *info)
++{
++ return 0;
++}
+ static ssize_t msg_print_ext_body(char *buf, size_t size,
+ char *dict, size_t dict_len,
+ char *text, size_t text_len) { return 0; }
+@@ -2106,8 +2152,6 @@ static void console_lock_spinning_enable
+ static int console_lock_spinning_disable_and_check(void) { return 0; }
+ static void call_console_drivers(const char *ext_text, size_t ext_len,
+ const char *text, size_t len) {}
+-static size_t msg_print_text(const struct printk_log *msg, bool syslog,
+- bool time, char *buf, size_t size) { return 0; }
+ static bool suppress_message_printing(int level) { return false; }
+
+ #endif /* CONFIG_PRINTK */
+@@ -2392,14 +2436,19 @@ void console_unlock(void)
+ {
+ static char ext_text[CONSOLE_EXT_LOG_MAX];
+ static char text[LOG_LINE_MAX + PREFIX_MAX];
++ static char dict[LOG_LINE_MAX];
+ unsigned long flags;
+ bool do_cond_resched, retry;
++ struct printk_info info;
++ struct printk_record r;
+
+ if (console_suspended) {
+ up_console_sem();
+ return;
+ }
+
++ prb_rec_init_rd(&r, &info, text, sizeof(text), dict, sizeof(dict));
++
+ /*
+ * Console drivers are called with interrupts disabled, so
+ * @console_may_schedule should be cleared before; however, we may
+@@ -2430,35 +2479,26 @@ void console_unlock(void)
+ }
+
+ for (;;) {
+- struct printk_log *msg;
+ size_t ext_len = 0;
+ size_t len;
+
+ printk_safe_enter_irqsave(flags);
+ raw_spin_lock(&logbuf_lock);
+- if (console_seq < log_first_seq) {
+- len = snprintf(text, sizeof(text),
+- "** %llu printk messages dropped **\n",
+- log_first_seq - console_seq);
+-
+- /* messages are gone, move to first one */
+- console_seq = log_first_seq;
+- console_idx = log_first_idx;
+- } else {
+- len = 0;
+- }
+ skip:
+- if (console_seq == log_next_seq)
++ if (!prb_read_valid(prb, console_seq, &r))
+ break;
+
+- msg = log_from_idx(console_idx);
+- if (suppress_message_printing(msg->level)) {
++ if (console_seq != r.info->seq) {
++ console_dropped += r.info->seq - console_seq;
++ console_seq = r.info->seq;
++ }
++
++ if (suppress_message_printing(r.info->level)) {
+ /*
+ * Skip record we have buffered and already printed
+ * directly to the console when we received it, and
+ * record that has level above the console loglevel.
+ */
+- console_idx = log_next(console_idx);
+ console_seq++;
+ goto skip;
+ }
+@@ -2469,19 +2509,24 @@ void console_unlock(void)
+ exclusive_console = NULL;
+ }
+
+- len += msg_print_text(msg,
+- console_msg_format & MSG_FORMAT_SYSLOG,
+- printk_time, text + len, sizeof(text) - len);
++ /*
++ * Handle extended console text first because later
++ * record_print_text() will modify the record buffer in-place.
++ */
+ if (nr_ext_console_drivers) {
+- ext_len = msg_print_ext_header(ext_text,
++ ext_len = info_print_ext_header(ext_text,
+ sizeof(ext_text),
+- msg, console_seq);
++ r.info);
+ ext_len += msg_print_ext_body(ext_text + ext_len,
+ sizeof(ext_text) - ext_len,
+- log_dict(msg), msg->dict_len,
+- log_text(msg), msg->text_len);
++ &r.dict_buf[0],
++ r.info->dict_len,
++ &r.text_buf[0],
++ r.info->text_len);
+ }
+- console_idx = log_next(console_idx);
++ len = record_print_text(&r,
++ console_msg_format & MSG_FORMAT_SYSLOG,
++ printk_time);
+ console_seq++;
+ raw_spin_unlock(&logbuf_lock);
+
+@@ -2521,7 +2566,7 @@ void console_unlock(void)
+ * flush, no worries.
+ */
+ raw_spin_lock(&logbuf_lock);
+- retry = console_seq != log_next_seq;
++ retry = prb_read_valid(prb, console_seq, NULL);
+ raw_spin_unlock(&logbuf_lock);
+ printk_safe_exit_irqrestore(flags);
+
+@@ -2590,8 +2635,7 @@ void console_flush_on_panic(enum con_flu
+ unsigned long flags;
+
+ logbuf_lock_irqsave(flags);
+- console_seq = log_first_seq;
+- console_idx = log_first_idx;
++ console_seq = prb_first_valid_seq(prb);
+ logbuf_unlock_irqrestore(flags);
+ }
+ console_unlock();
+@@ -2834,7 +2878,6 @@ void register_console(struct console *ne
+ exclusive_console = newcon;
+ exclusive_console_stop_seq = console_seq;
+ console_seq = syslog_seq;
+- console_idx = syslog_idx;
+ logbuf_unlock_irqrestore(flags);
+ }
+ console_unlock();
+@@ -3223,9 +3266,7 @@ void kmsg_dump(enum kmsg_dump_reason rea
+
+ logbuf_lock_irqsave(flags);
+ dumper->cur_seq = clear_seq;
+- dumper->cur_idx = clear_idx;
+- dumper->next_seq = log_next_seq;
+- dumper->next_idx = log_next_idx;
++ dumper->next_seq = prb_next_seq(prb);
+ logbuf_unlock_irqrestore(flags);
+
+ /* invoke dumper which will iterate over records */
+@@ -3259,28 +3300,33 @@ void kmsg_dump(enum kmsg_dump_reason rea
+ bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
+ char *line, size_t size, size_t *len)
+ {
+- struct printk_log *msg;
++ struct printk_info info;
++ unsigned int line_count;
++ struct printk_record r;
+ size_t l = 0;
+ bool ret = false;
+
++ prb_rec_init_rd(&r, &info, line, size, NULL, 0);
++
+ if (!dumper->active)
+ goto out;
+
+- if (dumper->cur_seq < log_first_seq) {
+- /* messages are gone, move to first available one */
+- dumper->cur_seq = log_first_seq;
+- dumper->cur_idx = log_first_idx;
+- }
+-
+- /* last entry */
+- if (dumper->cur_seq >= log_next_seq)
+- goto out;
++ /* Read text or count text lines? */
++ if (line) {
++ if (!prb_read_valid(prb, dumper->cur_seq, &r))
++ goto out;
++ l = record_print_text(&r, syslog, printk_time);
++ } else {
++ if (!prb_read_valid_info(prb, dumper->cur_seq,
++ &info, &line_count)) {
++ goto out;
++ }
++ l = get_record_print_text_size(&info, line_count, syslog,
++ printk_time);
+
+- msg = log_from_idx(dumper->cur_idx);
+- l = msg_print_text(msg, syslog, printk_time, line, size);
++ }
+
+- dumper->cur_idx = log_next(dumper->cur_idx);
+- dumper->cur_seq++;
++ dumper->cur_seq = r.info->seq + 1;
+ ret = true;
+ out:
+ if (len)
+@@ -3341,23 +3387,25 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
+ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+ char *buf, size_t size, size_t *len)
+ {
++ struct printk_info info;
++ unsigned int line_count;
++ struct printk_record r;
+ unsigned long flags;
+ u64 seq;
+- u32 idx;
+ u64 next_seq;
+- u32 next_idx;
+ size_t l = 0;
+ bool ret = false;
+ bool time = printk_time;
+
+- if (!dumper->active)
++ prb_rec_init_rd(&r, &info, buf, size, NULL, 0);
++
++ if (!dumper->active || !buf || !size)
+ goto out;
+
+ logbuf_lock_irqsave(flags);
+- if (dumper->cur_seq < log_first_seq) {
++ if (dumper->cur_seq < prb_first_valid_seq(prb)) {
+ /* messages are gone, move to first available one */
+- dumper->cur_seq = log_first_seq;
+- dumper->cur_idx = log_first_idx;
++ dumper->cur_seq = prb_first_valid_seq(prb);
+ }
+
+ /* last entry */
+@@ -3368,41 +3416,41 @@ bool kmsg_dump_get_buffer(struct kmsg_du
+
+ /* calculate length of entire buffer */
+ seq = dumper->cur_seq;
+- idx = dumper->cur_idx;
+- while (seq < dumper->next_seq) {
+- struct printk_log *msg = log_from_idx(idx);
+-
+- l += msg_print_text(msg, true, time, NULL, 0);
+- idx = log_next(idx);
+- seq++;
++ while (prb_read_valid_info(prb, seq, &info, &line_count)) {
++ if (r.info->seq >= dumper->next_seq)
++ break;
++ l += get_record_print_text_size(&info, line_count, true, time);
++ seq = r.info->seq + 1;
+ }
+
+ /* move first record forward until length fits into the buffer */
+ seq = dumper->cur_seq;
+- idx = dumper->cur_idx;
+- while (l >= size && seq < dumper->next_seq) {
+- struct printk_log *msg = log_from_idx(idx);
+-
+- l -= msg_print_text(msg, true, time, NULL, 0);
+- idx = log_next(idx);
+- seq++;
++ while (l >= size && prb_read_valid_info(prb, seq,
++ &info, &line_count)) {
++ if (r.info->seq >= dumper->next_seq)
++ break;
++ l -= get_record_print_text_size(&info, line_count, true, time);
++ seq = r.info->seq + 1;
+ }
+
+ 	/* last message in next iteration */
+ next_seq = seq;
+- next_idx = idx;
+
++ /* actually read text into the buffer now */
+ l = 0;
+- while (seq < dumper->next_seq) {
+- struct printk_log *msg = log_from_idx(idx);
++ while (prb_read_valid(prb, seq, &r)) {
++ if (r.info->seq >= dumper->next_seq)
++ break;
++
++ l += record_print_text(&r, syslog, time);
++
++ /* adjust record to store to remaining buffer space */
++ prb_rec_init_rd(&r, &info, buf + l, size - l, NULL, 0);
+
+- l += msg_print_text(msg, syslog, time, buf + l, size - l);
+- idx = log_next(idx);
+- seq++;
++ seq = r.info->seq + 1;
+ }
+
+ dumper->next_seq = next_seq;
+- dumper->next_idx = next_idx;
+ ret = true;
+ logbuf_unlock_irqrestore(flags);
+ out:
+@@ -3425,9 +3473,7 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
+ void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper)
+ {
+ dumper->cur_seq = clear_seq;
+- dumper->cur_idx = clear_idx;
+- dumper->next_seq = log_next_seq;
+- dumper->next_idx = log_next_idx;
++ dumper->next_seq = prb_next_seq(prb);
+ }
+
+ /**
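One detail of the kmsg_dump_get_buffer() conversion above is easy to miss: after each record is rendered, the read descriptor is re-initialized to point at the remaining buffer space (buf + l, size - l). A hedged sketch of that loop shape, with a hypothetical read_record() in place of the prb_read_valid()/record_print_text() pair:

/* Sketch: fill a caller buffer record by record, shrinking the
 * destination window after each copy. read_record() renders one
 * record into the window and returns its length, or 0 if sequence
 * number 'seq' is not available.
 */
static size_t fill_buffer(char *buf, size_t size, u64 seq, u64 end,
			  size_t (*read_record)(u64, char *, size_t))
{
	size_t l = 0;

	while (seq < end) {
		size_t n = read_record(seq, buf + l, size - l);

		if (!n)
			break;
		l += n;
		seq++;
	}
	return l;
}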
diff --git a/debian/patches-rt/0004-sched-core-Wait-for-tasks-being-pushed-away-on-hotpl.patch b/debian/patches-rt/0004-sched-core-Wait-for-tasks-being-pushed-away-on-hotpl.patch
index 326390b79..7bb979621 100644
--- a/debian/patches-rt/0004-sched-core-Wait-for-tasks-being-pushed-away-on-hotpl.patch
+++ b/debian/patches-rt/0004-sched-core-Wait-for-tasks-being-pushed-away-on-hotpl.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
-Date: Mon, 5 Oct 2020 16:57:21 +0200
-Subject: [PATCH 04/17] sched/core: Wait for tasks being pushed away on hotplug
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Date: Fri, 23 Oct 2020 12:12:02 +0200
+Subject: [PATCH 04/19] sched/core: Wait for tasks being pushed away on hotplug
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
RT kernels need to ensure that all tasks which are not per CPU kthreads
have left the outgoing CPU to guarantee that no tasks are force migrated
@@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -6897,8 +6897,21 @@ static bool balance_push(struct rq *rq)
+@@ -6896,8 +6896,21 @@ static void balance_push(struct rq *rq)
* Both the cpu-hotplug and stop task are in this case and are
* required to complete the hotplug process.
*/
@@ -42,12 +42,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ rcuwait_wake_up(&rq->hotplug_wait);
+ raw_spin_lock(&rq->lock);
+ }
- return false;
+ return;
+ }
get_task_struct(push_task);
/*
-@@ -6931,6 +6944,20 @@ static void balance_push_set(int cpu, bo
+@@ -6928,6 +6941,20 @@ static void balance_push_set(int cpu, bo
rq_unlock_irqrestore(rq, &rf);
}
@@ -67,8 +67,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
#else
- static inline bool balance_push(struct rq *rq)
-@@ -6942,6 +6969,10 @@ static void balance_push_set(int cpu, bo
+ static inline void balance_push(struct rq *rq)
+@@ -6938,6 +6965,10 @@ static inline void balance_push_set(int
{
}
@@ -79,7 +79,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* CONFIG_HOTPLUG_CPU */
void set_rq_online(struct rq *rq)
-@@ -7096,6 +7127,10 @@ int sched_cpu_deactivate(unsigned int cp
+@@ -7092,6 +7123,10 @@ int sched_cpu_deactivate(unsigned int cp
return ret;
}
sched_domains_numa_masks_clear(cpu);
@@ -90,7 +90,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
}
-@@ -7336,6 +7371,9 @@ void __init sched_init(void)
+@@ -7332,6 +7367,9 @@ void __init sched_init(void)
rq_csd_init(rq, &rq->nohz_csd, nohz_csd_func);
#endif
diff --git a/debian/patches-rt/0004-time-sched_clock-Use-seqcount_latch_t.patch b/debian/patches-rt/0004-time-sched_clock-Use-seqcount_latch_t.patch
index 531fa0a9c..cdbc3c23b 100644
--- a/debian/patches-rt/0004-time-sched_clock-Use-seqcount_latch_t.patch
+++ b/debian/patches-rt/0004-time-sched_clock-Use-seqcount_latch_t.patch
@@ -1,7 +1,7 @@
From: "Ahmed S. Darwish" <a.darwish@linutronix.de>
Date: Thu, 27 Aug 2020 13:40:40 +0200
Subject: [PATCH 04/13] time/sched_clock: Use seqcount_latch_t
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Latch sequence counters have unique read and write APIs, and thus
seqcount_latch_t was recently introduced at seqlock.h.
diff --git a/debian/patches-rt/0005-MAINTAIERS-Add-John-Ogness-as-printk-reviewer.patch b/debian/patches-rt/0005-MAINTAIERS-Add-John-Ogness-as-printk-reviewer.patch
new file mode 100644
index 000000000..83a93f0d3
--- /dev/null
+++ b/debian/patches-rt/0005-MAINTAIERS-Add-John-Ogness-as-printk-reviewer.patch
@@ -0,0 +1,29 @@
+From: Petr Mladek <pmladek@suse.com>
+Date: Fri, 10 Jul 2020 11:44:32 +0200
+Subject: [PATCH 05/25] MAINTAIERS: Add John Ogness as printk reviewer
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+John Ogness has started major rework of the printk code. Add him
+as a reviewer so that he is aware of all other upcoming changes and
+can influence their integration.
+
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Acked-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Acked-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Acked-by: John Ogness <john.ogness@linutronix.de>
+Link: https://lore.kernel.org/r/20200710094432.19655-1-pmladek@suse.com
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ MAINTAINERS | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -13960,6 +13960,7 @@ PRINTK
+ M: Petr Mladek <pmladek@suse.com>
+ M: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+ R: Steven Rostedt <rostedt@goodmis.org>
++R: John Ogness <john.ogness@linutronix.de>
+ S: Maintained
+ F: include/linux/printk.h
+ F: kernel/printk/
diff --git a/debian/patches-rt/0005-locking-rtmutex-Handle-the-various-new-futex-race-co.patch b/debian/patches-rt/0005-locking-rtmutex-Handle-the-various-new-futex-race-co.patch
index c6a18eb3d..95c3165a9 100644
--- a/debian/patches-rt/0005-locking-rtmutex-Handle-the-various-new-futex-race-co.patch
+++ b/debian/patches-rt/0005-locking-rtmutex-Handle-the-various-new-futex-race-co.patch
@@ -1,8 +1,8 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 10 Jun 2011 11:04:15 +0200
-Subject: [PATCH 05/23] locking/rtmutex: Handle the various new futex race
+Subject: [PATCH 05/22] locking/rtmutex: Handle the various new futex race
conditions
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
RT opens a few new interesting race conditions in the rtmutex/futex
combo due to futex hash bucket lock being a 'sleeping' spinlock and
diff --git a/debian/patches-rt/0005-printk-rb-add-basic-non-blocking-reading-interface.patch b/debian/patches-rt/0005-printk-rb-add-basic-non-blocking-reading-interface.patch
deleted file mode 100644
index a929a64d5..000000000
--- a/debian/patches-rt/0005-printk-rb-add-basic-non-blocking-reading-interface.patch
+++ /dev/null
@@ -1,260 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Tue, 12 Feb 2019 15:29:43 +0100
-Subject: [PATCH 05/25] printk-rb: add basic non-blocking reading interface
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-Add reader iterator static declaration/initializer, dynamic
-initializer, and functions to iterate and retrieve ring buffer data.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/printk_ringbuffer.h | 20 ++++
- lib/printk_ringbuffer.c | 190 ++++++++++++++++++++++++++++++++++++++
- 2 files changed, 210 insertions(+)
-
---- a/include/linux/printk_ringbuffer.h
-+++ b/include/linux/printk_ringbuffer.h
-@@ -43,6 +43,19 @@ static struct prb_cpulock name = { \
- .irqflags = &_##name##_percpu_irqflags, \
- }
-
-+#define PRB_INIT ((unsigned long)-1)
-+
-+#define DECLARE_STATIC_PRINTKRB_ITER(name, rbaddr) \
-+static struct prb_iterator name = { \
-+ .rb = rbaddr, \
-+ .lpos = PRB_INIT, \
-+}
-+
-+struct prb_iterator {
-+ struct printk_ringbuffer *rb;
-+ unsigned long lpos;
-+};
-+
- #define DECLARE_STATIC_PRINTKRB(name, szbits, cpulockptr) \
- static char _##name##_buffer[1 << (szbits)] \
- __aligned(__alignof__(long)); \
-@@ -62,6 +75,13 @@ char *prb_reserve(struct prb_handle *h,
- unsigned int size);
- void prb_commit(struct prb_handle *h);
-
-+/* reader interface */
-+void prb_iter_init(struct prb_iterator *iter, struct printk_ringbuffer *rb,
-+ u64 *seq);
-+void prb_iter_copy(struct prb_iterator *dest, struct prb_iterator *src);
-+int prb_iter_next(struct prb_iterator *iter, char *buf, int size, u64 *seq);
-+int prb_iter_data(struct prb_iterator *iter, char *buf, int size, u64 *seq);
-+
- /* utility functions */
- void prb_lock(struct prb_cpulock *cpu_lock, unsigned int *cpu_store);
- void prb_unlock(struct prb_cpulock *cpu_lock, unsigned int cpu_store);
---- a/lib/printk_ringbuffer.c
-+++ b/lib/printk_ringbuffer.c
-@@ -1,5 +1,7 @@
- // SPDX-License-Identifier: GPL-2.0
- #include <linux/smp.h>
-+#include <linux/string.h>
-+#include <linux/errno.h>
- #include <linux/printk_ringbuffer.h>
-
- #define PRB_SIZE(rb) (1 << rb->size_bits)
-@@ -8,6 +10,7 @@
- #define PRB_WRAPS(rb, lpos) (lpos >> rb->size_bits)
- #define PRB_WRAP_LPOS(rb, lpos, xtra) \
- ((PRB_WRAPS(rb, lpos) + xtra) << rb->size_bits)
-+#define PRB_DATA_SIZE(e) (e->size - sizeof(struct prb_entry))
- #define PRB_DATA_ALIGN sizeof(long)
-
- static bool __prb_trylock(struct prb_cpulock *cpu_lock,
-@@ -247,3 +250,190 @@ char *prb_reserve(struct prb_handle *h,
-
- return &h->entry->data[0];
- }
-+
-+/*
-+ * prb_iter_copy: Copy an iterator.
-+ * @dest: The iterator to copy to.
-+ * @src: The iterator to copy from.
-+ *
-+ * Make a deep copy of an iterator. This is particularly useful for making
-+ * backup copies of an iterator in case a form of rewinding is needed.
-+ *
-+ * It is safe to call this function from any context and state. But
-+ * note that this function is not atomic. Callers should not make copies
-+ * to/from iterators that can be accessed by other tasks/contexts.
-+ */
-+void prb_iter_copy(struct prb_iterator *dest, struct prb_iterator *src)
-+{
-+ memcpy(dest, src, sizeof(*dest));
-+}
-+
-+/*
-+ * prb_iter_init: Initialize an iterator for a ring buffer.
-+ * @iter: The iterator to initialize.
-+ * @rb: The ring buffer over which @iter should iterate.
-+ * @seq: The sequence number of the position preceding the first record.
-+ * May be NULL.
-+ *
-+ * Initialize an iterator to be used with a specified ring buffer. If @seq
-+ * is non-NULL, it will be set such that prb_iter_next() will provide a
-+ * sequence value of "@seq + 1" if no records were missed.
-+ *
-+ * It is safe to call this function from any context and state.
-+ */
-+void prb_iter_init(struct prb_iterator *iter, struct printk_ringbuffer *rb,
-+ u64 *seq)
-+{
-+ memset(iter, 0, sizeof(*iter));
-+ iter->rb = rb;
-+ iter->lpos = PRB_INIT;
-+
-+ if (!seq)
-+ return;
-+
-+ for (;;) {
-+ struct prb_iterator tmp_iter;
-+ int ret;
-+
-+ prb_iter_copy(&tmp_iter, iter);
-+
-+ ret = prb_iter_next(&tmp_iter, NULL, 0, seq);
-+ if (ret < 0)
-+ continue;
-+
-+ if (ret == 0)
-+ *seq = 0;
-+ else
-+ (*seq)--;
-+ break;
-+ }
-+}
-+
-+static bool is_valid(struct printk_ringbuffer *rb, unsigned long lpos)
-+{
-+ unsigned long head, tail;
-+
-+ tail = atomic_long_read(&rb->tail);
-+ head = atomic_long_read(&rb->head);
-+ head -= tail;
-+ lpos -= tail;
-+
-+ if (lpos >= head)
-+ return false;
-+ return true;
-+}
-+
-+/*
-+ * prb_iter_data: Retrieve the record data at the current position.
-+ * @iter: Iterator tracking the current position.
-+ * @buf: A buffer to store the data of the record. May be NULL.
-+ * @size: The size of @buf. (Ignored if @buf is NULL.)
-+ * @seq: The sequence number of the record. May be NULL.
-+ *
-+ * If @iter is at a record, provide the data and/or sequence number of that
-+ * record (if specified by the caller).
-+ *
-+ * It is safe to call this function from any context and state.
-+ *
-+ * Returns >=0 if the current record contains valid data (returns 0 if @buf
-+ * is NULL or returns the size of the data block if @buf is non-NULL) or
-+ * -EINVAL if @iter is now invalid.
-+ */
-+int prb_iter_data(struct prb_iterator *iter, char *buf, int size, u64 *seq)
-+{
-+ struct printk_ringbuffer *rb = iter->rb;
-+ unsigned long lpos = iter->lpos;
-+ unsigned int datsize = 0;
-+ struct prb_entry *e;
-+
-+ if (buf || seq) {
-+ e = to_entry(rb, lpos);
-+ if (!is_valid(rb, lpos))
-+ return -EINVAL;
-+ /* memory barrier to ensure valid lpos */
-+ smp_rmb();
-+ if (buf) {
-+ datsize = PRB_DATA_SIZE(e);
-+ /* memory barrier to ensure load of datsize */
-+ smp_rmb();
-+ if (!is_valid(rb, lpos))
-+ return -EINVAL;
-+ if (PRB_INDEX(rb, lpos) + datsize >
-+ PRB_SIZE(rb) - PRB_DATA_ALIGN) {
-+ return -EINVAL;
-+ }
-+ if (size > datsize)
-+ size = datsize;
-+ memcpy(buf, &e->data[0], size);
-+ }
-+ if (seq)
-+ *seq = e->seq;
-+ /* memory barrier to ensure loads of entry data */
-+ smp_rmb();
-+ }
-+
-+ if (!is_valid(rb, lpos))
-+ return -EINVAL;
-+
-+ return datsize;
-+}
-+
-+/*
-+ * prb_iter_next: Advance to the next record.
-+ * @iter: Iterator tracking the current position.
-+ * @buf: A buffer to store the data of the next record. May be NULL.
-+ * @size: The size of @buf. (Ignored if @buf is NULL.)
-+ * @seq: The sequence number of the next record. May be NULL.
-+ *
-+ * If a next record is available, @iter is advanced and (if specified)
-+ * the data and/or sequence number of that record are provided.
-+ *
-+ * It is safe to call this function from any context and state.
-+ *
-+ * Returns 1 if @iter was advanced, 0 if @iter is at the end of the list, or
-+ * -EINVAL if @iter is now invalid.
-+ */
-+int prb_iter_next(struct prb_iterator *iter, char *buf, int size, u64 *seq)
-+{
-+ struct printk_ringbuffer *rb = iter->rb;
-+ unsigned long next_lpos;
-+ struct prb_entry *e;
-+ unsigned int esize;
-+
-+ if (iter->lpos == PRB_INIT) {
-+ next_lpos = atomic_long_read(&rb->tail);
-+ } else {
-+ if (!is_valid(rb, iter->lpos))
-+ return -EINVAL;
-+ /* memory barrier to ensure valid lpos */
-+ smp_rmb();
-+ e = to_entry(rb, iter->lpos);
-+ esize = e->size;
-+ /* memory barrier to ensure load of size */
-+ smp_rmb();
-+ if (!is_valid(rb, iter->lpos))
-+ return -EINVAL;
-+ next_lpos = iter->lpos + esize;
-+ }
-+ if (next_lpos == atomic_long_read(&rb->head))
-+ return 0;
-+ if (!is_valid(rb, next_lpos))
-+ return -EINVAL;
-+ /* memory barrier to ensure valid lpos */
-+ smp_rmb();
-+
-+ iter->lpos = next_lpos;
-+ e = to_entry(rb, iter->lpos);
-+ esize = e->size;
-+ /* memory barrier to ensure load of size */
-+ smp_rmb();
-+ if (!is_valid(rb, iter->lpos))
-+ return -EINVAL;
-+ if (esize == -1)
-+ iter->lpos = PRB_WRAP_LPOS(rb, iter->lpos, 1);
-+
-+ if (prb_iter_data(iter, buf, size, seq) < 0)
-+ return -EINVAL;
-+
-+ return 1;
-+}
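Although the patch above is dropped in this update, the read idiom it used is worth noting: validate the position, read under ordering barriers, then validate again so that a concurrent overwrite is detected rather than returned as garbage. A hedged sketch of the idiom (the ring buffer types and helpers here are assumptions modeled on the deleted code):

/* Sketch of the validate/read/revalidate idiom: data copied out of a
 * lock-free ring is trusted only if the position is still valid after
 * the copy completes.
 */
static int read_entry(struct ringbuf *rb, unsigned long lpos,
		      char *buf, int size)
{
	struct entry *e = to_entry(rb, lpos);
	int len;

	if (!is_valid(rb, lpos))
		return -EINVAL;
	smp_rmb();	/* order validity check against data loads */

	len = min_t(int, size, entry_data_size(e));
	memcpy(buf, &e->data[0], len);

	smp_rmb();	/* order data loads against revalidation */
	if (!is_valid(rb, lpos))	/* overwritten while copying? */
		return -EINVAL;

	return len;
}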
diff --git a/debian/patches-rt/0011-printk_safe-remove-printk-safe-code.patch b/debian/patches-rt/0005-printk-remove-safe-buffers.patch
index fad755708..485b44ccd 100644
--- a/debian/patches-rt/0011-printk_safe-remove-printk-safe-code.patch
+++ b/debian/patches-rt/0005-printk-remove-safe-buffers.patch
@@ -1,30 +1,29 @@
From: John Ogness <john.ogness@linutronix.de>
-Date: Tue, 12 Feb 2019 15:29:49 +0100
-Subject: [PATCH 11/25] printk_safe: remove printk safe code
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Date: Wed, 14 Oct 2020 20:00:11 +0200
+Subject: [PATCH 05/15] printk: remove safe buffers
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
-vprintk variants are now NMI-safe so there is no longer a need for
-the "safe" calls.
+With @logbuf_lock removed, the high level printk functions for
+storing messages are lockless. Messages can be stored from any
+context, so there is no need for the NMI and safe buffers anymore.
-NOTE: This also removes printk flushing functionality.
+Remove the NMI and safe buffers. In NMI or safe contexts, store
+the message immediately but still use irq_work to defer the console
+printing.
Signed-off-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
arch/powerpc/kernel/traps.c | 1
arch/powerpc/kernel/watchdog.c | 5
- include/linux/hardirq.h | 2
- include/linux/printk.h | 21 --
+ include/linux/printk.h | 10 -
kernel/kexec_core.c | 1
kernel/panic.c | 3
- kernel/printk/Makefile | 1
- kernel/printk/internal.h | 35 ---
- kernel/printk/printk.c | 47 ----
- kernel/printk/printk_safe.c | 414 -----------------------------------------
- kernel/trace/trace.c | 2
+ kernel/printk/internal.h | 2
+ kernel/printk/printk.c | 3
+ kernel/printk/printk_safe.c | 324 -----------------------------------------
lib/nmi_backtrace.c | 6
- 12 files changed, 7 insertions(+), 531 deletions(-)
- delete mode 100644 kernel/printk/printk_safe.c
+ 9 files changed, 5 insertions(+), 350 deletions(-)
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -50,46 +49,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (sysctl_hardlockup_all_cpu_backtrace)
trigger_allbutself_cpu_backtrace();
---- a/include/linux/hardirq.h
-+++ b/include/linux/hardirq.h
-@@ -115,7 +115,6 @@ extern void rcu_nmi_exit(void);
- do { \
- lockdep_off(); \
- arch_nmi_enter(); \
-- printk_nmi_enter(); \
- BUG_ON(in_nmi() == NMI_MASK); \
- __preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
- } while (0)
-@@ -134,7 +133,6 @@ extern void rcu_nmi_exit(void);
- do { \
- BUG_ON(!in_nmi()); \
- __preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
-- printk_nmi_exit(); \
- arch_nmi_exit(); \
- lockdep_on(); \
- } while (0)
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
-@@ -147,18 +147,6 @@ static inline __printf(1, 2) __cold
- void early_printk(const char *s, ...) { }
- #endif
-
--#ifdef CONFIG_PRINTK_NMI
--extern void printk_nmi_enter(void);
--extern void printk_nmi_exit(void);
--extern void printk_nmi_direct_enter(void);
--extern void printk_nmi_direct_exit(void);
--#else
--static inline void printk_nmi_enter(void) { }
--static inline void printk_nmi_exit(void) { }
--static inline void printk_nmi_direct_enter(void) { }
--static inline void printk_nmi_direct_exit(void) { }
--#endif /* PRINTK_NMI */
--
- #ifdef CONFIG_PRINTK
- asmlinkage __printf(5, 0)
- int vprintk_emit(int facility, int level,
-@@ -203,8 +191,6 @@ void __init setup_log_buf(int early);
+@@ -205,8 +205,6 @@ void __init setup_log_buf(int early);
void dump_stack_print_info(const char *log_lvl);
void show_regs_print_info(const char *log_lvl);
extern asmlinkage void dump_stack(void) __cold;
@@ -98,10 +60,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#else
static inline __printf(1, 0)
int vprintk(const char *s, va_list args)
-@@ -269,13 +255,6 @@ static inline void dump_stack(void)
+@@ -270,14 +268,6 @@ static inline void show_regs_print_info(
+ static inline void dump_stack(void)
{
}
-
+-
-static inline void printk_safe_flush(void)
-{
-}
@@ -141,188 +104,41 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
kmsg_dump(KMSG_DUMP_PANIC);
/*
---- a/kernel/printk/Makefile
-+++ b/kernel/printk/Makefile
-@@ -1,4 +1,3 @@
- # SPDX-License-Identifier: GPL-2.0-only
- obj-y = printk.o
--obj-$(CONFIG_PRINTK) += printk_safe.o
- obj-$(CONFIG_A11Y_BRAILLE_CONSOLE) += braille.o
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
-@@ -22,35 +22,6 @@ int vprintk_store(int facility, int leve
- __printf(1, 0) int vprintk_default(const char *fmt, va_list args);
- __printf(1, 0) int vprintk_deferred(const char *fmt, va_list args);
- __printf(1, 0) int vprintk_func(const char *fmt, va_list args);
--void __printk_safe_enter(void);
--void __printk_safe_exit(void);
--
--void printk_safe_init(void);
--bool printk_percpu_data_ready(void);
--
--#define printk_safe_enter_irqsave(flags) \
-- do { \
-- local_irq_save(flags); \
-- __printk_safe_enter(); \
-- } while (0)
--
--#define printk_safe_exit_irqrestore(flags) \
-- do { \
-- __printk_safe_exit(); \
-- local_irq_restore(flags); \
-- } while (0)
--
--#define printk_safe_enter_irq() \
-- do { \
-- local_irq_disable(); \
-- __printk_safe_enter(); \
-- } while (0)
--
--#define printk_safe_exit_irq() \
-- do { \
-- __printk_safe_exit(); \
-- local_irq_enable(); \
-- } while (0)
-
- void defer_console_output(void);
+@@ -23,7 +23,6 @@ int vprintk_store(int facility, int leve
+ void __printk_safe_enter(void);
+ void __printk_safe_exit(void);
-@@ -63,12 +34,10 @@ void defer_console_output(void);
- * semaphore and some of console functions (console_unlock()/etc.), so
- * printk-safe must preserve the existing local IRQ guarantees.
- */
-+#endif /* CONFIG_PRINTK */
-+
- #define printk_safe_enter_irqsave(flags) local_irq_save(flags)
- #define printk_safe_exit_irqrestore(flags) local_irq_restore(flags)
+-void printk_safe_init(void);
+ bool printk_percpu_data_ready(void);
+ #define printk_safe_enter_irqsave(flags) \
+@@ -67,6 +66,5 @@ void defer_console_output(void);
#define printk_safe_enter_irq() local_irq_disable()
#define printk_safe_exit_irq() local_irq_enable()
--
+
-static inline void printk_safe_init(void) { }
--static inline bool printk_percpu_data_ready(void) { return false; }
--#endif /* CONFIG_PRINTK */
+ static inline bool printk_percpu_data_ready(void) { return false; }
+ #endif /* CONFIG_PRINTK */
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -461,18 +461,6 @@ static char __log_buf[__LOG_BUF_LEN] __a
- static char *log_buf = __log_buf;
- static u32 log_buf_len = __LOG_BUF_LEN;
+@@ -1037,9 +1037,6 @@ static inline void log_buf_add_cpu(void)
--/*
-- * We cannot access per-CPU data (e.g. per-CPU flush irq_work) before
-- * per_cpu_areas are initialised. This variable is set to true when
-- * it's safe to access per-CPU data.
-- */
--static bool __printk_percpu_data_ready __read_mostly;
--
--bool printk_percpu_data_ready(void)
--{
-- return __printk_percpu_data_ready;
--}
--
- /* Return log buffer address */
- char *log_buf_addr_get(void)
+ static void __init set_percpu_data_ready(void)
{
-@@ -1069,28 +1057,12 @@ static void __init log_buf_add_cpu(void)
- static inline void log_buf_add_cpu(void) {}
- #endif /* CONFIG_SMP */
-
--static void __init set_percpu_data_ready(void)
--{
- printk_safe_init();
- /* Make sure we set this flag only after printk_safe() init is done */
- barrier();
-- __printk_percpu_data_ready = true;
--}
--
- void __init setup_log_buf(int early)
- {
- unsigned long flags;
- char *new_log_buf;
- unsigned int free;
-
-- /*
-- * Some archs call setup_log_buf() multiple times - first is very
-- * early, e.g. from setup_arch(), and second - when percpu_areas
-- * are initialised.
-- */
-- if (!early)
-- set_percpu_data_ready();
--
- if (log_buf != __log_buf)
- return;
-
-@@ -1769,13 +1741,6 @@ static bool cont_add(u32 caller_id, int
- }
- #endif /* 0 */
-
--int vprintk_store(int facility, int level,
-- const char *dict, size_t dictlen,
-- const char *fmt, va_list args)
--{
-- return vprintk_emit(facility, level, dict, dictlen, fmt, args);
--}
--
- /* ring buffer used as memory allocator for temporary sprint buffers */
- DECLARE_STATIC_PRINTKRB(sprint_rb,
- ilog2(PRINTK_RECORD_MAX + sizeof(struct prb_entry) +
-@@ -1844,6 +1809,11 @@ asmlinkage int vprintk_emit(int facility
+ __printk_percpu_data_ready = true;
}
- EXPORT_SYMBOL(vprintk_emit);
-+__printf(1, 0) int vprintk_func(const char *fmt, va_list args)
-+{
-+ return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
-+}
-+
- asmlinkage int vprintk(const char *fmt, va_list args)
- {
- return vprintk_func(fmt, args);
-@@ -2830,9 +2800,6 @@ static DEFINE_PER_CPU(struct irq_work, w
+--- a/kernel/printk/printk_safe.c
++++ b/kernel/printk/printk_safe.c
+@@ -15,274 +15,9 @@
- void wake_up_klogd(void)
- {
-- if (!printk_percpu_data_ready())
-- return;
--
- preempt_disable();
- if (waitqueue_active(&log_wait)) {
- this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
-@@ -2910,9 +2877,6 @@ late_initcall(init_printk_kthread);
+ #include "internal.h"
- void defer_console_output(void)
- {
-- if (!printk_percpu_data_ready())
-- return;
--
- preempt_disable();
- __this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT);
- irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
-@@ -3312,5 +3276,4 @@ void kmsg_dump_rewind(struct kmsg_dumper
- logbuf_unlock_irqrestore(flags);
- }
- EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
--
- #endif
---- a/kernel/printk/printk_safe.c
-+++ /dev/null
-@@ -1,414 +0,0 @@
--// SPDX-License-Identifier: GPL-2.0-or-later
--/*
-- * printk_safe.c - Safe printk for printk-deadlock-prone contexts
-- */
--
--#include <linux/preempt.h>
--#include <linux/spinlock.h>
--#include <linux/debug_locks.h>
--#include <linux/kdb.h>
--#include <linux/smp.h>
--#include <linux/cpumask.h>
--#include <linux/irq_work.h>
--#include <linux/printk.h>
--#include <linux/kprobes.h>
--
--#include "internal.h"
--
-/*
- * printk() could not take logbuf_lock in NMI context. Instead,
- * it uses an alternative implementation that temporary stores
@@ -351,9 +167,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-};
-
-static DEFINE_PER_CPU(struct printk_safe_seq_buf, safe_print_seq);
--static DEFINE_PER_CPU(int, printk_context);
--
--#ifdef CONFIG_PRINTK_NMI
+ static DEFINE_PER_CPU(int, printk_context);
+
+ #ifdef CONFIG_PRINTK_NMI
-static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
-#endif
-
@@ -574,18 +390,6 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- */
-void printk_safe_flush_on_panic(void)
-{
-- /*
-- * Make sure that we could access the main ring buffer.
-- * Do not risk a double release when more CPUs are up.
-- */
-- if (raw_spin_is_locked(&logbuf_lock)) {
-- if (num_online_cpus() > 1)
-- return;
--
-- debug_locks_off();
-- raw_spin_lock_init(&logbuf_lock);
-- }
--
- printk_safe_flush();
-}
-
@@ -603,37 +407,25 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- return printk_safe_log_store(s, fmt, args);
-}
-
--void noinstr printk_nmi_enter(void)
--{
-- this_cpu_add(printk_context, PRINTK_NMI_CONTEXT_OFFSET);
--}
--
--void noinstr printk_nmi_exit(void)
--{
-- this_cpu_sub(printk_context, PRINTK_NMI_CONTEXT_OFFSET);
--}
--
--/*
-- * Marks a code that might produce many messages in NMI context
-- * and the risk of losing them is more critical than eventual
-- * reordering.
+ void noinstr printk_nmi_enter(void)
+ {
+ this_cpu_add(printk_context, PRINTK_NMI_CONTEXT_OFFSET);
+@@ -297,11 +32,6 @@ void noinstr printk_nmi_exit(void)
+ * Marks a code that might produce many messages in NMI context
+ * and the risk of losing them is more critical than eventual
+ * reordering.
- *
- * It has effect only when called in NMI context. Then printk()
- * will try to store the messages into the main logbuf directly
- * and use the per-CPU buffers only as a fallback when the lock
- * is not available.
-- */
--void printk_nmi_direct_enter(void)
--{
-- if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
-- this_cpu_or(printk_context, PRINTK_NMI_DIRECT_CONTEXT_MASK);
--}
--
--void printk_nmi_direct_exit(void)
--{
-- this_cpu_and(printk_context, ~PRINTK_NMI_DIRECT_CONTEXT_MASK);
--}
--
+ */
+ void printk_nmi_direct_enter(void)
+ {
+@@ -314,27 +44,8 @@ void printk_nmi_direct_exit(void)
+ this_cpu_and(printk_context, ~PRINTK_NMI_DIRECT_CONTEXT_MASK);
+ }
+
-#else
-
-static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
@@ -641,8 +433,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- return 0;
-}
-
--#endif /* CONFIG_PRINTK_NMI */
--
+ #endif /* CONFIG_PRINTK_NMI */
+
-/*
- * Lock-less printk(), to avoid deadlocks should the printk() recurse
- * into itself. It uses a per-CPU buffer to store the message, just like
@@ -655,41 +447,30 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- return printk_safe_log_store(s, fmt, args);
-}
-
--/* Can be preempted by NMI. */
--void __printk_safe_enter(void)
--{
-- this_cpu_inc(printk_context);
--}
--
--/* Can be preempted by NMI. */
--void __printk_safe_exit(void)
--{
-- this_cpu_dec(printk_context);
--}
--
--__printf(1, 0) int vprintk_func(const char *fmt, va_list args)
--{
--#ifdef CONFIG_KGDB_KDB
-- /* Allow to pass printk() to kdb but avoid a recursion. */
-- if (unlikely(kdb_trap_printk && kdb_printf_cpu < 0))
-- return vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args);
--#endif
--
-- /*
+ /* Can be preempted by NMI. */
+ void __printk_safe_enter(void)
+ {
+@@ -356,10 +67,13 @@ void __printk_safe_exit(void)
+ #endif
+
+ /*
- * Try to use the main logbuf even in NMI. But avoid calling console
-- * drivers that might have their own locks.
-- */
-- if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK) &&
-- raw_spin_trylock(&logbuf_lock)) {
-- int len;
--
-- len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
-- raw_spin_unlock(&logbuf_lock);
-- defer_console_output();
-- return len;
-- }
--
-- /* Use extra buffer in NMI when logbuf_lock is taken or in safe mode. */
++ * Store to the ringbuffer, even in NMI. But avoid calling console
+ * drivers that might have their own locks.
+ */
+- if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK)) {
++ if (this_cpu_read(printk_context) &
++ (PRINTK_NMI_DIRECT_CONTEXT_MASK |
++ PRINTK_NMI_CONTEXT_MASK |
++ PRINTK_SAFE_CONTEXT_MASK)) {
+ int len;
+
+ len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
+@@ -367,34 +81,6 @@ void __printk_safe_exit(void)
+ return len;
+ }
+
+- /* Use extra buffer in NMI or in safe mode. */
- if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
- return vprintk_nmi(fmt, args);
-
@@ -697,9 +478,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK)
- return vprintk_safe(fmt, args);
-
-- /* No obstacles. */
-- return vprintk_default(fmt, args);
--}
+ /* No obstacles. */
+ return vprintk_default(fmt, args);
+ }
-
-void __init printk_safe_init(void)
-{
@@ -720,24 +501,6 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- /* Flush pending messages that did not have scheduled IRQ works. */
- printk_safe_flush();
-}
---- a/kernel/trace/trace.c
-+++ b/kernel/trace/trace.c
-@@ -9249,7 +9249,6 @@ void ftrace_dump(enum ftrace_dump_mode o
- tracing_off();
-
- local_irq_save(flags);
-- printk_nmi_direct_enter();
-
- /* Simulate the iterator */
- trace_init_global_iter(&iter);
-@@ -9329,7 +9328,6 @@ void ftrace_dump(enum ftrace_dump_mode o
- atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
- }
- atomic_dec(&dump_running);
-- printk_nmi_direct_exit();
- local_irq_restore(flags);
- }
- EXPORT_SYMBOL_GPL(ftrace_dump);
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -75,12 +75,6 @@ void nmi_trigger_cpumask_backtrace(const
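The behavioral core of this patch is the vprintk_func() change above: with the ringbuffer lockless, NMI and printk-safe contexts store their message immediately, and only the console flush is deferred via irq_work. Restated as a hedged sketch (vprintk_store() and defer_console_output() are the helpers named in the hunk; the surrounding plumbing is simplified):

/* Sketch of the new dispatch: store locklessly from any marked
 * context, defer only the console output.
 */
static int vprintk_func_sketch(const char *fmt, va_list args)
{
	if (this_cpu_read(printk_context) &
	    (PRINTK_NMI_DIRECT_CONTEXT_MASK |
	     PRINTK_NMI_CONTEXT_MASK |
	     PRINTK_SAFE_CONTEXT_MASK)) {
		int len;

		len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
		defer_console_output();
		return len;
	}

	/* No obstacles. */
	return vprintk_default(fmt, args);
}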
diff --git a/debian/patches-rt/0005-timekeeping-Use-seqcount_latch_t.patch b/debian/patches-rt/0005-timekeeping-Use-seqcount_latch_t.patch
index fc0cad066..5b7611961 100644
--- a/debian/patches-rt/0005-timekeeping-Use-seqcount_latch_t.patch
+++ b/debian/patches-rt/0005-timekeeping-Use-seqcount_latch_t.patch
@@ -4,7 +4,7 @@ Subject: [PATCH 05/13] timekeeping: Use seqcount_latch_t
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Latch sequence counters are a multiversion concurrency control mechanism
where the seqcount_t counter even/odd value is used to switch between
diff --git a/debian/patches-rt/0005-workqueue-Manually-break-affinity-on-hotplug.patch b/debian/patches-rt/0005-workqueue-Manually-break-affinity-on-hotplug.patch
index a78cf34fa..307c7443f 100644
--- a/debian/patches-rt/0005-workqueue-Manually-break-affinity-on-hotplug.patch
+++ b/debian/patches-rt/0005-workqueue-Manually-break-affinity-on-hotplug.patch
@@ -1,12 +1,13 @@
From: Peter Zijlstra <peterz@infradead.org>
-Date: Mon, 5 Oct 2020 16:57:22 +0200
-Subject: [PATCH 05/17] workqueue: Manually break affinity on hotplug
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Date: Fri, 23 Oct 2020 12:12:03 +0200
+Subject: [PATCH 05/19] workqueue: Manually break affinity on hotplug
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Don't rely on the scheduler to force break affinity for us -- it will
stop doing that for per-cpu-kthreads.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/workqueue.c | 4 ++++
diff --git a/debian/patches-rt/0002-printk-rb-add-prb-locking-functions.patch b/debian/patches-rt/0006-console-add-write_atomic-interface.patch
index d9c67f673..9393dcfb1 100644
--- a/debian/patches-rt/0002-printk-rb-add-prb-locking-functions.patch
+++ b/debian/patches-rt/0006-console-add-write_atomic-interface.patch
@@ -1,53 +1,58 @@
From: John Ogness <john.ogness@linutronix.de>
-Date: Tue, 12 Feb 2019 15:29:40 +0100
-Subject: [PATCH 02/25] printk-rb: add prb locking functions
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Date: Wed, 14 Oct 2020 20:26:35 +0200
+Subject: [PATCH 06/15] console: add write_atomic interface
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
-Add processor-reentrant spin locking functions. These allow
-restricting the number of possible contexts to 2, which can simplify
-implementing code that also supports NMI interruptions.
+Add a write_atomic() callback to the console. This is an optional
+function for console drivers. The function must be atomic (including
+NMI safe) for writing to the console.
- prb_lock();
+Console drivers must still implement the write() callback. The
+write_atomic() callback will only be used in special situations,
+such as when the kernel panics.
- /*
- * This code is synchronized with all contexts
- * except an NMI on the same processor.
- */
+Creating an NMI safe write_atomic() that must synchronize with
+write() requires a careful implementation of the console driver. To
+aid with the implementation, a set of console_atomic_*() functions
+is provided:
- prb_unlock();
+ void console_atomic_lock(unsigned int *flags);
+ void console_atomic_unlock(unsigned int flags);
-In order to support printk's emergency messages, a
-processor-reentrant spin lock will be used to control raw access to
-the emergency console. However, it must be the same
-processor-reentrant spin lock as the one used by the ring buffer,
-otherwise a deadlock can occur:
-
- CPU1: printk lock -> emergency -> serial lock
- CPU2: serial lock -> printk lock
-
-By making the processor-reentrant implementation available externally,
-printk can use the same atomic_t for the ring buffer as for the
-emergency console and thus avoid the above deadlock.
+These functions synchronize using a processor-reentrant spinlock
+(called a cpulock).
Signed-off-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- include/linux/printk_ringbuffer.h | 24 +++++++++++
- lib/Makefile | 2
- lib/printk_ringbuffer.c | 77 ++++++++++++++++++++++++++++++++++++++
- 3 files changed, 102 insertions(+), 1 deletion(-)
- create mode 100644 include/linux/printk_ringbuffer.h
- create mode 100644 lib/printk_ringbuffer.c
+ include/linux/console.h | 4 +
+ kernel/printk/printk.c | 100 ++++++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 104 insertions(+)
---- /dev/null
-+++ b/include/linux/printk_ringbuffer.h
-@@ -0,0 +1,24 @@
-+/* SPDX-License-Identifier: GPL-2.0 */
-+#ifndef _LINUX_PRINTK_RINGBUFFER_H
-+#define _LINUX_PRINTK_RINGBUFFER_H
+--- a/include/linux/console.h
++++ b/include/linux/console.h
+@@ -141,6 +141,7 @@ static inline int con_debug_leave(void)
+ struct console {
+ char name[16];
+ void (*write)(struct console *, const char *, unsigned);
++ void (*write_atomic)(struct console *, const char *, unsigned);
+ int (*read)(struct console *, char *, unsigned);
+ struct tty_driver *(*device)(struct console *, int *);
+ void (*unblank)(void);
+@@ -230,4 +231,7 @@ extern void console_init(void);
+ void dummycon_register_output_notifier(struct notifier_block *nb);
+ void dummycon_unregister_output_notifier(struct notifier_block *nb);
+
++extern void console_atomic_lock(unsigned int *flags);
++extern void console_atomic_unlock(unsigned int flags);
+
-+#include <linux/atomic.h>
-+#include <linux/percpu.h>
+ #endif /* _LINUX_CONSOLE_H */
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -3516,3 +3516,103 @@ void kmsg_dump_rewind(struct kmsg_dumper
+ EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
+
+ #endif
+
+struct prb_cpulock {
+ atomic_t owner;
@@ -61,29 +66,6 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ .irqflags = &_##name##_percpu_irqflags, \
+}
+
-+/* utility functions */
-+void prb_lock(struct prb_cpulock *cpu_lock, unsigned int *cpu_store);
-+void prb_unlock(struct prb_cpulock *cpu_lock, unsigned int cpu_store);
-+
-+#endif /*_LINUX_PRINTK_RINGBUFFER_H */
---- a/lib/Makefile
-+++ b/lib/Makefile
-@@ -32,7 +32,7 @@ KCSAN_SANITIZE_random32.o := n
-
- lib-y := ctype.o string.o vsprintf.o cmdline.o \
- rbtree.o radix-tree.o timerqueue.o xarray.o \
-- idr.o extable.o sha1.o irq_regs.o argv_split.o \
-+ idr.o extable.o sha1.o irq_regs.o argv_split.o printk_ringbuffer.o \
- flex_proportions.o ratelimit.o show_mem.o \
- is_single_threaded.o plist.o decompress.o kobject_uevent.o \
- earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
---- /dev/null
-+++ b/lib/printk_ringbuffer.c
-@@ -0,0 +1,77 @@
-+// SPDX-License-Identifier: GPL-2.0
-+#include <linux/smp.h>
-+#include <linux/printk_ringbuffer.h>
-+
+static bool __prb_trylock(struct prb_cpulock *cpu_lock,
+ unsigned int *cpu_store)
+{
@@ -157,3 +139,17 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+ put_cpu();
+}
++
++DECLARE_STATIC_PRINTKRB_CPULOCK(printk_cpulock);
++
++void console_atomic_lock(unsigned int *flags)
++{
++ prb_lock(&printk_cpulock, flags);
++}
++EXPORT_SYMBOL(console_atomic_lock);
++
++void console_atomic_unlock(unsigned int flags)
++{
++ prb_unlock(&printk_cpulock, flags);
++}
++EXPORT_SYMBOL(console_atomic_unlock);
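A console driver adopting the new callback would bracket its raw output path with the cpulock helpers exported above. A hypothetical sketch (my_uart_putc() is a placeholder for a driver's polling character output, not a real API):

/* Sketch: a write_atomic() implementation built on the
 * console_atomic_*() helpers. The cpulock makes this safe to call
 * from NMI context and reentrant on the owning CPU.
 */
static void my_console_write_atomic(struct console *con,
				    const char *s, unsigned int count)
{
	unsigned int flags;

	console_atomic_lock(&flags);
	while (count--)
		my_uart_putc(*s++);
	console_atomic_unlock(flags);
}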
diff --git a/debian/patches-rt/0006-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch b/debian/patches-rt/0006-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch
index c104e4f1b..0bd7e02cb 100644
--- a/debian/patches-rt/0006-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch
+++ b/debian/patches-rt/0006-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch
@@ -1,7 +1,7 @@
From: Steven Rostedt <rostedt@goodmis.org>
Date: Tue, 14 Jul 2015 14:26:34 +0200
-Subject: [PATCH 06/23] futex: Fix bug on when a requeued RT task times out
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Subject: [PATCH 06/22] futex: Fix bug on when a requeued RT task times out
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Requeue with timeout causes a bug with PREEMPT_RT.
diff --git a/debian/patches-rt/0006-printk-rb-add-blocking-reader-support.patch b/debian/patches-rt/0006-printk-rb-add-blocking-reader-support.patch
deleted file mode 100644
index f3ab52e2a..000000000
--- a/debian/patches-rt/0006-printk-rb-add-blocking-reader-support.patch
+++ /dev/null
@@ -1,162 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Tue, 12 Feb 2019 15:29:44 +0100
-Subject: [PATCH 06/25] printk-rb: add blocking reader support
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-Add a blocking read function for readers. An irq_work function is
-used to signal the wait queue so that write notification can
-be triggered from any context.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/printk_ringbuffer.h | 20 +++++++++++++
- lib/printk_ringbuffer.c | 55 ++++++++++++++++++++++++++++++++++++++
- 2 files changed, 75 insertions(+)
-
---- a/include/linux/printk_ringbuffer.h
-+++ b/include/linux/printk_ringbuffer.h
-@@ -2,8 +2,10 @@
- #ifndef _LINUX_PRINTK_RINGBUFFER_H
- #define _LINUX_PRINTK_RINGBUFFER_H
-
-+#include <linux/irq_work.h>
- #include <linux/atomic.h>
- #include <linux/percpu.h>
-+#include <linux/wait.h>
-
- struct prb_cpulock {
- atomic_t owner;
-@@ -22,6 +24,10 @@ struct printk_ringbuffer {
-
- struct prb_cpulock *cpulock;
- atomic_t ctx;
-+
-+ struct wait_queue_head *wq;
-+ atomic_long_t wq_counter;
-+ struct irq_work *wq_work;
- };
-
- struct prb_entry {
-@@ -59,6 +65,15 @@ struct prb_iterator {
- #define DECLARE_STATIC_PRINTKRB(name, szbits, cpulockptr) \
- static char _##name##_buffer[1 << (szbits)] \
- __aligned(__alignof__(long)); \
-+static DECLARE_WAIT_QUEUE_HEAD(_##name##_wait); \
-+static void _##name##_wake_work_func(struct irq_work *irq_work) \
-+{ \
-+ wake_up_interruptible_all(&_##name##_wait); \
-+} \
-+static struct irq_work _##name##_wake_work = { \
-+ .func = _##name##_wake_work_func, \
-+ .flags = ATOMIC_INIT(IRQ_WORK_LAZY), \
-+}; \
- static struct printk_ringbuffer name = { \
- .buffer = &_##name##_buffer[0], \
- .size_bits = szbits, \
-@@ -68,6 +83,9 @@ static struct printk_ringbuffer name = {
- .reserve = ATOMIC_LONG_INIT(-111 * sizeof(long)), \
- .cpulock = cpulockptr, \
- .ctx = ATOMIC_INIT(0), \
-+ .wq = &_##name##_wait, \
-+ .wq_counter = ATOMIC_LONG_INIT(0), \
-+ .wq_work = &_##name##_wake_work, \
- }
-
- /* writer interface */
-@@ -80,6 +98,8 @@ void prb_iter_init(struct prb_iterator *
- u64 *seq);
- void prb_iter_copy(struct prb_iterator *dest, struct prb_iterator *src);
- int prb_iter_next(struct prb_iterator *iter, char *buf, int size, u64 *seq);
-+int prb_iter_wait_next(struct prb_iterator *iter, char *buf, int size,
-+ u64 *seq);
- int prb_iter_data(struct prb_iterator *iter, char *buf, int size, u64 *seq);
-
- /* utility functions */
---- a/lib/printk_ringbuffer.c
-+++ b/lib/printk_ringbuffer.c
-@@ -1,4 +1,5 @@
- // SPDX-License-Identifier: GPL-2.0
-+#include <linux/sched.h>
- #include <linux/smp.h>
- #include <linux/string.h>
- #include <linux/errno.h>
-@@ -154,6 +155,7 @@ static bool push_tail(struct printk_ring
- void prb_commit(struct prb_handle *h)
- {
- struct printk_ringbuffer *rb = h->rb;
-+ bool changed = false;
- struct prb_entry *e;
- unsigned long head;
- unsigned long res;
-@@ -175,6 +177,7 @@ void prb_commit(struct prb_handle *h)
- }
- e->seq = ++rb->seq;
- head += e->size;
-+ changed = true;
- }
- atomic_long_set_release(&rb->head, res);
- atomic_dec(&rb->ctx);
-@@ -185,6 +188,18 @@ void prb_commit(struct prb_handle *h)
- }
-
- prb_unlock(rb->cpulock, h->cpu);
-+
-+ if (changed) {
-+ atomic_long_inc(&rb->wq_counter);
-+ if (wq_has_sleeper(rb->wq)) {
-+#ifdef CONFIG_IRQ_WORK
-+ irq_work_queue(rb->wq_work);
-+#else
-+ if (!in_nmi())
-+ wake_up_interruptible_all(rb->wq);
-+#endif
-+ }
-+ }
- }
-
- /*
-@@ -437,3 +452,43 @@ int prb_iter_next(struct prb_iterator *i
-
- return 1;
- }
-+
-+/*
-+ * prb_iter_wait_next: Advance to the next record, blocking if none available.
-+ * @iter: Iterator tracking the current position.
-+ * @buf: A buffer to store the data of the next record. May be NULL.
-+ * @size: The size of @buf. (Ignored if @buf is NULL.)
-+ * @seq: The sequence number of the next record. May be NULL.
-+ *
-+ * If a next record is already available, this function works like
-+ * prb_iter_next(). Otherwise block interruptible until a next record is
-+ * available.
-+ *
-+ * When a next record is available, @iter is advanced and (if specified)
-+ * the data and/or sequence number of that record are provided.
-+ *
-+ * This function might sleep.
-+ *
-+ * Returns 1 if @iter was advanced, -EINVAL if @iter is now invalid, or
-+ * -ERESTARTSYS if interrupted by a signal.
-+ */
-+int prb_iter_wait_next(struct prb_iterator *iter, char *buf, int size, u64 *seq)
-+{
-+ unsigned long last_seen;
-+ int ret;
-+
-+ for (;;) {
-+ last_seen = atomic_long_read(&iter->rb->wq_counter);
-+
-+ ret = prb_iter_next(iter, buf, size, seq);
-+ if (ret != 0)
-+ break;
-+
-+ ret = wait_event_interruptible(*iter->rb->wq,
-+ last_seen != atomic_long_read(&iter->rb->wq_counter));
-+ if (ret < 0)
-+ break;
-+ }
-+
-+ return ret;
-+}
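A hedged usage sketch of this blocking iterator API, in C, mirroring how the printk kthread later in this series consumes records; the thread wrapper and buffer size here are illustrative only, not from the patch:

    static int reader_thread(void *arg)
    {
        struct printk_ringbuffer *rb = arg;
        struct prb_iterator iter;
        char buf[256];  /* illustrative size */
        u64 seq;
        int ret;

        prb_iter_init(&iter, rb, NULL);
        for (;;) {
            ret = prb_iter_wait_next(&iter, buf, sizeof(buf), &seq);
            if (ret == -ERESTARTSYS)
                continue;                       /* interrupted by a signal */
            if (ret < 0) {
                prb_iter_init(&iter, rb, NULL); /* iterator invalidated */
                continue;
            }
            /* ret == 1: buf holds the record with sequence number seq */
        }
        return 0;
    }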
diff --git a/debian/patches-rt/0006-printk-ringbuffer-support-dataless-records.patch b/debian/patches-rt/0006-printk-ringbuffer-support-dataless-records.patch
new file mode 100644
index 000000000..ad5d6043a
--- /dev/null
+++ b/debian/patches-rt/0006-printk-ringbuffer-support-dataless-records.patch
@@ -0,0 +1,253 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Tue, 21 Jul 2020 15:31:28 +0206
+Subject: [PATCH 06/25] printk: ringbuffer: support dataless records
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+With commit 896fbe20b4e2333fb55 ("printk: use the lockless ringbuffer"),
+printk() started silently dropping messages without text because such
+records are not supported by the new printk ringbuffer.
+
+Add support for such records.
+
+Currently dataless records are denoted by INVALID_LPOS in order
+to recognize failed prb_reserve() calls. Change the ringbuffer
+to instead use two different identifiers (FAILED_LPOS and
+NO_LPOS) to distinguish between failed prb_reserve() records and
+successful dataless records, respectively.
+
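A minimal sketch of the reader-side distinction the two sentinels enable; the macro values are taken from the hunks below, while the classify() helper is hypothetical:

    #define FAILED_LPOS 0x1
    #define NO_LPOS     0x3
    #define LPOS_DATALESS(lpos) ((lpos) & 1UL)

    static const char *classify(unsigned long begin, unsigned long next)
    {
        if (LPOS_DATALESS(begin) && LPOS_DATALESS(next)) {
            if (begin == NO_LPOS && next == NO_LPOS)
                return "valid record, no data";  /* e.g. empty text */
            return "failed reserve / lost data"; /* FAILED_LPOS pair */
        }
        return "record with a data block";
    }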
+Fixes: 896fbe20b4e2333fb55 ("printk: use the lockless ringbuffer")
+Fixes: https://lkml.kernel.org/r/20200718121053.GA691245@elver.google.com
+Reported-by: Marco Elver <elver@google.com>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Cc: Petr Mladek <pmladek@suse.com>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Marco Elver <elver@google.com>
+Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20200721132528.9661-1-john.ogness@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk_ringbuffer.c | 72 ++++++++++++++++++--------------------
+ kernel/printk/printk_ringbuffer.h | 15 ++++---
+ 2 files changed, 43 insertions(+), 44 deletions(-)
+
+--- a/kernel/printk/printk_ringbuffer.c
++++ b/kernel/printk/printk_ringbuffer.c
+@@ -264,6 +264,9 @@
+ /* Determine how many times the data array has wrapped. */
+ #define DATA_WRAPS(data_ring, lpos) ((lpos) >> (data_ring)->size_bits)
+
++/* Determine if a logical position refers to a data-less block. */
++#define LPOS_DATALESS(lpos) ((lpos) & 1UL)
++
+ /* Get the logical position at index 0 of the current wrap. */
+ #define DATA_THIS_WRAP_START_LPOS(data_ring, lpos) \
+ ((lpos) & ~DATA_SIZE_MASK(data_ring))
+@@ -320,21 +323,13 @@ static unsigned int to_blk_size(unsigned
+ * block does not exceed the maximum possible size that could fit within the
+ * ringbuffer. This function provides that basic size check so that the
+ * assumption is safe.
+- *
+- * Writers are also not allowed to write 0-sized (data-less) records. Such
+- * records are used only internally by the ringbuffer.
+ */
+ static bool data_check_size(struct prb_data_ring *data_ring, unsigned int size)
+ {
+ struct prb_data_block *db = NULL;
+
+- /*
+- * Writers are not allowed to write data-less records. Such records
+- * are used only internally by the ringbuffer to denote records where
+- * their data failed to allocate or have been lost.
+- */
+ if (size == 0)
+- return false;
++ return true;
+
+ /*
+ * Ensure the alignment padded size could possibly fit in the data
+@@ -568,8 +563,8 @@ static bool data_push_tail(struct printk
+ unsigned long tail_lpos;
+ unsigned long next_lpos;
+
+- /* If @lpos is not valid, there is nothing to do. */
+- if (lpos == INVALID_LPOS)
++ /* If @lpos is from a data-less block, there is nothing to do. */
++ if (LPOS_DATALESS(lpos))
+ return true;
+
+ /*
+@@ -962,8 +957,8 @@ static char *data_alloc(struct printk_ri
+
+ if (size == 0) {
+ /* Specify a data-less block. */
+- blk_lpos->begin = INVALID_LPOS;
+- blk_lpos->next = INVALID_LPOS;
++ blk_lpos->begin = NO_LPOS;
++ blk_lpos->next = NO_LPOS;
+ return NULL;
+ }
+
+@@ -976,8 +971,8 @@ static char *data_alloc(struct printk_ri
+
+ if (!data_push_tail(rb, data_ring, next_lpos - DATA_SIZE(data_ring))) {
+ /* Failed to allocate, specify a data-less block. */
+- blk_lpos->begin = INVALID_LPOS;
+- blk_lpos->next = INVALID_LPOS;
++ blk_lpos->begin = FAILED_LPOS;
++ blk_lpos->next = FAILED_LPOS;
+ return NULL;
+ }
+
+@@ -1025,6 +1020,10 @@ static char *data_alloc(struct printk_ri
+ static unsigned int space_used(struct prb_data_ring *data_ring,
+ struct prb_data_blk_lpos *blk_lpos)
+ {
++ /* Data-less blocks take no space. */
++ if (LPOS_DATALESS(blk_lpos->begin))
++ return 0;
++
+ if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next)) {
+ /* Data block does not wrap. */
+ return (DATA_INDEX(data_ring, blk_lpos->next) -
+@@ -1080,11 +1079,8 @@ bool prb_reserve(struct prb_reserved_ent
+ if (!data_check_size(&rb->text_data_ring, r->text_buf_size))
+ goto fail;
+
+- /* Records are allowed to not have dictionaries. */
+- if (r->dict_buf_size) {
+- if (!data_check_size(&rb->dict_data_ring, r->dict_buf_size))
+- goto fail;
+- }
++ if (!data_check_size(&rb->dict_data_ring, r->dict_buf_size))
++ goto fail;
+
+ /*
+ * Descriptors in the reserved state act as blockers to all further
+@@ -1205,15 +1201,18 @@ void prb_commit(struct prb_reserved_entr
+ * values to possibly detect bugs in the writer code. A WARN_ON_ONCE() is
+ * triggered if an internal error is detected.
+ */
+-static char *get_data(struct prb_data_ring *data_ring,
+- struct prb_data_blk_lpos *blk_lpos,
+- unsigned int *data_size)
++static const char *get_data(struct prb_data_ring *data_ring,
++ struct prb_data_blk_lpos *blk_lpos,
++ unsigned int *data_size)
+ {
+ struct prb_data_block *db;
+
+ /* Data-less data block description. */
+- if (blk_lpos->begin == INVALID_LPOS &&
+- blk_lpos->next == INVALID_LPOS) {
++ if (LPOS_DATALESS(blk_lpos->begin) && LPOS_DATALESS(blk_lpos->next)) {
++ if (blk_lpos->begin == NO_LPOS && blk_lpos->next == NO_LPOS) {
++ *data_size = 0;
++ return "";
++ }
+ return NULL;
+ }
+
+@@ -1256,11 +1255,11 @@ static char *get_data(struct prb_data_ri
+ * (even if @text_size is 0). Each '\n' processed is counted as an additional
+ * line.
+ */
+-static unsigned int count_lines(char *text, unsigned int text_size)
++static unsigned int count_lines(const char *text, unsigned int text_size)
+ {
+ unsigned int next_size = text_size;
+ unsigned int line_count = 1;
+- char *next = text;
++ const char *next = text;
+
+ while (next_size) {
+ next = memchr(next, '\n', next_size);
+@@ -1287,7 +1286,7 @@ static bool copy_data(struct prb_data_ri
+ unsigned int buf_size, unsigned int *line_count)
+ {
+ unsigned int data_size;
+- char *data;
++ const char *data;
+
+ /* Caller might not want any data. */
+ if ((!buf || !buf_size) && !line_count)
+@@ -1317,8 +1316,7 @@ static bool copy_data(struct prb_data_ri
+
+ data_size = min_t(u16, buf_size, len);
+
+- if (!WARN_ON_ONCE(!data_size))
+- memcpy(&buf[0], data, data_size); /* LMM(copy_data:A) */
++ memcpy(&buf[0], data, data_size); /* LMM(copy_data:A) */
+ return true;
+ }
+
+@@ -1355,11 +1353,11 @@ static int desc_read_committed_seq(struc
+
+ /*
+ * A descriptor in the reusable state may no longer have its data
+- * available; report it as a data-less record. Or the record may
+- * actually be a data-less record.
++ * available; report it as existing but with lost data. Or the record
++ * may actually be a record with lost data.
+ */
+ if (d_state == desc_reusable ||
+- (blk_lpos->begin == INVALID_LPOS && blk_lpos->next == INVALID_LPOS)) {
++ (blk_lpos->begin == FAILED_LPOS && blk_lpos->next == FAILED_LPOS)) {
+ return -ENOENT;
+ }
+
+@@ -1659,10 +1657,10 @@ void prb_init(struct printk_ringbuffer *
+
+ descs[_DESCS_COUNT(descbits) - 1].info.seq = 0;
+ atomic_long_set(&(descs[_DESCS_COUNT(descbits) - 1].state_var), DESC0_SV(descbits));
+- descs[_DESCS_COUNT(descbits) - 1].text_blk_lpos.begin = INVALID_LPOS;
+- descs[_DESCS_COUNT(descbits) - 1].text_blk_lpos.next = INVALID_LPOS;
+- descs[_DESCS_COUNT(descbits) - 1].dict_blk_lpos.begin = INVALID_LPOS;
+- descs[_DESCS_COUNT(descbits) - 1].dict_blk_lpos.next = INVALID_LPOS;
++ descs[_DESCS_COUNT(descbits) - 1].text_blk_lpos.begin = FAILED_LPOS;
++ descs[_DESCS_COUNT(descbits) - 1].text_blk_lpos.next = FAILED_LPOS;
++ descs[_DESCS_COUNT(descbits) - 1].dict_blk_lpos.begin = FAILED_LPOS;
++ descs[_DESCS_COUNT(descbits) - 1].dict_blk_lpos.next = FAILED_LPOS;
+ }
+
+ /**
+--- a/kernel/printk/printk_ringbuffer.h
++++ b/kernel/printk/printk_ringbuffer.h
+@@ -120,12 +120,13 @@ struct prb_reserved_entry {
+ #define DESC_FLAGS_MASK (DESC_COMMITTED_MASK | DESC_REUSE_MASK)
+ #define DESC_ID_MASK (~DESC_FLAGS_MASK)
+ #define DESC_ID(sv) ((sv) & DESC_ID_MASK)
+-#define INVALID_LPOS 1
++#define FAILED_LPOS 0x1
++#define NO_LPOS 0x3
+
+-#define INVALID_BLK_LPOS \
++#define FAILED_BLK_LPOS \
+ { \
+- .begin = INVALID_LPOS, \
+- .next = INVALID_LPOS, \
++ .begin = FAILED_LPOS, \
++ .next = FAILED_LPOS, \
+ }
+
+ /*
+@@ -147,7 +148,7 @@ struct prb_reserved_entry {
+ *
+ * To satisfy Req1, the tail initially points to a descriptor that is
+ * minimally initialized (having no data block, i.e. data-less with the
+- * data block's lpos @begin and @next values set to INVALID_LPOS).
++ * data block's lpos @begin and @next values set to FAILED_LPOS).
+ *
+ * To satisfy Req2, the initial tail descriptor is initialized to the
+ * reusable state. Readers recognize reusable descriptors as existing
+@@ -242,8 +243,8 @@ static struct prb_desc _##name##_descs[_
+ /* reusable */ \
+ .state_var = ATOMIC_INIT(DESC0_SV(descbits)), \
+ /* no associated data block */ \
+- .text_blk_lpos = INVALID_BLK_LPOS, \
+- .dict_blk_lpos = INVALID_BLK_LPOS, \
++ .text_blk_lpos = FAILED_BLK_LPOS, \
++ .dict_blk_lpos = FAILED_BLK_LPOS, \
+ }, \
+ }; \
+ static struct printk_ringbuffer name = { \
diff --git a/debian/patches-rt/0006-sched-hotplug-Consolidate-task-migration-on-CPU-unpl.patch b/debian/patches-rt/0006-sched-hotplug-Consolidate-task-migration-on-CPU-unpl.patch
index 68af43088..a23d9079b 100644
--- a/debian/patches-rt/0006-sched-hotplug-Consolidate-task-migration-on-CPU-unpl.patch
+++ b/debian/patches-rt/0006-sched-hotplug-Consolidate-task-migration-on-CPU-unpl.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
-Date: Mon, 5 Oct 2020 16:57:23 +0200
-Subject: [PATCH 06/17] sched/hotplug: Consolidate task migration on CPU unplug
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Date: Fri, 23 Oct 2020 12:12:04 +0200
+Subject: [PATCH 06/19] sched/hotplug: Consolidate task migration on CPU unplug
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
With the new mechanism which kicks tasks off the outgoing CPU at the end of
schedule() the situation on an outgoing CPU right before the stopper thread
@@ -86,7 +86,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
.name = "smpboot/threads:online",
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -6742,120 +6742,6 @@ void idle_task_exit(void)
+@@ -6741,120 +6741,6 @@ void idle_task_exit(void)
/* finish_cpu(), as ran on the BP, will clean up the active_mm state */
}
@@ -207,7 +207,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static int __balance_push_cpu_stop(void *arg)
{
struct task_struct *p = arg;
-@@ -7127,10 +7013,6 @@ int sched_cpu_deactivate(unsigned int cp
+@@ -7123,10 +7009,6 @@ int sched_cpu_deactivate(unsigned int cp
return ret;
}
sched_domains_numa_masks_clear(cpu);
@@ -218,7 +218,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
}
-@@ -7150,6 +7032,41 @@ int sched_cpu_starting(unsigned int cpu)
+@@ -7146,6 +7028,41 @@ int sched_cpu_starting(unsigned int cpu)
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -260,7 +260,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int sched_cpu_dying(unsigned int cpu)
{
struct rq *rq = cpu_rq(cpu);
-@@ -7163,7 +7080,6 @@ int sched_cpu_dying(unsigned int cpu)
+@@ -7159,7 +7076,6 @@ int sched_cpu_dying(unsigned int cpu)
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
set_rq_offline(rq);
}
diff --git a/debian/patches-rt/0006-x86-tsc-Use-seqcount_latch_t.patch b/debian/patches-rt/0006-x86-tsc-Use-seqcount_latch_t.patch
index c8fcfab1f..8c4c54ec6 100644
--- a/debian/patches-rt/0006-x86-tsc-Use-seqcount_latch_t.patch
+++ b/debian/patches-rt/0006-x86-tsc-Use-seqcount_latch_t.patch
@@ -1,7 +1,7 @@
From: "Ahmed S. Darwish" <a.darwish@linutronix.de>
Date: Thu, 27 Aug 2020 13:40:42 +0200
Subject: [PATCH 06/13] x86/tsc: Use seqcount_latch_t
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Latch sequence counters have unique read and write APIs, and thus
seqcount_latch_t was recently introduced at seqlock.h.
diff --git a/debian/patches-rt/0007-locking-rtmutex-Add-rtmutex_lock_killable.patch b/debian/patches-rt/0007-locking-rtmutex-Add-rtmutex_lock_killable.patch
deleted file mode 100644
index bcf780c6f..000000000
--- a/debian/patches-rt/0007-locking-rtmutex-Add-rtmutex_lock_killable.patch
+++ /dev/null
@@ -1,50 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Thu, 9 Jun 2011 11:43:52 +0200
-Subject: [PATCH 07/23] locking/rtmutex: Add rtmutex_lock_killable()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-Add "killable" type to rtmutex. We need this since rtmutex are used as
-"normal" mutexes which do use this type.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- include/linux/rtmutex.h | 1 +
- kernel/locking/rtmutex.c | 17 +++++++++++++++++
- 2 files changed, 18 insertions(+)
-
---- a/include/linux/rtmutex.h
-+++ b/include/linux/rtmutex.h
-@@ -99,6 +99,7 @@ extern void rt_mutex_lock(struct rt_mute
- #endif
-
- extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
-+extern int rt_mutex_lock_killable(struct rt_mutex *lock);
- extern int rt_mutex_trylock(struct rt_mutex *lock);
-
- extern void rt_mutex_unlock(struct rt_mutex *lock);
---- a/kernel/locking/rtmutex.c
-+++ b/kernel/locking/rtmutex.c
-@@ -1538,6 +1538,23 @@ int __sched __rt_mutex_futex_trylock(str
- }
-
- /**
-+ * rt_mutex_lock_killable - lock a rt_mutex killable
-+ *
-+ * @lock: the rt_mutex to be locked
-+ *
-+ * Returns:
-+ * 0 on success
-+ * -EINTR when interrupted by a signal
-+ */
-+int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
-+{
-+ might_sleep();
-+
-+ return rt_mutex_fastlock(lock, TASK_KILLABLE, rt_mutex_slowlock);
-+}
-+EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
-+
-+/**
- * rt_mutex_trylock - try to lock a rt_mutex
- *
- * @lock: the rt_mutex to be locked
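A hedged caller-side sketch of the API this (now dropped) patch added; per its kerneldoc above, the function returns 0 on success and -EINTR when a fatal signal arrives while waiting:

    static int do_locked_work(struct rt_mutex *lock)
    {
        int ret;

        ret = rt_mutex_lock_killable(lock);
        if (ret)
            return ret;  /* -EINTR: killed while blocked on the lock */

        /* ... critical section ... */

        rt_mutex_unlock(lock);
        return 0;
    }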
diff --git a/debian/patches-rt/0008-locking-rtmutex-Make-lock_killable-work.patch b/debian/patches-rt/0007-locking-rtmutex-Make-lock_killable-work.patch
index 7d6363701..4157113ab 100644
--- a/debian/patches-rt/0008-locking-rtmutex-Make-lock_killable-work.patch
+++ b/debian/patches-rt/0007-locking-rtmutex-Make-lock_killable-work.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 1 Apr 2017 12:50:59 +0200
-Subject: [PATCH 08/23] locking/rtmutex: Make lock_killable work
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Subject: [PATCH 07/22] locking/rtmutex: Make lock_killable work
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Locking an rt mutex killable does not work because signal handling is
restricted to TASK_INTERRUPTIBLE.
diff --git a/debian/patches-rt/0007-printk-rb-add-functionality-required-by-printk.patch b/debian/patches-rt/0007-printk-rb-add-functionality-required-by-printk.patch
deleted file mode 100644
index a67a64379..000000000
--- a/debian/patches-rt/0007-printk-rb-add-functionality-required-by-printk.patch
+++ /dev/null
@@ -1,160 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Tue, 12 Feb 2019 15:29:45 +0100
-Subject: [PATCH 07/25] printk-rb: add functionality required by printk
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-The printk subsystem needs to be able to query the size of the ring
-buffer, seek to specific entries within the ring buffer, and track
-if records could not be stored in the ring buffer.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/printk_ringbuffer.h | 5 ++
- lib/printk_ringbuffer.c | 95 ++++++++++++++++++++++++++++++++++++++
- 2 files changed, 100 insertions(+)
-
---- a/include/linux/printk_ringbuffer.h
-+++ b/include/linux/printk_ringbuffer.h
-@@ -17,6 +17,7 @@ struct printk_ringbuffer {
- unsigned int size_bits;
-
- u64 seq;
-+ atomic_long_t lost;
-
- atomic_long_t tail;
- atomic_long_t head;
-@@ -78,6 +79,7 @@ static struct printk_ringbuffer name = {
- .buffer = &_##name##_buffer[0], \
- .size_bits = szbits, \
- .seq = 0, \
-+ .lost = ATOMIC_LONG_INIT(0), \
- .tail = ATOMIC_LONG_INIT(-111 * sizeof(long)), \
- .head = ATOMIC_LONG_INIT(-111 * sizeof(long)), \
- .reserve = ATOMIC_LONG_INIT(-111 * sizeof(long)), \
-@@ -100,9 +102,12 @@ void prb_iter_copy(struct prb_iterator *
- int prb_iter_next(struct prb_iterator *iter, char *buf, int size, u64 *seq);
- int prb_iter_wait_next(struct prb_iterator *iter, char *buf, int size,
- u64 *seq);
-+int prb_iter_seek(struct prb_iterator *iter, u64 seq);
- int prb_iter_data(struct prb_iterator *iter, char *buf, int size, u64 *seq);
-
- /* utility functions */
-+int prb_buffer_size(struct printk_ringbuffer *rb);
-+void prb_inc_lost(struct printk_ringbuffer *rb);
- void prb_lock(struct prb_cpulock *cpu_lock, unsigned int *cpu_store);
- void prb_unlock(struct prb_cpulock *cpu_lock, unsigned int cpu_store);
-
---- a/lib/printk_ringbuffer.c
-+++ b/lib/printk_ringbuffer.c
-@@ -175,11 +175,16 @@ void prb_commit(struct prb_handle *h)
- head = PRB_WRAP_LPOS(rb, head, 1);
- continue;
- }
-+ while (atomic_long_read(&rb->lost)) {
-+ atomic_long_dec(&rb->lost);
-+ rb->seq++;
-+ }
- e->seq = ++rb->seq;
- head += e->size;
- changed = true;
- }
- atomic_long_set_release(&rb->head, res);
-+
- atomic_dec(&rb->ctx);
-
- if (atomic_long_read(&rb->reserve) == res)
-@@ -492,3 +497,93 @@ int prb_iter_wait_next(struct prb_iterat
-
- return ret;
- }
-+
-+/*
-+ * prb_iter_seek: Seek forward to a specific record.
-+ * @iter: Iterator to advance.
-+ * @seq: Record number to advance to.
-+ *
-+ * Advance @iter such that a following call to prb_iter_data() will provide
-+ * the contents of the specified record. If a record is specified that does
-+ * not yet exist, advance @iter to the end of the record list.
-+ *
-+ * Note that iterators cannot be rewound. So if a record is requested that
-+ * exists but is previous to @iter in position, @iter is considered invalid.
-+ *
-+ * It is safe to call this function from any context and state.
-+ *
-+ * Returns 1 on success, 0 if the specified record does not yet exist (@iter is
-+ * now at the end of the list), or -EINVAL if @iter is now invalid.
-+ */
-+int prb_iter_seek(struct prb_iterator *iter, u64 seq)
-+{
-+ u64 cur_seq;
-+ int ret;
-+
-+ /* first check if the iterator is already at the wanted seq */
-+ if (seq == 0) {
-+ if (iter->lpos == PRB_INIT)
-+ return 1;
-+ else
-+ return -EINVAL;
-+ }
-+ if (iter->lpos != PRB_INIT) {
-+ if (prb_iter_data(iter, NULL, 0, &cur_seq) >= 0) {
-+ if (cur_seq == seq)
-+ return 1;
-+ if (cur_seq > seq)
-+ return -EINVAL;
-+ }
-+ }
-+
-+ /* iterate to find the wanted seq */
-+ for (;;) {
-+ ret = prb_iter_next(iter, NULL, 0, &cur_seq);
-+ if (ret <= 0)
-+ break;
-+
-+ if (cur_seq == seq)
-+ break;
-+
-+ if (cur_seq > seq) {
-+ ret = -EINVAL;
-+ break;
-+ }
-+ }
-+
-+ return ret;
-+}
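A hedged fragment showing a reader resuming at a known record, using the return convention documented above (1 = found, 0 = not yet written, -EINVAL = iterator no longer valid); iter, buf, seq and last_seq are assumed to be declared by the caller:

    ret = prb_iter_seek(&iter, last_seq);
    if (ret == -EINVAL) {
        /* record is behind us or iterator is stale: start over */
        prb_iter_init(&iter, &printk_rb, NULL);
    } else if (ret == 1) {
        /* iterator now points at last_seq; fetch its data */
        ret = prb_iter_data(&iter, buf, sizeof(buf), &seq);
    }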
-+
-+/*
-+ * prb_buffer_size: Get the size of the ring buffer.
-+ * @rb: The ring buffer to get the size of.
-+ *
-+ * Return the number of bytes used for the ring buffer entry storage area.
-+ * Note that this area stores both entry header and entry data. Therefore
-+ * this represents an upper bound on the amount of data that can be stored
-+ * in the ring buffer.
-+ *
-+ * It is safe to call this function from any context and state.
-+ *
-+ * Returns the size in bytes of the entry storage area.
-+ */
-+int prb_buffer_size(struct printk_ringbuffer *rb)
-+{
-+ return PRB_SIZE(rb);
-+}
-+
-+/*
-+ * prb_inc_lost: Increment the seq counter to signal a lost record.
-+ * @rb: The ring buffer to increment the seq of.
-+ *
-+ * Increment the seq counter so that a seq number is intentionally missing
-+ * for the readers. This allows readers to identify that a record is
-+ * missing. A writer will typically use this function if prb_reserve()
-+ * fails.
-+ *
-+ * It is safe to call this function from any context and state.
-+ */
-+void prb_inc_lost(struct printk_ringbuffer *rb)
-+{
-+ atomic_long_inc(&rb->lost);
-+}
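A hedged writer-side sketch of the lost-record accounting described above; the prb_reserve()/prb_commit() signatures are assumed from earlier patches in this series (prb_reserve() returning a buffer pointer or NULL on failure) and are not shown in this hunk:

    struct prb_handle h;
    char *text;

    text = prb_reserve(&h, &printk_rb, len);
    if (!text) {
        prb_inc_lost(&printk_rb);  /* readers will see the seq gap */
        return 0;
    }
    memcpy(text, msg, len);
    prb_commit(&h);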
diff --git a/debian/patches-rt/0007-printk-reduce-LOG_BUF_SHIFT-range-for-H8300.patch b/debian/patches-rt/0007-printk-reduce-LOG_BUF_SHIFT-range-for-H8300.patch
new file mode 100644
index 000000000..011a8a84b
--- /dev/null
+++ b/debian/patches-rt/0007-printk-reduce-LOG_BUF_SHIFT-range-for-H8300.patch
@@ -0,0 +1,33 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Wed, 12 Aug 2020 09:37:22 +0206
+Subject: [PATCH 07/25] printk: reduce LOG_BUF_SHIFT range for H8300
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+The .bss section for the h8300 is relatively small. A value of
+CONFIG_LOG_BUF_SHIFT that is larger than 19 will create a static
+printk ringbuffer that is too large. Limit the range appropriately
+for the H8300.
+
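For scale, the static ringbuffer's data storage grows as 1 << CONFIG_LOG_BUF_SHIFT (descriptor overhead comes on top), so the new limits work out roughly to:

    1 << 19 = 524288 bytes (512 KiB)   /* new maximum on H8300 */
    1 << 25 = 33554432 bytes (32 MiB)  /* maximum elsewhere */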
+Reported-by: kernel test robot <lkp@intel.com>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Acked-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20200812073122.25412-1-john.ogness@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ init/Kconfig | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -682,7 +682,8 @@ config IKHEADERS
+
+ config LOG_BUF_SHIFT
+ int "Kernel log buffer size (16 => 64KB, 17 => 128KB)"
+- range 12 25
++ range 12 25 if !H8300
++ range 12 19 if H8300
+ default 17
+ depends on PRINTK
+ help
diff --git a/debian/patches-rt/0007-rbtree_latch-Use-seqcount_latch_t.patch b/debian/patches-rt/0007-rbtree_latch-Use-seqcount_latch_t.patch
index 520e3b036..9e399f273 100644
--- a/debian/patches-rt/0007-rbtree_latch-Use-seqcount_latch_t.patch
+++ b/debian/patches-rt/0007-rbtree_latch-Use-seqcount_latch_t.patch
@@ -1,7 +1,7 @@
From: "Ahmed S. Darwish" <a.darwish@linutronix.de>
Date: Thu, 27 Aug 2020 13:40:43 +0200
Subject: [PATCH 07/13] rbtree_latch: Use seqcount_latch_t
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Latch sequence counters have unique read and write APIs, and thus
seqcount_latch_t was recently introduced at seqlock.h.
diff --git a/debian/patches-rt/0007-sched-Fix-hotplug-vs-CPU-bandwidth-control.patch b/debian/patches-rt/0007-sched-Fix-hotplug-vs-CPU-bandwidth-control.patch
index 48aba040a..0764a5972 100644
--- a/debian/patches-rt/0007-sched-Fix-hotplug-vs-CPU-bandwidth-control.patch
+++ b/debian/patches-rt/0007-sched-Fix-hotplug-vs-CPU-bandwidth-control.patch
@@ -1,7 +1,7 @@
From: Peter Zijlstra <peterz@infradead.org>
-Date: Mon, 5 Oct 2020 16:57:24 +0200
-Subject: [PATCH 07/17] sched: Fix hotplug vs CPU bandwidth control
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Date: Fri, 23 Oct 2020 12:12:05 +0200
+Subject: [PATCH 07/19] sched: Fix hotplug vs CPU bandwidth control
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Since we now migrate tasks away before DYING, we should also move
bandwidth unthrottle, otherwise we can gain tasks from unthrottle
@@ -19,13 +19,13 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/sched/core.c | 14 ++++++++++----
- kernel/sched/deadline.c | 5 +----
- kernel/sched/rt.c | 5 +----
- 3 files changed, 12 insertions(+), 12 deletions(-)
+ kernel/sched/deadline.c | 2 +-
+ kernel/sched/rt.c | 2 +-
+ 3 files changed, 12 insertions(+), 6 deletions(-)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -6981,6 +6981,8 @@ int sched_cpu_activate(unsigned int cpu)
+@@ -6977,6 +6977,8 @@ int sched_cpu_activate(unsigned int cpu)
int sched_cpu_deactivate(unsigned int cpu)
{
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int ret;
set_cpu_active(cpu, false);
-@@ -6995,6 +6997,14 @@ int sched_cpu_deactivate(unsigned int cp
+@@ -6991,6 +6993,14 @@ int sched_cpu_deactivate(unsigned int cp
balance_push_set(cpu, true);
@@ -49,7 +49,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_SCHED_SMT
/*
* When going down, decrement the number of cores with SMT present.
-@@ -7076,10 +7086,6 @@ int sched_cpu_dying(unsigned int cpu)
+@@ -7072,10 +7082,6 @@ int sched_cpu_dying(unsigned int cpu)
sched_tick_stop(cpu);
rq_lock_irqsave(rq, &rf);
@@ -71,16 +71,6 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static DEFINE_PER_CPU(struct callback_head, dl_push_head);
-@@ -2323,9 +2323,6 @@ static void rq_online_dl(struct rq *rq)
- /* Assumes rq->lock is held */
- static void rq_offline_dl(struct rq *rq)
- {
-- if (rq->dl.overloaded)
-- dl_clear_overload(rq);
--
- cpudl_clear(&rq->rd->cpudl, rq->cpu);
- cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
- }
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -265,7 +265,7 @@ static void pull_rt_task(struct rq *this
@@ -92,13 +82,3 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static inline int rt_overloaded(struct rq *rq)
-@@ -2245,9 +2245,6 @@ static void rq_online_rt(struct rq *rq)
- /* Assumes rq->lock is held */
- static void rq_offline_rt(struct rq *rq)
- {
-- if (rq->rt.overloaded)
-- rt_clear_overload(rq);
--
- __disable_runtime(rq);
-
- cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
diff --git a/debian/patches-rt/0020-serial-8250-implement-write_atomic.patch b/debian/patches-rt/0007-serial-8250-implement-write_atomic.patch
index 88e670e0e..2f80692a4 100644
--- a/debian/patches-rt/0020-serial-8250-implement-write_atomic.patch
+++ b/debian/patches-rt/0007-serial-8250-implement-write_atomic.patch
@@ -1,84 +1,100 @@
From: John Ogness <john.ogness@linutronix.de>
-Date: Tue, 12 Feb 2019 15:29:58 +0100
-Subject: [PATCH 20/25] serial: 8250: implement write_atomic
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Date: Wed, 14 Oct 2020 20:31:46 +0200
+Subject: [PATCH 07/15] serial: 8250: implement write_atomic
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
-Implement a non-sleeping NMI-safe write_atomic console function in
-order to support emergency printk messages.
+Implement a non-sleeping NMI-safe write_atomic() console function in
+order to support emergency console printing.
Since interrupts need to be disabled during transmit, all usage of
-the IER register was wrapped with access functions that use the
-console_atomic_lock function to synchronize register access while
-tracking the state of the interrupts. This was necessary because
-write_atomic is can be calling from an NMI context that has
-preempted write_atomic.
+the IER register is wrapped with access functions that use the
+console_atomic_lock() function to synchronize register access while
+tracking the state of the interrupts. This is necessary because
+write_atomic() can be called from an NMI context that has preempted
+write_atomic().
Signed-off-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- drivers/tty/serial/8250/8250.h | 22 +++++
- drivers/tty/serial/8250/8250_core.c | 19 +++-
- drivers/tty/serial/8250/8250_dma.c | 4
- drivers/tty/serial/8250/8250_port.c | 153 +++++++++++++++++++++++++++---------
- include/linux/serial_8250.h | 5 +
- 5 files changed, 157 insertions(+), 46 deletions(-)
+ drivers/tty/serial/8250/8250.h | 47 ++++++++++++++++
+ drivers/tty/serial/8250/8250_core.c | 17 ++++--
+ drivers/tty/serial/8250/8250_fsl.c | 9 +++
+ drivers/tty/serial/8250/8250_ingenic.c | 7 ++
+ drivers/tty/serial/8250/8250_mtk.c | 29 +++++++++-
+ drivers/tty/serial/8250/8250_port.c | 92 ++++++++++++++++++++-------------
+ include/linux/serial_8250.h | 5 +
+ 7 files changed, 162 insertions(+), 44 deletions(-)
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
-@@ -96,6 +96,10 @@ struct serial8250_config {
- #define SERIAL8250_SHARE_IRQS 0
- #endif
-
-+void set_ier(struct uart_8250_port *up, unsigned char ier);
-+void clear_ier(struct uart_8250_port *up);
-+void restore_ier(struct uart_8250_port *up);
-+
- #define SERIAL8250_PORT_FLAGS(_base, _irq, _flags) \
- { \
- .iobase = _base, \
-@@ -139,6 +143,15 @@ static inline bool serial8250_set_THRI(s
- return true;
+@@ -130,12 +130,55 @@ static inline void serial_dl_write(struc
+ up->dl_write(up, value);
}
-+static inline bool serial8250_set_THRI_sier(struct uart_8250_port *up)
++static inline void serial8250_set_IER(struct uart_8250_port *up,
++ unsigned char ier)
++{
++ struct uart_port *port = &up->port;
++ unsigned int flags;
++ bool is_console;
++
++ is_console = uart_console(port);
++
++ if (is_console)
++ console_atomic_lock(&flags);
++
++ serial_out(up, UART_IER, ier);
++
++ if (is_console)
++ console_atomic_unlock(flags);
++}
++
++static inline unsigned char serial8250_clear_IER(struct uart_8250_port *up)
+{
-+ if (up->ier & UART_IER_THRI)
-+ return false;
-+ up->ier |= UART_IER_THRI;
-+ set_ier(up, up->ier);
-+ return true;
++ struct uart_port *port = &up->port;
++ unsigned int clearval = 0;
++ unsigned int prior;
++ unsigned int flags;
++ bool is_console;
++
++ is_console = uart_console(port);
++
++ if (up->capabilities & UART_CAP_UUE)
++ clearval = UART_IER_UUE;
++
++ if (is_console)
++ console_atomic_lock(&flags);
++
++ prior = serial_port_in(port, UART_IER);
++ serial_port_out(port, UART_IER, clearval);
++
++ if (is_console)
++ console_atomic_unlock(flags);
++
++ return prior;
+}
+
- static inline bool serial8250_clear_THRI(struct uart_8250_port *up)
+ static inline bool serial8250_set_THRI(struct uart_8250_port *up)
{
- if (!(up->ier & UART_IER_THRI))
-@@ -148,6 +161,15 @@ static inline bool serial8250_clear_THRI
+ if (up->ier & UART_IER_THRI)
+ return false;
+ up->ier |= UART_IER_THRI;
+- serial_out(up, UART_IER, up->ier);
++ serial8250_set_IER(up, up->ier);
return true;
}
-+static inline bool serial8250_clear_THRI_sier(struct uart_8250_port *up)
-+{
-+ if (!(up->ier & UART_IER_THRI))
-+ return false;
-+ up->ier &= ~UART_IER_THRI;
-+ set_ier(up, up->ier);
-+ return true;
-+}
-+
- struct uart_8250_port *serial8250_get_port(int line);
+@@ -144,7 +187,7 @@ static inline bool serial8250_clear_THRI
+ if (!(up->ier & UART_IER_THRI))
+ return false;
+ up->ier &= ~UART_IER_THRI;
+- serial_out(up, UART_IER, up->ier);
++ serial8250_set_IER(up, up->ier);
+ return true;
+ }
- void serial8250_rpm_get(struct uart_8250_port *p);
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
-@@ -265,7 +265,7 @@ static void serial8250_timeout(struct ti
- static void serial8250_backup_timeout(struct timer_list *t)
- {
- struct uart_8250_port *up = from_timer(up, t, timer);
-- unsigned int iir, ier = 0, lsr;
-+ unsigned int iir, lsr;
- unsigned long flags;
-
- spin_lock_irqsave(&up->port.lock, flags);
@@ -274,10 +274,8 @@ static void serial8250_backup_timeout(st
* Must disable interrupts or else we risk racing with the interrupt
* based handler.
@@ -88,7 +104,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- serial_out(up, UART_IER, 0);
- }
+ if (up->port.irq)
-+ clear_ier(up);
++ ier = serial8250_clear_IER(up);
iir = serial_in(up, UART_IIR);
@@ -97,7 +113,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (up->port.irq)
- serial_out(up, UART_IER, ier);
-+ restore_ier(up);
++ serial8250_set_IER(up, ier);
spin_unlock_irqrestore(&up->port.lock, flags);
@@ -124,26 +140,93 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
.write = univ8250_console_write,
.device = uart_console_device,
.setup = univ8250_console_setup,
---- a/drivers/tty/serial/8250/8250_dma.c
-+++ b/drivers/tty/serial/8250/8250_dma.c
-@@ -35,7 +35,7 @@ static void __dma_tx_complete(void *para
+--- a/drivers/tty/serial/8250/8250_fsl.c
++++ b/drivers/tty/serial/8250/8250_fsl.c
+@@ -53,9 +53,18 @@ int fsl8250_handle_irq(struct uart_port
- ret = serial8250_tx_dma(p);
- if (ret)
-- serial8250_set_THRI(p);
-+ serial8250_set_THRI_sier(p);
+ /* Stop processing interrupts on input overrun */
+ if ((orig_lsr & UART_LSR_OE) && (up->overrun_backoff_time_ms > 0)) {
++ unsigned int ca_flags;
+ unsigned long delay;
++ bool is_console;
- spin_unlock_irqrestore(&p->port.lock, flags);
++ is_console = uart_console(port);
++
++ if (is_console)
++ console_atomic_lock(&ca_flags);
+ up->ier = port->serial_in(port, UART_IER);
++ if (is_console)
++ console_atomic_unlock(ca_flags);
++
+ if (up->ier & (UART_IER_RLSI | UART_IER_RDI)) {
+ port->ops->stop_rx(port);
+ } else {
+--- a/drivers/tty/serial/8250/8250_ingenic.c
++++ b/drivers/tty/serial/8250/8250_ingenic.c
+@@ -146,6 +146,8 @@ OF_EARLYCON_DECLARE(x1000_uart, "ingenic
+
+ static void ingenic_uart_serial_out(struct uart_port *p, int offset, int value)
+ {
++ unsigned int flags;
++ bool is_console;
+ int ier;
+
+ switch (offset) {
+@@ -167,7 +169,12 @@ static void ingenic_uart_serial_out(stru
+ * If we have enabled modem status IRQs we should enable
+ * modem mode.
+ */
++ is_console = uart_console(p);
++ if (is_console)
++ console_atomic_lock(&flags);
+ ier = p->serial_in(p, UART_IER);
++ if (is_console)
++ console_atomic_unlock(flags);
+
+ if (ier & UART_IER_MSI)
+ value |= UART_MCR_MDCE | UART_MCR_FCM;
+--- a/drivers/tty/serial/8250/8250_mtk.c
++++ b/drivers/tty/serial/8250/8250_mtk.c
+@@ -213,12 +213,37 @@ static void mtk8250_shutdown(struct uart
+
+ static void mtk8250_disable_intrs(struct uart_8250_port *up, int mask)
+ {
+- serial_out(up, UART_IER, serial_in(up, UART_IER) & (~mask));
++ struct uart_port *port = &up->port;
++ unsigned int flags;
++ unsigned int ier;
++ bool is_console;
++
++ is_console = uart_console(port);
++
++ if (is_console)
++ console_atomic_lock(&flags);
++
++ ier = serial_in(up, UART_IER);
++ serial_out(up, UART_IER, ier & (~mask));
++
++ if (is_console)
++ console_atomic_unlock(flags);
}
-@@ -98,7 +98,7 @@ int serial8250_tx_dma(struct uart_8250_p
- dma_async_issue_pending(dma->txchan);
- if (dma->tx_err) {
- dma->tx_err = 0;
-- serial8250_clear_THRI(p);
-+ serial8250_clear_THRI_sier(p);
- }
- return 0;
- err:
+
+ static void mtk8250_enable_intrs(struct uart_8250_port *up, int mask)
+ {
+- serial_out(up, UART_IER, serial_in(up, UART_IER) | mask);
++ struct uart_port *port = &up->port;
++ unsigned int flags;
++ unsigned int ier;
++
++ if (uart_console(port))
++ console_atomic_lock(&flags);
++
++ ier = serial_in(up, UART_IER);
++ serial_out(up, UART_IER, ier | mask);
++
++ if (uart_console(port))
++ console_atomic_unlock(flags);
+ }
+
+ static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode)
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -757,7 +757,7 @@ static void serial8250_set_sleep(struct
@@ -151,7 +234,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
serial_out(p, UART_LCR, 0);
}
- serial_out(p, UART_IER, sleep ? UART_IERX_SLEEP : 0);
-+ set_ier(p, sleep ? UART_IERX_SLEEP : 0);
++ serial8250_set_IER(p, sleep ? UART_IERX_SLEEP : 0);
if (p->capabilities & UART_CAP_EFR) {
serial_out(p, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(p, UART_EFR, efr);
@@ -160,7 +243,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
up->port.read_status_mask &= ~UART_LSR_DR;
- serial_port_out(port, UART_IER, up->ier);
-+ set_ier(up, up->ier);
++ serial8250_set_IER(up, up->ier);
serial8250_rpm_put(up);
}
@@ -169,34 +252,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
p->ier |= UART_IER_RLSI | UART_IER_RDI;
- serial_port_out(&p->port, UART_IER, p->ier);
-+ set_ier(p, p->ier);
++ serial8250_set_IER(p, p->ier);
}
}
EXPORT_SYMBOL_GPL(serial8250_em485_stop_tx);
-@@ -1515,7 +1515,7 @@ static void __stop_tx_rs485(struct uart_
-
- static inline void __do_stop_tx(struct uart_8250_port *p)
- {
-- if (serial8250_clear_THRI(p))
-+ if (serial8250_clear_THRI_sier(p))
- serial8250_rpm_put_tx(p);
- }
-
-@@ -1563,7 +1563,7 @@ static inline void __start_tx(struct uar
- if (up->dma && !up->dma->tx_dma(up))
- return;
-
-- if (serial8250_set_THRI(up)) {
-+ if (serial8250_set_THRI_sier(up)) {
- if (up->bugs & UART_BUG_TXEN) {
- unsigned char lsr;
-
@@ -1687,7 +1687,7 @@ static void serial8250_disable_ms(struct
mctrl_gpio_disable_ms(up->gpios);
up->ier &= ~UART_IER_MSI;
- serial_port_out(port, UART_IER, up->ier);
-+ set_ier(up, up->ier);
++ serial8250_set_IER(up, up->ier);
}
static void serial8250_enable_ms(struct uart_port *port)
@@ -205,68 +270,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
serial8250_rpm_get(up);
- serial_port_out(port, UART_IER, up->ier);
-+ set_ier(up, up->ier);
++ serial8250_set_IER(up, up->ier);
serial8250_rpm_put(up);
}
-@@ -2083,6 +2083,52 @@ static void wait_for_xmitr(struct uart_8
- }
- }
-
-+static atomic_t ier_counter = ATOMIC_INIT(0);
-+static atomic_t ier_value = ATOMIC_INIT(0);
-+
-+void set_ier(struct uart_8250_port *up, unsigned char ier)
-+{
-+ struct uart_port *port = &up->port;
-+ unsigned int flags;
-+
-+ console_atomic_lock(&flags);
-+ if (atomic_read(&ier_counter) > 0)
-+ atomic_set(&ier_value, ier);
-+ else
-+ serial_port_out(port, UART_IER, ier);
-+ console_atomic_unlock(flags);
-+}
-+
-+void clear_ier(struct uart_8250_port *up)
-+{
-+ struct uart_port *port = &up->port;
-+ unsigned int ier_cleared = 0;
-+ unsigned int flags;
-+ unsigned int ier;
-+
-+ console_atomic_lock(&flags);
-+ atomic_inc(&ier_counter);
-+ ier = serial_port_in(port, UART_IER);
-+ if (up->capabilities & UART_CAP_UUE)
-+ ier_cleared = UART_IER_UUE;
-+ if (ier != ier_cleared) {
-+ serial_port_out(port, UART_IER, ier_cleared);
-+ atomic_set(&ier_value, ier);
-+ }
-+ console_atomic_unlock(flags);
-+}
-+
-+void restore_ier(struct uart_8250_port *up)
-+{
-+ struct uart_port *port = &up->port;
-+ unsigned int flags;
-+
-+ console_atomic_lock(&flags);
-+ if (atomic_fetch_dec(&ier_counter) == 1)
-+ serial_port_out(port, UART_IER, atomic_read(&ier_value));
-+ console_atomic_unlock(flags);
-+}
-+
- #ifdef CONFIG_CONSOLE_POLL
- /*
- * Console polling routines for writing and reading from the uart while
-@@ -2114,18 +2160,10 @@ static int serial8250_get_poll_char(stru
- static void serial8250_put_poll_char(struct uart_port *port,
- unsigned char c)
- {
-- unsigned int ier;
+@@ -2118,14 +2118,7 @@ static void serial8250_put_poll_char(str
struct uart_8250_port *up = up_to_u8250p(port);
serial8250_rpm_get(up);
@@ -278,38 +286,38 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- serial_port_out(port, UART_IER, UART_IER_UUE);
- else
- serial_port_out(port, UART_IER, 0);
-+ clear_ier(up);
++ ier = serial8250_clear_IER(up);
wait_for_xmitr(up, BOTH_EMPTY);
/*
-@@ -2138,7 +2176,7 @@ static void serial8250_put_poll_char(str
+@@ -2138,7 +2131,7 @@ static void serial8250_put_poll_char(str
* and restore the IER
*/
wait_for_xmitr(up, BOTH_EMPTY);
- serial_port_out(port, UART_IER, ier);
-+ restore_ier(up);
++ serial8250_set_IER(up, ier);
serial8250_rpm_put(up);
}
-@@ -2441,7 +2479,7 @@ void serial8250_do_shutdown(struct uart_
+@@ -2441,7 +2434,7 @@ void serial8250_do_shutdown(struct uart_
*/
spin_lock_irqsave(&port->lock, flags);
up->ier = 0;
- serial_port_out(port, UART_IER, 0);
-+ set_ier(up, 0);
++ serial8250_set_IER(up, 0);
spin_unlock_irqrestore(&port->lock, flags);
synchronize_irq(port->irq);
-@@ -2768,7 +2806,7 @@ serial8250_do_set_termios(struct uart_po
+@@ -2768,7 +2761,7 @@ serial8250_do_set_termios(struct uart_po
if (up->capabilities & UART_CAP_RTOIE)
up->ier |= UART_IER_RTOIE;
- serial_port_out(port, UART_IER, up->ier);
-+ set_ier(up, up->ier);
++ serial8250_set_IER(up, up->ier);
if (up->capabilities & UART_CAP_EFR) {
unsigned char efr = 0;
-@@ -3234,7 +3272,7 @@ EXPORT_SYMBOL_GPL(serial8250_set_default
+@@ -3234,7 +3227,7 @@ EXPORT_SYMBOL_GPL(serial8250_set_default
#ifdef CONFIG_SERIAL_8250_CONSOLE
@@ -318,7 +326,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct uart_8250_port *up = up_to_u8250p(port);
-@@ -3242,6 +3280,18 @@ static void serial8250_console_putchar(s
+@@ -3242,6 +3235,18 @@ static void serial8250_console_putchar(s
serial_port_out(port, UART_TX, ch);
}
@@ -337,7 +345,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Restore serial console when h/w power-off detected
*/
-@@ -3263,6 +3313,42 @@ static void serial8250_console_restore(s
+@@ -3263,6 +3268,32 @@ static void serial8250_console_restore(s
serial8250_out_MCR(up, UART_MCR_DTR | UART_MCR_RTS);
}
@@ -346,20 +354,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+{
+ struct uart_port *port = &up->port;
+ unsigned int flags;
-+ bool locked;
++ unsigned int ier;
+
+ console_atomic_lock(&flags);
+
-+ /*
-+ * If possible, keep any other CPUs from working with the
-+ * UART until the atomic message is completed. This helps
-+ * to keep the output more orderly.
-+ */
-+ locked = spin_trylock(&port->lock);
-+
+ touch_nmi_watchdog();
+
-+ clear_ier(up);
++ ier = serial8250_clear_IER(up);
+
+ if (atomic_fetch_inc(&up->console_printing)) {
+ uart_console_write(port, "\n", 1,
@@ -369,10 +370,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ atomic_dec(&up->console_printing);
+
+ wait_for_xmitr(up, BOTH_EMPTY);
-+ restore_ier(up);
-+
-+ if (locked)
-+ spin_unlock(&port->lock);
++ serial8250_set_IER(up, ier);
+
+ console_atomic_unlock(flags);
+}
@@ -380,11 +378,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Print a string to the serial port trying not to disturb
* any possible real use of the port...
-@@ -3278,25 +3364,12 @@ void serial8250_console_write(struct uar
- struct uart_8250_em485 *em485 = up->em485;
+@@ -3279,24 +3310,12 @@ void serial8250_console_write(struct uar
struct uart_port *port = &up->port;
unsigned long flags;
-- unsigned int ier;
+ unsigned int ier;
- int locked = 1;
touch_nmi_watchdog();
@@ -404,11 +401,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- serial_port_out(port, UART_IER, UART_IER_UUE);
- else
- serial_port_out(port, UART_IER, 0);
-+ clear_ier(up);
++ ier = serial8250_clear_IER(up);
/* check scratch reg to see if port powered off during system sleep */
if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) {
-@@ -3310,7 +3383,9 @@ void serial8250_console_write(struct uar
+@@ -3310,7 +3329,9 @@ void serial8250_console_write(struct uar
mdelay(port->rs485.delay_rts_before_send);
}
@@ -418,16 +415,17 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Finally, wait for transmitter to become empty
-@@ -3324,7 +3399,7 @@ void serial8250_console_write(struct uar
+@@ -3323,8 +3344,7 @@ void serial8250_console_write(struct uar
+ if (em485->tx_stopped)
up->rs485_stop_tx(up);
}
-
+-
- serial_port_out(port, UART_IER, ier);
-+ restore_ier(up);
++ serial8250_set_IER(up, ier);
/*
* The receive handling will happen properly because the
-@@ -3336,8 +3411,7 @@ void serial8250_console_write(struct uar
+@@ -3336,8 +3356,7 @@ void serial8250_console_write(struct uar
if (up->msr_saved_flags)
serial8250_modem_status(up);
@@ -437,7 +435,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static unsigned int probe_baud(struct uart_port *port)
-@@ -3357,6 +3431,7 @@ static unsigned int probe_baud(struct ua
+@@ -3357,6 +3376,7 @@ static unsigned int probe_baud(struct ua
int serial8250_console_setup(struct uart_port *port, char *options, bool probe)
{
@@ -445,7 +443,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int baud = 9600;
int bits = 8;
int parity = 'n';
-@@ -3366,6 +3441,8 @@ int serial8250_console_setup(struct uart
+@@ -3366,6 +3386,8 @@ int serial8250_console_setup(struct uart
if (!port->iobase && !port->membase)
return -ENODEV;
diff --git a/debian/patches-rt/0008-docs-vmcoreinfo-add-lockless-printk-ringbuffer-vmcor.patch b/debian/patches-rt/0008-docs-vmcoreinfo-add-lockless-printk-ringbuffer-vmcor.patch
new file mode 100644
index 000000000..e3f9bf24f
--- /dev/null
+++ b/debian/patches-rt/0008-docs-vmcoreinfo-add-lockless-printk-ringbuffer-vmcor.patch
@@ -0,0 +1,182 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Fri, 14 Aug 2020 23:39:16 +0206
+Subject: [PATCH 08/25] docs: vmcoreinfo: add lockless printk ringbuffer
+ vmcoreinfo
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+With the introduction of the lockless printk ringbuffer, the
+VMCOREINFO relating to the kernel log buffer was changed. Update the
+documentation to match those changes.
+
+Fixes: 896fbe20b4e2333fb55 ("printk: use the lockless ringbuffer")
+Reported-by: Nick Desaulniers <ndesaulniers@google.com>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20200814213316.6394-1-john.ogness@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ Documentation/admin-guide/kdump/vmcoreinfo.rst | 143 ++++++++++++++++++-------
+ 1 file changed, 108 insertions(+), 35 deletions(-)
+
+--- a/Documentation/admin-guide/kdump/vmcoreinfo.rst
++++ b/Documentation/admin-guide/kdump/vmcoreinfo.rst
+@@ -189,50 +189,123 @@ from this.
+ Free areas descriptor. User-space tools use this value to iterate the
+ free_area ranges. MAX_ORDER is used by the zone buddy allocator.
+
+-log_first_idx
++prb
++---
++
++A pointer to the printk ringbuffer (struct printk_ringbuffer). This
++may be pointing to the static boot ringbuffer or the dynamically
+allocated ringbuffer, depending on when the core dump occurred.
++Used by user-space tools to read the active kernel log buffer.
++
++printk_rb_static
++----------------
++
++A pointer to the static boot printk ringbuffer. If @prb has a
++different value, this is useful for viewing the initial boot messages,
++which may have been overwritten in the dynamically allocated
++ringbuffer.
++
++clear_seq
++---------
++
++The sequence number of the printk() record after the last clear
++command. It indicates the first record after the last
+SYSLOG_ACTION_CLEAR, such as issued by 'dmesg -c'. Used by user-space
++tools to dump a subset of the dmesg log.
++
++printk_ringbuffer
++-----------------
++
++The size of a printk_ringbuffer structure. This structure contains all
++information required for accessing the various components of the
++kernel log buffer.
++
++(printk_ringbuffer, desc_ring|text_data_ring|dict_data_ring|fail)
++-----------------------------------------------------------------
++
++Offsets for the various components of the printk ringbuffer. Used by
++user-space tools to view the kernel log buffer without requiring the
++declaration of the structure.
++
++prb_desc_ring
+ -------------
+
+-Index of the first record stored in the buffer log_buf. Used by
+-user-space tools to read the strings in the log_buf.
++The size of the prb_desc_ring structure. This structure contains
++information about the set of record descriptors.
++
++(prb_desc_ring, count_bits|descs|head_id|tail_id)
++-------------------------------------------------
++
++Offsets for the fields describing the set of record descriptors. Used
++by user-space tools to be able to traverse the descriptors without
++requiring the declaration of the structure.
++
++prb_desc
++--------
++
++The size of the prb_desc structure. This structure contains
++information about a single record descriptor.
++
++(prb_desc, info|state_var|text_blk_lpos|dict_blk_lpos)
++------------------------------------------------------
++
+Offsets for the fields describing a single record descriptor. Used by
++user-space tools to be able to read descriptors without requiring
++the declaration of the structure.
+
+-log_buf
+--------
++prb_data_blk_lpos
++-----------------
+
+-Console output is written to the ring buffer log_buf at index
+-log_first_idx. Used to get the kernel log.
++The size of the prb_data_blk_lpos structure. This structure contains
++information about where the text or dictionary data (data block) is
++located within the respective data ring.
+
+-log_buf_len
++(prb_data_blk_lpos, begin|next)
++-------------------------------
++
++Offsets for the fields describing the location of a data block. Used
++by user-space tools to be able to locate data blocks without
++requiring the declaration of the structure.
++
++printk_info
+ -----------
+
+-log_buf's length.
++The size of the printk_info structure. This structure contains all
++the meta-data for a record.
+
+-clear_idx
+----------
++(printk_info, seq|ts_nsec|text_len|dict_len|caller_id)
++------------------------------------------------------
++
++Offsets for the fields providing the meta-data for a record. Used by
++user-space tools to be able to read the information without requiring
++the declaration of the structure.
++
++prb_data_ring
++-------------
++
++The size of the prb_data_ring structure. This structure contains
++information about a set of data blocks.
++
++(prb_data_ring, size_bits|data|head_lpos|tail_lpos)
++---------------------------------------------------
++
++Offsets for the fields describing a set of data blocks. Used by
++user-space tools to be able to access the data blocks without
++requiring the declaration of the structure.
++
++atomic_long_t
++-------------
+
+-The index that the next printk() record to read after the last clear
+-command. It indicates the first record after the last SYSLOG_ACTION
+-_CLEAR, like issued by 'dmesg -c'. Used by user-space tools to dump
+-the dmesg log.
+-
+-log_next_idx
+-------------
+-
+-The index of the next record to store in the buffer log_buf. Used to
+-compute the index of the current buffer position.
+-
+-printk_log
+-----------
+-
+-The size of a structure printk_log. Used to compute the size of
+-messages, and extract dmesg log. It encapsulates header information for
+-log_buf, such as timestamp, syslog level, etc.
+-
+-(printk_log, ts_nsec|len|text_len|dict_len)
+--------------------------------------------
+-
+-It represents field offsets in struct printk_log. User space tools
+-parse it and check whether the values of printk_log's members have been
+-changed.
++The size of the atomic_long_t structure. Used by user-space tools to
++be able to copy the full structure, regardless of its
++architecture-specific implementation.
++
++(atomic_long_t, counter)
++------------------------
++
++Offset for the long value of an atomic_long_t variable. Used by
++user-space tools to access the long value without requiring the
++architecture-specific declaration.
+
+ (free_area.free_list, MIGRATE_TYPES)
+ ------------------------------------
diff --git a/debian/patches-rt/0009-locking-spinlock-Split-the-lock-types-header.patch b/debian/patches-rt/0008-locking-spinlock-Split-the-lock-types-header.patch
index 729ff4dbb..f433c429e 100644
--- a/debian/patches-rt/0009-locking-spinlock-Split-the-lock-types-header.patch
+++ b/debian/patches-rt/0008-locking-spinlock-Split-the-lock-types-header.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 29 Jun 2011 19:34:01 +0200
-Subject: [PATCH 09/23] locking/spinlock: Split the lock types header
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Subject: [PATCH 08/22] locking/spinlock: Split the lock types header
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Split raw_spinlock into its own file and the remaining spinlock_t into
its own non-RT header. The non-RT header will be replaced later by sleeping
diff --git a/debian/patches-rt/0008-printk-add-ring-buffer-and-kthread.patch b/debian/patches-rt/0008-printk-add-ring-buffer-and-kthread.patch
deleted file mode 100644
index 4dcc8a910..000000000
--- a/debian/patches-rt/0008-printk-add-ring-buffer-and-kthread.patch
+++ /dev/null
@@ -1,169 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Tue, 12 Feb 2019 15:29:46 +0100
-Subject: [PATCH 08/25] printk: add ring buffer and kthread
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-The printk ring buffer provides an NMI-safe interface for writing
-messages to a ring buffer. Using such a buffer alleviates printk
-callers from the current burdens of disabled preemption while calling
-the console drivers (and possibly printing out many messages that
-another task put into the log buffer).
-
-Create a ring buffer to be used for storing messages to be
-printed to the consoles.
-
-Create a dedicated printk kthread to block on the ring buffer
-and call the console drivers for the read messages.
-
-NOTE: The printk_delay is relocated to _after_ the message is
- printed, where it makes more sense.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/printk/printk.c | 105 +++++++++++++++++++++++++++++++++++++++++++++++++
- 1 file changed, 105 insertions(+)
-
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -44,6 +44,8 @@
- #include <linux/irq_work.h>
- #include <linux/ctype.h>
- #include <linux/uio.h>
-+#include <linux/kthread.h>
-+#include <linux/printk_ringbuffer.h>
- #include <linux/sched/clock.h>
- #include <linux/sched/debug.h>
- #include <linux/sched/task_stack.h>
-@@ -417,7 +419,12 @@ DEFINE_RAW_SPINLOCK(logbuf_lock);
- printk_safe_exit_irqrestore(flags); \
- } while (0)
-
-+DECLARE_STATIC_PRINTKRB_CPULOCK(printk_cpulock);
-+
- #ifdef CONFIG_PRINTK
-+/* record buffer */
-+DECLARE_STATIC_PRINTKRB(printk_rb, CONFIG_LOG_BUF_SHIFT, &printk_cpulock);
-+
- DECLARE_WAIT_QUEUE_HEAD(log_wait);
- /* the next printk record to read by syslog(READ) or /proc/kmsg */
- static u64 syslog_seq;
-@@ -792,6 +799,10 @@ static ssize_t msg_print_ext_body(char *
- return p - buf;
- }
-
-+#define PRINTK_SPRINT_MAX (LOG_LINE_MAX + PREFIX_MAX)
-+#define PRINTK_RECORD_MAX (sizeof(struct printk_log) + \
-+ CONSOLE_EXT_LOG_MAX + PRINTK_SPRINT_MAX)
-+
- /* /dev/kmsg - userspace message inject/listen interface */
- struct devkmsg_user {
- u64 seq;
-@@ -1656,6 +1667,34 @@ SYSCALL_DEFINE3(syslog, int, type, char
- return do_syslog(type, buf, len, SYSLOG_FROM_READER);
- }
-
-+static void format_text(struct printk_log *msg, u64 seq,
-+ char *ext_text, size_t *ext_len,
-+ char *text, size_t *len, bool time)
-+{
-+ if (suppress_message_printing(msg->level)) {
-+ /*
-+ * Skip record that has level above the console
-+ * loglevel and update each console's local seq.
-+ */
-+ *len = 0;
-+ *ext_len = 0;
-+ return;
-+ }
-+
-+ *len = msg_print_text(msg, console_msg_format & MSG_FORMAT_SYSLOG,
-+ time, text, PRINTK_SPRINT_MAX);
-+ if (nr_ext_console_drivers) {
-+ *ext_len = msg_print_ext_header(ext_text, CONSOLE_EXT_LOG_MAX,
-+ msg, seq);
-+ *ext_len += msg_print_ext_body(ext_text + *ext_len,
-+ CONSOLE_EXT_LOG_MAX - *ext_len,
-+ log_dict(msg), msg->dict_len,
-+ log_text(msg), msg->text_len);
-+ } else {
-+ *ext_len = 0;
-+ }
-+}
-+
- /*
- * Special console_lock variants that help to reduce the risk of soft-lockups.
- * They allow to pass console_lock to another printk() call using a busy wait.
-@@ -3047,6 +3086,72 @@ void wake_up_klogd(void)
- preempt_enable();
- }
-
-+static int printk_kthread_func(void *data)
-+{
-+ struct prb_iterator iter;
-+ struct printk_log *msg;
-+ size_t ext_len;
-+ char *ext_text;
-+ u64 master_seq;
-+ size_t len;
-+ char *text;
-+ char *buf;
-+ int ret;
-+
-+ ext_text = kmalloc(CONSOLE_EXT_LOG_MAX, GFP_KERNEL);
-+ text = kmalloc(PRINTK_SPRINT_MAX, GFP_KERNEL);
-+ buf = kmalloc(PRINTK_RECORD_MAX, GFP_KERNEL);
-+ if (!ext_text || !text || !buf)
-+ return -1;
-+
-+ prb_iter_init(&iter, &printk_rb, NULL);
-+
-+ /* the printk kthread never exits */
-+ for (;;) {
-+ ret = prb_iter_wait_next(&iter, buf,
-+ PRINTK_RECORD_MAX, &master_seq);
-+ if (ret == -ERESTARTSYS) {
-+ continue;
-+ } else if (ret < 0) {
-+ /* iterator invalid, start over */
-+ prb_iter_init(&iter, &printk_rb, NULL);
-+ continue;
-+ }
-+
-+ msg = (struct printk_log *)buf;
-+ format_text(msg, master_seq, ext_text, &ext_len, text,
-+ &len, printk_time);
-+
-+ console_lock();
-+ if (len > 0 || ext_len > 0) {
-+ call_console_drivers(ext_text, ext_len, text, len);
-+ boot_delay_msec(msg->level);
-+ printk_delay();
-+ }
-+ console_unlock();
-+ }
-+
-+ kfree(ext_text);
-+ kfree(text);
-+ kfree(buf);
-+
-+ return 0;
-+}
-+
-+static int __init init_printk_kthread(void)
-+{
-+ struct task_struct *thread;
-+
-+ thread = kthread_run(printk_kthread_func, NULL, "printk");
-+ if (IS_ERR(thread)) {
-+ pr_err("printk: unable to create printing thread\n");
-+ return PTR_ERR(thread);
-+ }
-+
-+ return 0;
-+}
-+late_initcall(init_printk_kthread);
-+
- void defer_console_output(void)
- {
- if (!printk_percpu_data_ready())
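
Condensed, the dedicated printing kthread that this dropped patch added ran
the following loop (names as in the hunk above; an illustrative sketch for
orientation, not the applied code):

	/* Sketch: consume records and print them outside the writers' context. */
	for (;;) {
		ret = prb_iter_wait_next(&iter, buf, PRINTK_RECORD_MAX,
					 &master_seq);
		if (ret == -ERESTARTSYS)
			continue;		/* interrupted wait, retry */
		if (ret < 0) {
			/* writers overtook the iterator, resynchronize */
			prb_iter_init(&iter, &printk_rb, NULL);
			continue;
		}

		format_text((struct printk_log *)buf, master_seq,
			    ext_text, &ext_len, text, &len, printk_time);

		console_lock();
		if (len > 0 || ext_len > 0) {
			call_console_drivers(ext_text, ext_len, text, len);
			printk_delay();	/* now after the message is printed */
		}
		console_unlock();
	}
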
diff --git a/debian/patches-rt/0008-printk-inline-log_output-log_store-in-vprintk_store.patch b/debian/patches-rt/0008-printk-inline-log_output-log_store-in-vprintk_store.patch
new file mode 100644
index 000000000..7766d5155
--- /dev/null
+++ b/debian/patches-rt/0008-printk-inline-log_output-log_store-in-vprintk_store.patch
@@ -0,0 +1,190 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 19 Oct 2020 16:40:26 +0206
+Subject: [PATCH 08/15] printk: inline log_output(),log_store() in
+ vprintk_store()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+In preparation for supporting atomic printing, inline log_output()
+and log_store() into vprintk_store(). This allows these
+sub-functions to more easily communicate whether they have performed
+a finalized commit, as well as the sequence number of that commit.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 141 +++++++++++++++++++++----------------------------
+ 1 file changed, 63 insertions(+), 78 deletions(-)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -465,52 +465,6 @@ static void truncate_msg(u16 *text_len,
+ *trunc_msg_len = 0;
+ }
+
+-/* insert record into the buffer, discard old ones, update heads */
+-static int log_store(u32 caller_id, int facility, int level,
+- enum log_flags flags, u64 ts_nsec,
+- const struct dev_printk_info *dev_info,
+- const char *text, u16 text_len)
+-{
+- struct prb_reserved_entry e;
+- struct printk_record r;
+- u16 trunc_msg_len = 0;
+-
+- prb_rec_init_wr(&r, text_len);
+-
+- if (!prb_reserve(&e, prb, &r)) {
+- /* truncate the message if it is too long for empty buffer */
+- truncate_msg(&text_len, &trunc_msg_len);
+- prb_rec_init_wr(&r, text_len + trunc_msg_len);
+- /* survive when the log buffer is too small for trunc_msg */
+- if (!prb_reserve(&e, prb, &r))
+- return 0;
+- }
+-
+- /* fill message */
+- memcpy(&r.text_buf[0], text, text_len);
+- if (trunc_msg_len)
+- memcpy(&r.text_buf[text_len], trunc_msg, trunc_msg_len);
+- r.info->text_len = text_len + trunc_msg_len;
+- r.info->facility = facility;
+- r.info->level = level & 7;
+- r.info->flags = flags & 0x1f;
+- if (ts_nsec > 0)
+- r.info->ts_nsec = ts_nsec;
+- else
+- r.info->ts_nsec = local_clock();
+- r.info->caller_id = caller_id;
+- if (dev_info)
+- memcpy(&r.info->dev_info, dev_info, sizeof(r.info->dev_info));
+-
+- /* insert message */
+- if ((flags & LOG_CONT) || !(flags & LOG_NEWLINE))
+- prb_commit(&e);
+- else
+- prb_final_commit(&e);
+-
+- return (text_len + trunc_msg_len);
+-}
+-
+ int dmesg_restrict = IS_ENABLED(CONFIG_SECURITY_DMESG_RESTRICT);
+
+ static int syslog_action_restricted(int type)
+@@ -2001,45 +1955,24 @@ static inline u32 printk_caller_id(void)
+ 0x80000000 + raw_smp_processor_id();
+ }
+
+-static size_t log_output(int facility, int level, enum log_flags lflags,
+- const struct dev_printk_info *dev_info,
+- char *text, size_t text_len)
+-{
+- const u32 caller_id = printk_caller_id();
+-
+- if (lflags & LOG_CONT) {
+- struct prb_reserved_entry e;
+- struct printk_record r;
+-
+- prb_rec_init_wr(&r, text_len);
+- if (prb_reserve_in_last(&e, prb, &r, caller_id, LOG_LINE_MAX)) {
+- memcpy(&r.text_buf[r.info->text_len], text, text_len);
+- r.info->text_len += text_len;
+- if (lflags & LOG_NEWLINE) {
+- r.info->flags |= LOG_NEWLINE;
+- prb_final_commit(&e);
+- } else {
+- prb_commit(&e);
+- }
+- return text_len;
+- }
+- }
+-
+- /* Store it in the record log */
+- return log_store(caller_id, facility, level, lflags, 0,
+- dev_info, text, text_len);
+-}
+-
+ int vprintk_store(int facility, int level,
+ const struct dev_printk_info *dev_info,
+ const char *fmt, va_list args)
+ {
+- size_t text_len;
++ const u32 caller_id = printk_caller_id();
++ struct prb_reserved_entry e;
+ enum log_flags lflags = 0;
+ unsigned long irqflags;
++ struct printk_record r;
++ u16 trunc_msg_len = 0;
+ int sprint_id;
++ u16 text_len;
++ u64 ts_nsec;
++ int ret = 0;
+ char *text;
+- int ret;
++ u64 seq;
++
++ ts_nsec = local_clock();
+
+ /* No buffer is available if printk has recursed too much. */
+ text = get_sprint_buf(&sprint_id, &irqflags);
+@@ -2083,8 +2016,60 @@ int vprintk_store(int facility, int leve
+ if (dev_info)
+ lflags |= LOG_NEWLINE;
+
+- ret = log_output(facility, level, lflags, dev_info, text, text_len);
++ if (lflags & LOG_CONT) {
++ prb_rec_init_wr(&r, text_len);
++ if (prb_reserve_in_last(&e, prb, &r, caller_id, LOG_LINE_MAX)) {
++ seq = r.info->seq;
++ memcpy(&r.text_buf[r.info->text_len], text, text_len);
++ r.info->text_len += text_len;
++ if (lflags & LOG_NEWLINE) {
++ r.info->flags |= LOG_NEWLINE;
++ prb_final_commit(&e);
++ } else {
++ prb_commit(&e);
++ }
++ ret = text_len;
++ goto out;
++ }
++ }
+
++ /* Store it in the record log */
++
++ prb_rec_init_wr(&r, text_len);
++
++ if (!prb_reserve(&e, prb, &r)) {
++ /* truncate the message if it is too long for empty buffer */
++ truncate_msg(&text_len, &trunc_msg_len);
++ prb_rec_init_wr(&r, text_len + trunc_msg_len);
++ /* survive when the log buffer is too small for trunc_msg */
++ if (!prb_reserve(&e, prb, &r))
++ goto out;
++ }
++
++ seq = r.info->seq;
++
++ /* fill message */
++ memcpy(&r.text_buf[0], text, text_len);
++ if (trunc_msg_len)
++ memcpy(&r.text_buf[text_len], trunc_msg, trunc_msg_len);
++ r.info->text_len = text_len + trunc_msg_len;
++ r.info->facility = facility;
++ r.info->level = level & 7;
++ r.info->flags = lflags & 0x1f;
++ r.info->ts_nsec = ts_nsec;
++ r.info->caller_id = caller_id;
++ if (dev_info)
++ memcpy(&r.info->dev_info, dev_info, sizeof(r.info->dev_info));
++
++ /* insert message */
++ if ((lflags & LOG_CONT) || !(lflags & LOG_NEWLINE)) {
++ prb_commit(&e);
++ } else {
++ prb_final_commit(&e);
++ }
++
++ ret = text_len + trunc_msg_len;
++out:
+ put_sprint_buf(sprint_id, irqflags);
+ return ret;
+ }
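
For orientation, the reserve/fill/commit idiom that vprintk_store() now
open-codes, shown in isolation (a minimal sketch against the
printk_ringbuffer calls used in the hunk; the helper name is invented for
illustration):

	/* Sketch: store one message and report its sequence number. */
	static u64 store_one_record(const char *text, u16 text_len, bool complete)
	{
		struct prb_reserved_entry e;
		struct printk_record r;
		u64 seq;

		prb_rec_init_wr(&r, text_len);
		if (!prb_reserve(&e, prb, &r))
			return 0;	/* no space, the message is lost */

		seq = r.info->seq;	/* read before commit, as in the hunk */

		memcpy(&r.text_buf[0], text, text_len);
		r.info->text_len = text_len;

		if (complete)
			prb_final_commit(&e);	/* finalized, readers may consume it */
		else
			prb_commit(&e);		/* left open so LOG_CONT can extend it */

		return seq;
	}
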
diff --git a/debian/patches-rt/0008-sched-Massage-set_cpus_allowed.patch b/debian/patches-rt/0008-sched-Massage-set_cpus_allowed.patch
index 6b25853a7..1f01b633c 100644
--- a/debian/patches-rt/0008-sched-Massage-set_cpus_allowed.patch
+++ b/debian/patches-rt/0008-sched-Massage-set_cpus_allowed.patch
@@ -1,7 +1,7 @@
From: Peter Zijlstra <peterz@infradead.org>
-Date: Mon, 5 Oct 2020 16:57:25 +0200
-Subject: [PATCH 08/17] sched: Massage set_cpus_allowed()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Date: Fri, 23 Oct 2020 12:12:06 +0200
+Subject: [PATCH 08/19] sched: Massage set_cpus_allowed()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Thread a u32 flags word through the *set_cpus_allowed*() callchain.
This will allow adding behavioural tweaks for future users.
@@ -101,7 +101,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
return set_cpus_allowed_ptr(p, new_mask);
}
-@@ -6008,7 +6016,7 @@ long sched_setaffinity(pid_t pid, const
+@@ -6007,7 +6015,7 @@ long sched_setaffinity(pid_t pid, const
}
#endif
again:
@@ -110,7 +110,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!retval) {
cpuset_cpus_allowed(p, cpus_allowed);
-@@ -6591,7 +6599,7 @@ void init_idle(struct task_struct *idle,
+@@ -6590,7 +6598,7 @@ void init_idle(struct task_struct *idle,
*
* And since this is boot we can forgo the serialization.
*/
diff --git a/debian/patches-rt/0008-seqlock-seqcount-latch-APIs-Only-allow-seqcount_latc.patch b/debian/patches-rt/0008-seqlock-seqcount-latch-APIs-Only-allow-seqcount_latc.patch
index 2503d9070..6129bde48 100644
--- a/debian/patches-rt/0008-seqlock-seqcount-latch-APIs-Only-allow-seqcount_latc.patch
+++ b/debian/patches-rt/0008-seqlock-seqcount-latch-APIs-Only-allow-seqcount_latc.patch
@@ -2,7 +2,7 @@ From: "Ahmed S. Darwish" <a.darwish@linutronix.de>
Date: Thu, 27 Aug 2020 13:40:44 +0200
Subject: [PATCH 08/13] seqlock: seqcount latch APIs: Only allow
seqcount_latch_t
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
All latch sequence counter call-sites have now been converted from plain
seqcount_t to the new seqcount_latch_t data type.
diff --git a/debian/patches-rt/0010-locking-rtmutex-Avoid-include-hell.patch b/debian/patches-rt/0009-locking-rtmutex-Avoid-include-hell.patch
index 8fbe38b32..8f988f533 100644
--- a/debian/patches-rt/0010-locking-rtmutex-Avoid-include-hell.patch
+++ b/debian/patches-rt/0009-locking-rtmutex-Avoid-include-hell.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 29 Jun 2011 20:06:39 +0200
-Subject: [PATCH 10/23] locking/rtmutex: Avoid include hell
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Subject: [PATCH 09/22] locking/rtmutex: Avoid include hell
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Include only the required raw types. This avoids pulling in the
complete spinlock header which in turn requires rtmutex.h at some point.
diff --git a/debian/patches-rt/0009-printk-relocate-printk_delay-and-vprintk_default.patch b/debian/patches-rt/0009-printk-relocate-printk_delay-and-vprintk_default.patch
new file mode 100644
index 000000000..69fe41792
--- /dev/null
+++ b/debian/patches-rt/0009-printk-relocate-printk_delay-and-vprintk_default.patch
@@ -0,0 +1,83 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 19 Oct 2020 21:02:40 +0206
+Subject: [PATCH 09/15] printk: relocate printk_delay() and vprintk_default()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+Move printk_delay() and vprintk_default() "as is" further up so that
+they can be used by new functions in an upcoming commit.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 40 ++++++++++++++++++++--------------------
+ 1 file changed, 20 insertions(+), 20 deletions(-)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1751,6 +1751,20 @@ static void put_sprint_buf(int id, unsig
+ local_irq_restore(flags);
+ }
+
++int printk_delay_msec __read_mostly;
++
++static inline void printk_delay(void)
++{
++ if (unlikely(printk_delay_msec)) {
++ int m = printk_delay_msec;
++
++ while (m--) {
++ mdelay(1);
++ touch_nmi_watchdog();
++ }
++ }
++}
++
+ /*
+ * Special console_lock variants that help to reduce the risk of soft-lockups.
+ * They allow to pass console_lock to another printk() call using a busy wait.
+@@ -1935,20 +1949,6 @@ static void call_console_drivers(const c
+ }
+ }
+
+-int printk_delay_msec __read_mostly;
+-
+-static inline void printk_delay(void)
+-{
+- if (unlikely(printk_delay_msec)) {
+- int m = printk_delay_msec;
+-
+- while (m--) {
+- mdelay(1);
+- touch_nmi_watchdog();
+- }
+- }
+-}
+-
+ static inline u32 printk_caller_id(void)
+ {
+ return in_task() ? task_pid_nr(current) :
+@@ -2119,18 +2119,18 @@ asmlinkage int vprintk_emit(int facility
+ }
+ EXPORT_SYMBOL(vprintk_emit);
+
+-asmlinkage int vprintk(const char *fmt, va_list args)
+-{
+- return vprintk_func(fmt, args);
+-}
+-EXPORT_SYMBOL(vprintk);
+-
+ int vprintk_default(const char *fmt, va_list args)
+ {
+ return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
+ }
+ EXPORT_SYMBOL_GPL(vprintk_default);
+
++asmlinkage int vprintk(const char *fmt, va_list args)
++{
++ return vprintk_func(fmt, args);
++}
++EXPORT_SYMBOL(vprintk);
++
+ /**
+ * printk - print a kernel message
+ * @fmt: format string
diff --git a/debian/patches-rt/0009-printk-remove-exclusive-console-hack.patch b/debian/patches-rt/0009-printk-remove-exclusive-console-hack.patch
deleted file mode 100644
index de7b30488..000000000
--- a/debian/patches-rt/0009-printk-remove-exclusive-console-hack.patch
+++ /dev/null
@@ -1,102 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Tue, 12 Feb 2019 15:29:47 +0100
-Subject: [PATCH 09/25] printk: remove exclusive console hack
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-In order to support printing the printk log history when new
-consoles are registered, a global exclusive_console variable is
-temporarily set. This only works because printk runs with
-preemption disabled.
-
-When console printing is moved to a fully preemptible dedicated
-kthread, this hack no longer works.
-
-Remove exclusive_console usage.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/printk/printk.c | 30 ++++--------------------------
- 1 file changed, 4 insertions(+), 26 deletions(-)
-
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -268,11 +268,6 @@ static void __up_console_sem(unsigned lo
- static int console_locked, console_suspended;
-
- /*
-- * If exclusive_console is non-NULL then only this console is to be printed to.
-- */
--static struct console *exclusive_console;
--
--/*
- * Array of consoles built from command line options (console=)
- */
-
-@@ -443,7 +438,6 @@ static u32 log_next_idx;
- /* the next printk record to write to the console */
- static u64 console_seq;
- static u32 console_idx;
--static u64 exclusive_console_stop_seq;
-
- /* the next printk record to read after the last 'clear' command */
- static u64 clear_seq;
-@@ -1848,8 +1842,6 @@ static void call_console_drivers(const c
- trace_console_rcuidle(text, len);
-
- for_each_console(con) {
-- if (exclusive_console && con != exclusive_console)
-- continue;
- if (!(con->flags & CON_ENABLED))
- continue;
- if (!con->write)
-@@ -2131,7 +2123,6 @@ static u64 syslog_seq;
- static u32 syslog_idx;
- static u64 console_seq;
- static u32 console_idx;
--static u64 exclusive_console_stop_seq;
- static u64 log_first_seq;
- static u32 log_first_idx;
- static u64 log_next_seq;
-@@ -2506,12 +2497,6 @@ void console_unlock(void)
- goto skip;
- }
-
-- /* Output to all consoles once old messages replayed. */
-- if (unlikely(exclusive_console &&
-- console_seq >= exclusive_console_stop_seq)) {
-- exclusive_console = NULL;
-- }
--
- len += msg_print_text(msg,
- console_msg_format & MSG_FORMAT_SYSLOG,
- printk_time, text + len, sizeof(text) - len);
-@@ -2865,17 +2850,6 @@ void register_console(struct console *ne
- * for us.
- */
- logbuf_lock_irqsave(flags);
-- /*
-- * We're about to replay the log buffer. Only do this to the
-- * just-registered console to avoid excessive message spam to
-- * the already-registered consoles.
-- *
-- * Set exclusive_console with disabled interrupts to reduce
-- * race window with eventual console_flush_on_panic() that
-- * ignores console_lock.
-- */
-- exclusive_console = newcon;
-- exclusive_console_stop_seq = console_seq;
- console_seq = syslog_seq;
- console_idx = syslog_idx;
- logbuf_unlock_irqrestore(flags);
-@@ -2889,6 +2863,10 @@ void register_console(struct console *ne
- * boot consoles, real consoles, etc - this is to ensure that end
- * users know there might be something in the kernel's log buffer that
- * went to the bootconsole (that they do not see on the real console)
-+ *
-+ * This message is also important because it will trigger the
-+ * printk kthread to begin dumping the log buffer to the newly
-+ * registered console.
- */
- pr_info("%sconsole [%s%d] enabled\n",
- (newcon->flags & CON_BOOT) ? "boot" : "" ,
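
The removed hack, condensed (lines as in the hunks above; a sketch for
orientation only): registration rewound the console position and marked the
new console exclusive, and console_unlock() ended the exclusive phase once
the replay caught up.

	/* register_console(): replay history only to the new console */
	exclusive_console = newcon;
	exclusive_console_stop_seq = console_seq;
	console_seq = syslog_seq;	/* rewind to the oldest unread record */

	/* console_unlock(): output to all consoles once old messages replayed */
	if (unlikely(exclusive_console &&
		     console_seq >= exclusive_console_stop_seq))
		exclusive_console = NULL;
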
diff --git a/debian/patches-rt/0009-sched-Add-migrate_disable.patch b/debian/patches-rt/0009-sched-Add-migrate_disable.patch
index 72018258a..49ef58097 100644
--- a/debian/patches-rt/0009-sched-Add-migrate_disable.patch
+++ b/debian/patches-rt/0009-sched-Add-migrate_disable.patch
@@ -1,7 +1,7 @@
From: Peter Zijlstra <peterz@infradead.org>
-Date: Mon, 5 Oct 2020 16:57:26 +0200
-Subject: [PATCH 09/17] sched: Add migrate_disable()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Date: Fri, 23 Oct 2020 12:12:07 +0200
+Subject: [PATCH 09/19] sched: Add migrate_disable()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Add the base migrate_disable() support (under protest).
@@ -15,16 +15,16 @@ ease of review.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- include/linux/preempt.h | 60 +++++++++++++++++++++++++
+ include/linux/preempt.h | 65 +++++++++++++++++++++++++++
include/linux/sched.h | 3 +
kernel/sched/core.c | 112 +++++++++++++++++++++++++++++++++++++++++++++---
kernel/sched/sched.h | 6 +-
lib/smp_processor_id.c | 5 ++
- 5 files changed, 178 insertions(+), 8 deletions(-)
+ 5 files changed, 183 insertions(+), 8 deletions(-)
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
-@@ -322,6 +322,64 @@ static inline void preempt_notifier_init
+@@ -322,6 +322,69 @@ static inline void preempt_notifier_init
#endif
@@ -84,12 +84,17 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+
-+#else /* !(CONFIG_SMP && CONFIG_PREEMPT_RT) */
++#elif defined(CONFIG_PREEMPT_RT)
++
++static inline void migrate_disable(void) { }
++static inline void migrate_enable(void) { }
++
++#else /* !CONFIG_PREEMPT_RT */
+
/**
* migrate_disable - Prevent migration of the current task
*
-@@ -352,4 +410,6 @@ static __always_inline void migrate_enab
+@@ -352,4 +415,6 @@ static __always_inline void migrate_enab
preempt_enable();
}
@@ -292,7 +297,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void
ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
{
-@@ -4575,6 +4674,7 @@ static void __sched notrace __schedule(b
+@@ -4574,6 +4673,7 @@ static void __sched notrace __schedule(b
*/
++*switch_count;
diff --git a/debian/patches-rt/0009-scripts-gdb-add-utils.read_ulong.patch b/debian/patches-rt/0009-scripts-gdb-add-utils.read_ulong.patch
new file mode 100644
index 000000000..97f6783a2
--- /dev/null
+++ b/debian/patches-rt/0009-scripts-gdb-add-utils.read_ulong.patch
@@ -0,0 +1,35 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Fri, 14 Aug 2020 23:31:24 +0206
+Subject: [PATCH 09/25] scripts/gdb: add utils.read_ulong()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+Add a function for reading unsigned long values, which vary in size
+depending on the architecture.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
+Tested-by: Nick Desaulniers <ndesaulniers@google.com>
+Tested-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20200814212525.6118-2-john.ogness@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ scripts/gdb/linux/utils.py | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/scripts/gdb/linux/utils.py
++++ b/scripts/gdb/linux/utils.py
+@@ -123,6 +123,13 @@ target_endianness = None
+ return read_u32(buffer, offset + 4) + (read_u32(buffer, offset) << 32)
+
+
++def read_ulong(buffer, offset):
++ if get_long_type().sizeof == 8:
++ return read_u64(buffer, offset)
++ else:
++ return read_u32(buffer, offset)
++
++
+ target_arch = None
+
+
diff --git a/debian/patches-rt/0009-seqlock-seqcount_LOCKNAME_t-Standardize-naming-conve.patch b/debian/patches-rt/0009-seqlock-seqcount_LOCKNAME_t-Standardize-naming-conve.patch
index 8d6f009ef..330ae97c5 100644
--- a/debian/patches-rt/0009-seqlock-seqcount_LOCKNAME_t-Standardize-naming-conve.patch
+++ b/debian/patches-rt/0009-seqlock-seqcount_LOCKNAME_t-Standardize-naming-conve.patch
@@ -2,7 +2,7 @@ From: "Ahmed S. Darwish" <a.darwish@linutronix.de>
Date: Fri, 4 Sep 2020 17:32:27 +0200
Subject: [PATCH 09/13] seqlock: seqcount_LOCKNAME_t: Standardize naming
convention
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
At seqlock.h, sequence counters with associated locks are either called
seqcount_LOCKNAME_t, seqcount_LOCKTYPE_t, or seqcount_locktype_t.
diff --git a/debian/patches-rt/0011-lockdep-Reduce-header-files-in-debug_locks.h.patch b/debian/patches-rt/0010-lockdep-Reduce-header-files-in-debug_locks.h.patch
index 0114a5b49..d20295df6 100644
--- a/debian/patches-rt/0011-lockdep-Reduce-header-files-in-debug_locks.h.patch
+++ b/debian/patches-rt/0010-lockdep-Reduce-header-files-in-debug_locks.h.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 14 Aug 2020 16:55:25 +0200
Subject: [PATCH 11/23] lockdep: Reduce header files in debug_locks.h
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
The inclusion of kernel.h leads to circular dependency if spinlock_t is
based on rt_mutex.
diff --git a/debian/patches-rt/0010-printk-combine-boot_delay_msec-into-printk_delay.patch b/debian/patches-rt/0010-printk-combine-boot_delay_msec-into-printk_delay.patch
new file mode 100644
index 000000000..92bcbc385
--- /dev/null
+++ b/debian/patches-rt/0010-printk-combine-boot_delay_msec-into-printk_delay.patch
@@ -0,0 +1,38 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 19 Oct 2020 22:11:31 +0206
+Subject: [PATCH 10/15] printk: combine boot_delay_msec() into printk_delay()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+boot_delay_msec() is always called immediately before printk_delay()
+so just combine the two.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1753,8 +1753,10 @@ static void put_sprint_buf(int id, unsig
+
+ int printk_delay_msec __read_mostly;
+
+-static inline void printk_delay(void)
++static inline void printk_delay(int level)
+ {
++ boot_delay_msec(level);
++
+ if (unlikely(printk_delay_msec)) {
+ int m = printk_delay_msec;
+
+@@ -2090,8 +2092,7 @@ asmlinkage int vprintk_emit(int facility
+ in_sched = true;
+ }
+
+- boot_delay_msec(level);
+- printk_delay();
++ printk_delay(level);
+
+ /* This stops the holder of console_sem just where we want him */
+ printed_len = vprintk_store(facility, level, dev_info, fmt, args);
diff --git a/debian/patches-rt/0010-printk-redirect-emit-store-to-new-ringbuffer.patch b/debian/patches-rt/0010-printk-redirect-emit-store-to-new-ringbuffer.patch
deleted file mode 100644
index 65e987bf6..000000000
--- a/debian/patches-rt/0010-printk-redirect-emit-store-to-new-ringbuffer.patch
+++ /dev/null
@@ -1,438 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Tue, 12 Feb 2019 15:29:48 +0100
-Subject: [PATCH 10/25] printk: redirect emit/store to new ringbuffer
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-vprintk_emit and vprintk_store are the main functions that all printk
-variants eventually go through. Change these to store the message in
-the new printk ring buffer that the printk kthread is reading.
-
-Remove functions no longer in use because of the changes to
-vprintk_emit and vprintk_store.
-
-In order to handle interrupts and NMIs, a second per-cpu ring buffer
-(sprint_rb) is added. This ring buffer is used for NMI-safe memory
-allocation in order to format the printk messages.
-
-NOTE: LOG_CONT is ignored for now and handled as individual messages.
- LOG_CONT functions are masked behind "#if 0" blocks until their
- functionality can be restored
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/printk/printk.c | 325 +++++++------------------------------------------
- 1 file changed, 51 insertions(+), 274 deletions(-)
-
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -529,90 +529,6 @@ static u32 log_next(u32 idx)
- return idx + msg->len;
- }
-
--/*
-- * Check whether there is enough free space for the given message.
-- *
-- * The same values of first_idx and next_idx mean that the buffer
-- * is either empty or full.
-- *
-- * If the buffer is empty, we must respect the position of the indexes.
-- * They cannot be reset to the beginning of the buffer.
-- */
--static int logbuf_has_space(u32 msg_size, bool empty)
--{
-- u32 free;
--
-- if (log_next_idx > log_first_idx || empty)
-- free = max(log_buf_len - log_next_idx, log_first_idx);
-- else
-- free = log_first_idx - log_next_idx;
--
-- /*
-- * We need space also for an empty header that signalizes wrapping
-- * of the buffer.
-- */
-- return free >= msg_size + sizeof(struct printk_log);
--}
--
--static int log_make_free_space(u32 msg_size)
--{
-- while (log_first_seq < log_next_seq &&
-- !logbuf_has_space(msg_size, false)) {
-- /* drop old messages until we have enough contiguous space */
-- log_first_idx = log_next(log_first_idx);
-- log_first_seq++;
-- }
--
-- if (clear_seq < log_first_seq) {
-- clear_seq = log_first_seq;
-- clear_idx = log_first_idx;
-- }
--
-- /* sequence numbers are equal, so the log buffer is empty */
-- if (logbuf_has_space(msg_size, log_first_seq == log_next_seq))
-- return 0;
--
-- return -ENOMEM;
--}
--
--/* compute the message size including the padding bytes */
--static u32 msg_used_size(u16 text_len, u16 dict_len, u32 *pad_len)
--{
-- u32 size;
--
-- size = sizeof(struct printk_log) + text_len + dict_len;
-- *pad_len = (-size) & (LOG_ALIGN - 1);
-- size += *pad_len;
--
-- return size;
--}
--
--/*
-- * Define how much of the log buffer we could take at maximum. The value
-- * must be greater than two. Note that only half of the buffer is available
-- * when the index points to the middle.
-- */
--#define MAX_LOG_TAKE_PART 4
--static const char trunc_msg[] = "<truncated>";
--
--static u32 truncate_msg(u16 *text_len, u16 *trunc_msg_len,
-- u16 *dict_len, u32 *pad_len)
--{
-- /*
-- * The message should not take the whole buffer. Otherwise, it might
-- * get removed too soon.
-- */
-- u32 max_text_len = log_buf_len / MAX_LOG_TAKE_PART;
-- if (*text_len > max_text_len)
-- *text_len = max_text_len;
-- /* enable the warning message */
-- *trunc_msg_len = strlen(trunc_msg);
-- /* disable the "dict" completely */
-- *dict_len = 0;
-- /* compute the size again, count also the warning message */
-- return msg_used_size(*text_len + *trunc_msg_len, 0, pad_len);
--}
--
- /* insert record into the buffer, discard old ones, update heads */
- static int log_store(u32 caller_id, int facility, int level,
- enum log_flags flags, u64 ts_nsec,
-@@ -620,57 +536,39 @@ static int log_store(u32 caller_id, int
- const char *text, u16 text_len)
- {
- struct printk_log *msg;
-- u32 size, pad_len;
-- u16 trunc_msg_len = 0;
--
-- /* number of '\0' padding bytes to next message */
-- size = msg_used_size(text_len, dict_len, &pad_len);
-+ struct prb_handle h;
-+ char *rbuf;
-+ u32 size;
-
-- if (log_make_free_space(size)) {
-- /* truncate the message if it is too long for empty buffer */
-- size = truncate_msg(&text_len, &trunc_msg_len,
-- &dict_len, &pad_len);
-- /* survive when the log buffer is too small for trunc_msg */
-- if (log_make_free_space(size))
-- return 0;
-- }
-+ size = sizeof(*msg) + text_len + dict_len;
-
-- if (log_next_idx + size + sizeof(struct printk_log) > log_buf_len) {
-+ rbuf = prb_reserve(&h, &printk_rb, size);
-+ if (!rbuf) {
- /*
-- * This message + an additional empty header does not fit
-- * at the end of the buffer. Add an empty header with len == 0
-- * to signify a wrap around.
-+ * An emergency message would have been printed, but
-+ * it cannot be stored in the log.
- */
-- memset(log_buf + log_next_idx, 0, sizeof(struct printk_log));
-- log_next_idx = 0;
-+ prb_inc_lost(&printk_rb);
-+ return 0;
- }
-
- /* fill message */
-- msg = (struct printk_log *)(log_buf + log_next_idx);
-+ msg = (struct printk_log *)rbuf;
- memcpy(log_text(msg), text, text_len);
- msg->text_len = text_len;
-- if (trunc_msg_len) {
-- memcpy(log_text(msg) + text_len, trunc_msg, trunc_msg_len);
-- msg->text_len += trunc_msg_len;
-- }
- memcpy(log_dict(msg), dict, dict_len);
- msg->dict_len = dict_len;
- msg->facility = facility;
- msg->level = level & 7;
- msg->flags = flags & 0x1f;
-- if (ts_nsec > 0)
-- msg->ts_nsec = ts_nsec;
-- else
-- msg->ts_nsec = local_clock();
-+ msg->ts_nsec = ts_nsec;
- #ifdef CONFIG_PRINTK_CALLER
- msg->caller_id = caller_id;
- #endif
-- memset(log_dict(msg) + dict_len, 0, pad_len);
- msg->len = size;
-
- /* insert message */
-- log_next_idx += msg->len;
-- log_next_seq++;
-+ prb_commit(&h);
-
- return msg->text_len;
- }
-@@ -1765,70 +1663,6 @@ static int console_lock_spinning_disable
- return 1;
- }
-
--/**
-- * console_trylock_spinning - try to get console_lock by busy waiting
-- *
-- * This allows to busy wait for the console_lock when the current
-- * owner is running in specially marked sections. It means that
-- * the current owner is running and cannot reschedule until it
-- * is ready to lose the lock.
-- *
-- * Return: 1 if we got the lock, 0 otherwise
-- */
--static int console_trylock_spinning(void)
--{
-- struct task_struct *owner = NULL;
-- bool waiter;
-- bool spin = false;
-- unsigned long flags;
--
-- if (console_trylock())
-- return 1;
--
-- printk_safe_enter_irqsave(flags);
--
-- raw_spin_lock(&console_owner_lock);
-- owner = READ_ONCE(console_owner);
-- waiter = READ_ONCE(console_waiter);
-- if (!waiter && owner && owner != current) {
-- WRITE_ONCE(console_waiter, true);
-- spin = true;
-- }
-- raw_spin_unlock(&console_owner_lock);
--
-- /*
-- * If there is an active printk() writing to the
-- * consoles, instead of having it write our data too,
-- * see if we can offload that load from the active
-- * printer, and do some printing ourselves.
-- * Go into a spin only if there isn't already a waiter
-- * spinning, and there is an active printer, and
-- * that active printer isn't us (recursive printk?).
-- */
-- if (!spin) {
-- printk_safe_exit_irqrestore(flags);
-- return 0;
-- }
--
-- /* We spin waiting for the owner to release us */
-- spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
-- /* Owner will clear console_waiter on hand off */
-- while (READ_ONCE(console_waiter))
-- cpu_relax();
-- spin_release(&console_owner_dep_map, _THIS_IP_);
--
-- printk_safe_exit_irqrestore(flags);
-- /*
-- * The owner passed the console lock to us.
-- * Since we did not spin on console lock, annotate
-- * this as a trylock. Otherwise lockdep will
-- * complain.
-- */
-- mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_);
--
-- return 1;
--}
--
- /*
- * Call the console drivers, asking them to write out
- * log_buf[start] to log_buf[end - 1].
-@@ -1846,7 +1680,7 @@ static void call_console_drivers(const c
- continue;
- if (!con->write)
- continue;
-- if (!cpu_online(smp_processor_id()) &&
-+ if (!cpu_online(raw_smp_processor_id()) &&
- !(con->flags & CON_ANYTIME))
- continue;
- if (con->flags & CON_EXTENDED)
-@@ -1876,6 +1710,8 @@ static inline u32 printk_caller_id(void)
- 0x80000000 + raw_smp_processor_id();
- }
-
-+/* FIXME: no support for LOG_CONT */
-+#if 0
- /*
- * Continuation lines are buffered, and not committed to the record buffer
- * until the line is complete, or a race forces it. The line fragments
-@@ -1931,56 +1767,45 @@ static bool cont_add(u32 caller_id, int
-
- return true;
- }
-+#endif /* 0 */
-
--static size_t log_output(int facility, int level, enum log_flags lflags, const char *dict, size_t dictlen, char *text, size_t text_len)
--{
-- const u32 caller_id = printk_caller_id();
--
-- /*
-- * If an earlier line was buffered, and we're a continuation
-- * write from the same context, try to add it to the buffer.
-- */
-- if (cont.len) {
-- if (cont.caller_id == caller_id && (lflags & LOG_CONT)) {
-- if (cont_add(caller_id, facility, level, lflags, text, text_len))
-- return text_len;
-- }
-- /* Otherwise, make sure it's flushed */
-- cont_flush();
-- }
--
-- /* Skip empty continuation lines that couldn't be added - they just flush */
-- if (!text_len && (lflags & LOG_CONT))
-- return 0;
--
-- /* If it doesn't end in a newline, try to buffer the current line */
-- if (!(lflags & LOG_NEWLINE)) {
-- if (cont_add(caller_id, facility, level, lflags, text, text_len))
-- return text_len;
-- }
--
-- /* Store it in the record log */
-- return log_store(caller_id, facility, level, lflags, 0,
-- dict, dictlen, text, text_len);
--}
--
--/* Must be called under logbuf_lock. */
- int vprintk_store(int facility, int level,
- const char *dict, size_t dictlen,
- const char *fmt, va_list args)
- {
-- static char textbuf[LOG_LINE_MAX];
-- char *text = textbuf;
-- size_t text_len;
-+ return vprintk_emit(facility, level, dict, dictlen, fmt, args);
-+}
-+
-+/* ring buffer used as memory allocator for temporary sprint buffers */
-+DECLARE_STATIC_PRINTKRB(sprint_rb,
-+ ilog2(PRINTK_RECORD_MAX + sizeof(struct prb_entry) +
-+ sizeof(long)) + 2, &printk_cpulock);
-+
-+asmlinkage int vprintk_emit(int facility, int level,
-+ const char *dict, size_t dictlen,
-+ const char *fmt, va_list args)
-+{
-+ const u32 caller_id = printk_caller_id();
- enum log_flags lflags = 0;
-+ int printed_len = 0;
-+ struct prb_handle h;
-+ size_t text_len;
-+ u64 ts_nsec;
-+ char *text;
-+ char *rbuf;
-
-- /*
-- * The printf needs to come first; we need the syslog
-- * prefix which might be passed-in as a parameter.
-- */
-- text_len = vscnprintf(text, sizeof(textbuf), fmt, args);
-+ ts_nsec = local_clock();
-+
-+ rbuf = prb_reserve(&h, &sprint_rb, PRINTK_SPRINT_MAX);
-+ if (!rbuf) {
-+ prb_inc_lost(&printk_rb);
-+ return printed_len;
-+ }
-+
-+ text = rbuf;
-+ text_len = vscnprintf(text, PRINTK_SPRINT_MAX, fmt, args);
-
-- /* mark and strip a trailing newline */
-+ /* strip and flag a trailing newline */
- if (text_len && text[text_len-1] == '\n') {
- text_len--;
- lflags |= LOG_NEWLINE;
-@@ -2011,58 +1836,10 @@ int vprintk_store(int facility, int leve
- if (dict)
- lflags |= LOG_NEWLINE;
-
-- return log_output(facility, level, lflags,
-- dict, dictlen, text, text_len);
--}
--
--asmlinkage int vprintk_emit(int facility, int level,
-- const char *dict, size_t dictlen,
-- const char *fmt, va_list args)
--{
-- int printed_len;
-- bool in_sched = false, pending_output;
-- unsigned long flags;
-- u64 curr_log_seq;
--
-- /* Suppress unimportant messages after panic happens */
-- if (unlikely(suppress_printk))
-- return 0;
--
-- if (level == LOGLEVEL_SCHED) {
-- level = LOGLEVEL_DEFAULT;
-- in_sched = true;
-- }
--
-- boot_delay_msec(level);
-- printk_delay();
--
-- /* This stops the holder of console_sem just where we want him */
-- logbuf_lock_irqsave(flags);
-- curr_log_seq = log_next_seq;
-- printed_len = vprintk_store(facility, level, dict, dictlen, fmt, args);
-- pending_output = (curr_log_seq != log_next_seq);
-- logbuf_unlock_irqrestore(flags);
--
-- /* If called from the scheduler, we can not call up(). */
-- if (!in_sched && pending_output) {
-- /*
-- * Disable preemption to avoid being preempted while holding
-- * console_sem which would prevent anyone from printing to
-- * console
-- */
-- preempt_disable();
-- /*
-- * Try to acquire and then immediately release the console
-- * semaphore. The release will print out buffers and wake up
-- * /dev/kmsg and syslog() users.
-- */
-- if (console_trylock_spinning())
-- console_unlock();
-- preempt_enable();
-- }
-+ printed_len = log_store(caller_id, facility, level, lflags, ts_nsec,
-+ dict, dictlen, text, text_len);
-
-- if (pending_output)
-- wake_up_klogd();
-+ prb_commit(&h);
- return printed_len;
- }
- EXPORT_SYMBOL(vprintk_emit);
-@@ -2522,7 +2299,7 @@ void console_unlock(void)
- console_lock_spinning_enable();
-
- stop_critical_timings(); /* don't trace print latency */
-- call_console_drivers(ext_text, ext_len, text, len);
-+ //call_console_drivers(ext_text, ext_len, text, len);
- start_critical_timings();
-
- if (console_lock_spinning_disable_and_check()) {
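
Taken together, the write path this dropped patch introduced was: reserve an
NMI-safe scratch buffer from the per-CPU sprint ring, format into it, store
the result into the record ring, release the scratch. Condensed from the
hunks above (the prb_handle interface is the RT series' own, not mainline):

	struct prb_handle h;
	char *rbuf;

	/* NMI-safe allocation of a scratch buffer for formatting */
	rbuf = prb_reserve(&h, &sprint_rb, PRINTK_SPRINT_MAX);
	if (!rbuf) {
		prb_inc_lost(&printk_rb);	/* account the lost message */
		return 0;
	}

	text_len = vscnprintf(rbuf, PRINTK_SPRINT_MAX, fmt, args);

	/* copy the formatted text into the record ring buffer */
	printed_len = log_store(caller_id, facility, level, lflags, ts_nsec,
				dict, dictlen, rbuf, text_len);

	prb_commit(&h);			/* release the scratch buffer */
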
diff --git a/debian/patches-rt/0010-sched-Fix-migrate_disable-vs-set_cpus_allowed_ptr.patch b/debian/patches-rt/0010-sched-Fix-migrate_disable-vs-set_cpus_allowed_ptr.patch
index c7a21884b..010f1bf9d 100644
--- a/debian/patches-rt/0010-sched-Fix-migrate_disable-vs-set_cpus_allowed_ptr.patch
+++ b/debian/patches-rt/0010-sched-Fix-migrate_disable-vs-set_cpus_allowed_ptr.patch
@@ -1,7 +1,7 @@
From: Peter Zijlstra <peterz@infradead.org>
-Date: Mon, 5 Oct 2020 16:57:27 +0200
-Subject: [PATCH 10/17] sched: Fix migrate_disable() vs set_cpus_allowed_ptr()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Date: Fri, 23 Oct 2020 12:12:08 +0200
+Subject: [PATCH 10/19] sched: Fix migrate_disable() vs set_cpus_allowed_ptr()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Concurrent migrate_disable() and set_cpus_allowed_ptr() has
interesting features. We rely on set_cpus_allowed_ptr() to not return
@@ -31,12 +31,15 @@ that is exactly the case where there is a pending
set_cpus_allowed_ptr(), so let that provide storage for the async stop
machine.
+Much thanks to Valentin, who used TLA+ most effectively and found lots of
+'interesting' cases.
+
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/sched.h | 1
- kernel/sched/core.c | 161 ++++++++++++++++++++++++++++++++++++++++++--------
- 2 files changed, 139 insertions(+), 23 deletions(-)
+ kernel/sched/core.c | 234 +++++++++++++++++++++++++++++++++++++++++++-------
+ 2 files changed, 205 insertions(+), 30 deletions(-)
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -83,67 +86,143 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
EXPORT_SYMBOL_GPL(migrate_enable);
-@@ -1811,6 +1822,7 @@ static struct rq *move_queued_task(struc
+@@ -1809,8 +1820,16 @@ static struct rq *move_queued_task(struc
+ }
+
struct migration_arg {
- struct task_struct *task;
- int dest_cpu;
-+ struct completion *done;
+- struct task_struct *task;
+- int dest_cpu;
++ struct task_struct *task;
++ int dest_cpu;
++ struct set_affinity_pending *pending;
++};
++
++struct set_affinity_pending {
++ refcount_t refs;
++ struct completion done;
++ struct cpu_stop_work stop_work;
++ struct migration_arg arg;
};
/*
-@@ -1845,6 +1857,7 @@ static int migration_cpu_stop(void *data
+@@ -1842,16 +1861,19 @@ static struct rq *__migrate_task(struct
+ */
+ static int migration_cpu_stop(void *data)
+ {
++ struct set_affinity_pending *pending;
struct migration_arg *arg = data;
struct task_struct *p = arg->task;
++ int dest_cpu = arg->dest_cpu;
struct rq *rq = this_rq();
+ bool complete = false;
struct rq_flags rf;
/*
-@@ -1867,15 +1880,27 @@ static int migration_cpu_stop(void *data
+ * The original target CPU might have gone down and we might
+ * be on another CPU but it doesn't matter.
+ */
+- local_irq_disable();
++ local_irq_save(rf.flags);
+ /*
+ * We need to explicitly wake pending tasks before running
+ * __migrate_task() such that we will not miss enforcing cpus_ptr
+@@ -1861,21 +1883,83 @@ static int migration_cpu_stop(void *data
+
+ raw_spin_lock(&p->pi_lock);
+ rq_lock(rq, &rf);
++
++ pending = p->migration_pending;
+ /*
+ * If task_rq(p) != rq, it cannot be migrated here, because we're
+ * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
* we're holding p->pi_lock.
*/
if (task_rq(p) == rq) {
+ if (is_migration_disabled(p))
+ goto out;
+
++ if (pending) {
++ p->migration_pending = NULL;
++ complete = true;
++ }
++
++ /* migrate_enable() -- we must not race against SCA */
++ if (dest_cpu < 0) {
++ /*
++ * When this was migrate_enable() but we no longer
++ * have a @pending, a concurrent SCA 'fixed' things
++ * and we should be valid again. Nothing to do.
++ */
++ if (!pending) {
++ WARN_ON_ONCE(!is_cpu_allowed(p, cpu_of(rq)));
++ goto out;
++ }
++
++ dest_cpu = cpumask_any_distribute(&p->cpus_mask);
++ }
++
if (task_on_rq_queued(p))
- rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
+- rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
++ rq = __migrate_task(rq, &rf, p, dest_cpu);
else
- p->wake_cpu = arg->dest_cpu;
+- p->wake_cpu = arg->dest_cpu;
++ p->wake_cpu = dest_cpu;
+
-+ if (arg->done) {
-+ p->migration_pending = NULL;
-+ complete = true;
++ } else if (dest_cpu < 0) {
++ /*
++ * This happens when we get migrated between migrate_enable()'s
++ * preempt_enable() and scheduling the stopper task. At that
++ * point we're a regular task again and not current anymore.
++ *
++ * A !PREEMPT kernel has a giant hole here, which makes it far
++ * more likely.
++ */
++
++ /*
++ * When this was migrate_enable() but we no longer have an
++ * @pending, a concurrent SCA 'fixed' things and we should be
++ * valid again. Nothing to do.
++ */
++ if (!pending) {
++ WARN_ON_ONCE(!is_cpu_allowed(p, cpu_of(rq)));
++ goto out;
+ }
++
++ /*
++ * When migrate_enable() hits a rq mis-match we can't reliably
++ * determine is_migration_disabled() and so have to chase after
++ * it.
++ */
++ task_rq_unlock(rq, p, &rf);
++ stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
++ &pending->arg, &pending->stop_work);
++ return 0;
}
+- rq_unlock(rq, &rf);
+- raw_spin_unlock(&p->pi_lock);
+out:
- rq_unlock(rq, &rf);
- raw_spin_unlock(&p->pi_lock);
--
- local_irq_enable();
++ task_rq_unlock(rq, p, &rf);
+
+ if (complete)
-+ complete_all(arg->done);
++ complete_all(&pending->done);
+
++ /* For pending->{arg,stop_work} */
++ pending = arg->pending;
++ if (pending && refcount_dec_and_test(&pending->refs))
++ wake_up_var(&pending->refs);
+
+- local_irq_enable();
return 0;
}
-@@ -1944,6 +1969,111 @@ void do_set_cpus_allowed(struct task_str
- __do_set_cpus_allowed(p, new_mask, 0);
+@@ -1945,6 +2029,110 @@ void do_set_cpus_allowed(struct task_str
}
-+struct set_affinity_pending {
-+ refcount_t refs;
-+ struct completion done;
-+ struct cpu_stop_work stop_work;
-+ struct migration_arg arg;
-+};
-+
-+/*
+ /*
+ * This function is wildly self concurrent, consider at least 3 times.
+ */
-+static int affine_move_task(struct rq *rq, struct rq_flags *rf,
-+ struct task_struct *p, int dest_cpu, unsigned int flags)
++static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf,
++ int dest_cpu, unsigned int flags)
+{
+ struct set_affinity_pending my_pending = { }, *pending = NULL;
+ struct migration_arg arg = {
@@ -156,6 +235,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
+ pending = p->migration_pending;
+ if (pending) {
++ refcount_inc(&pending->refs);
+ p->migration_pending = NULL;
+ complete = true;
+ }
@@ -194,12 +274,17 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ if (WARN_ON_ONCE(!pending))
+ return -EINVAL;
+
-+ arg.done = &pending->done;
-+
+ if (flags & SCA_MIGRATE_ENABLE) {
+
++ refcount_inc(&pending->refs); /* pending->{arg,stop_work} */
+ task_rq_unlock(rq, p, rf);
-+ pending->arg = arg;
++
++ pending->arg = (struct migration_arg) {
++ .task = p,
++ .dest_cpu = -1,
++ .pending = pending,
++ };
++
+ stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
+ &pending->arg, &pending->stop_work);
+
@@ -237,17 +322,18 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ return 0;
+}
+
- /*
++/*
* Change a given task's CPU affinity. Migrate the thread to a
* proper CPU and schedule it away if the CPU it's executing on
-@@ -2013,23 +2143,8 @@ static int __set_cpus_allowed_ptr(struct
+ * is removed from the allowed bitmask.
+@@ -2013,23 +2201,8 @@ static int __set_cpus_allowed_ptr(struct
p->nr_cpus_allowed != 1);
}
- /* Can the task run on the task's current CPU? If so, we're done */
- if (cpumask_test_cpu(task_cpu(p), new_mask))
- goto out;
-+ return affine_move_task(rq, &rf, p, dest_cpu, flags);
++ return affine_move_task(rq, p, &rf, dest_cpu, flags);
- if (task_running(rq, p) || p->state == TASK_WAKING) {
- struct migration_arg arg = { p, dest_cpu };
@@ -265,3 +351,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
out:
task_rq_unlock(rq, p, &rf);
+@@ -3209,6 +3382,7 @@ static void __sched_fork(unsigned long c
+ init_numa_balancing(clone_flags, p);
+ #ifdef CONFIG_SMP
+ p->wake_entry.u_flags = CSD_TYPE_TTWU;
++ p->migration_pending = NULL;
+ #endif
+ }
+
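
Stripped of the scheduler details, the synchronization skeleton these hunks
build is: a stack-allocated set_affinity_pending carries a refcount and a
completion; the stopper signals the completion and drops its reference, and
the stack owner may only return once every reference is gone. A sketch, not
the applied code:

	/* waiter side, e.g. __set_cpus_allowed_ptr() */
	wait_for_completion(&pending->done);	/* stopper finished migrating */
	if (refcount_dec_and_test(&pending->refs))
		wake_up_var(&pending->refs);
	/* stack owner only: wait until all users dropped their references */
	wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs));

	/* stopper side, migration_cpu_stop() */
	if (complete)
		complete_all(&pending->done);
	pending = arg->pending;			/* for pending->{arg,stop_work} */
	if (pending && refcount_dec_and_test(&pending->refs))
		wake_up_var(&pending->refs);
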
diff --git a/debian/patches-rt/0010-scripts-gdb-update-for-lockless-printk-ringbuffer.patch b/debian/patches-rt/0010-scripts-gdb-update-for-lockless-printk-ringbuffer.patch
new file mode 100644
index 000000000..6ba2028f4
--- /dev/null
+++ b/debian/patches-rt/0010-scripts-gdb-update-for-lockless-printk-ringbuffer.patch
@@ -0,0 +1,389 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Fri, 14 Aug 2020 23:31:25 +0206
+Subject: [PATCH 10/25] scripts/gdb: update for lockless printk ringbuffer
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+With the introduction of the lockless printk ringbuffer, the data
+structure for the kernel log buffer was changed. Update the gdb
+scripts to be able to parse/print the new log buffer structure.
+
+Fixes: 896fbe20b4e2333fb55 ("printk: use the lockless ringbuffer")
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reported-by: Nick Desaulniers <ndesaulniers@google.com>
+Tested-by: Nick Desaulniers <ndesaulniers@google.com>
+Tested-by: Petr Mladek <pmladek@suse.com>
+[akpm@linux-foundation.org: A typo fix.]
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20200814212525.6118-3-john.ogness@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ Documentation/admin-guide/kdump/gdbmacros.txt | 151 ++++++++++++++++++--------
+ scripts/gdb/linux/dmesg.py | 139 +++++++++++++++++------
+ 2 files changed, 208 insertions(+), 82 deletions(-)
+
+--- a/Documentation/admin-guide/kdump/gdbmacros.txt
++++ b/Documentation/admin-guide/kdump/gdbmacros.txt
+@@ -170,57 +170,111 @@ document trapinfo
+ address the kernel panicked.
+ end
+
+-define dump_log_idx
+- set $idx = $arg0
++define dump_record
++ set var $desc = $arg0
+ if ($argc > 1)
+- set $prev_flags = $arg1
++ set var $prev_flags = $arg1
+ else
+- set $prev_flags = 0
++ set var $prev_flags = 0
+ end
+- set $msg = ((struct printk_log *) (log_buf + $idx))
+- set $prefix = 1
+- set $newline = 1
+- set $log = log_buf + $idx + sizeof(*$msg)
+
+-	# prev & LOG_CONT && !(msg->flags & LOG_PREFIX)
+- if (($prev_flags & 8) && !($msg->flags & 4))
+- set $prefix = 0
++ set var $info = &$desc->info
++ set var $prefix = 1
++ set var $newline = 1
++
++ set var $begin = $desc->text_blk_lpos.begin % (1U << prb->text_data_ring.size_bits)
++ set var $next = $desc->text_blk_lpos.next % (1U << prb->text_data_ring.size_bits)
++
++ # handle data-less record
++ if ($begin & 1)
++ set var $text_len = 0
++ set var $log = ""
++ else
++ # handle wrapping data block
++ if ($begin > $next)
++ set var $begin = 0
++ end
++
++ # skip over descriptor id
++ set var $begin = $begin + sizeof(long)
++
++ # handle truncated message
++ if ($next - $begin < $info->text_len)
++ set var $text_len = $next - $begin
++ else
++ set var $text_len = $info->text_len
++ end
++
++ set var $log = &prb->text_data_ring.data[$begin]
++ end
++
++	# prev & LOG_CONT && !(info->flags & LOG_PREFIX)
++ if (($prev_flags & 8) && !($info->flags & 4))
++ set var $prefix = 0
+ end
+
+- # msg->flags & LOG_CONT
+- if ($msg->flags & 8)
++ # info->flags & LOG_CONT
++ if ($info->flags & 8)
+ # (prev & LOG_CONT && !(prev & LOG_NEWLINE))
+ if (($prev_flags & 8) && !($prev_flags & 2))
+- set $prefix = 0
++ set var $prefix = 0
+ end
+- # (!(msg->flags & LOG_NEWLINE))
+- if (!($msg->flags & 2))
+- set $newline = 0
++ # (!(info->flags & LOG_NEWLINE))
++ if (!($info->flags & 2))
++ set var $newline = 0
+ end
+ end
+
+ if ($prefix)
+- printf "[%5lu.%06lu] ", $msg->ts_nsec / 1000000000, $msg->ts_nsec % 1000000000
++ printf "[%5lu.%06lu] ", $info->ts_nsec / 1000000000, $info->ts_nsec % 1000000000
+ end
+- if ($msg->text_len != 0)
+- eval "printf \"%%%d.%ds\", $log", $msg->text_len, $msg->text_len
++ if ($text_len)
++ eval "printf \"%%%d.%ds\", $log", $text_len, $text_len
+ end
+ if ($newline)
+ printf "\n"
+ end
+- if ($msg->dict_len > 0)
+- set $dict = $log + $msg->text_len
+- set $idx = 0
+- set $line = 1
+- while ($idx < $msg->dict_len)
++
++ # handle dictionary data
++
++ set var $begin = $desc->dict_blk_lpos.begin % (1U << prb->dict_data_ring.size_bits)
++ set var $next = $desc->dict_blk_lpos.next % (1U << prb->dict_data_ring.size_bits)
++
++ # handle data-less record
++ if ($begin & 1)
++ set var $dict_len = 0
++ set var $dict = ""
++ else
++ # handle wrapping data block
++ if ($begin > $next)
++ set var $begin = 0
++ end
++
++ # skip over descriptor id
++ set var $begin = $begin + sizeof(long)
++
++ # handle truncated message
++ if ($next - $begin < $info->dict_len)
++ set var $dict_len = $next - $begin
++ else
++ set var $dict_len = $info->dict_len
++ end
++
++ set var $dict = &prb->dict_data_ring.data[$begin]
++ end
++
++ if ($dict_len > 0)
++ set var $idx = 0
++ set var $line = 1
++ while ($idx < $dict_len)
+ if ($line)
+ printf " "
+- set $line = 0
++ set var $line = 0
+ end
+- set $c = $dict[$idx]
++ set var $c = $dict[$idx]
+ if ($c == '\0')
+ printf "\n"
+- set $line = 1
++ set var $line = 1
+ else
+ if ($c < ' ' || $c >= 127 || $c == '\\')
+ printf "\\x%02x", $c
+@@ -228,33 +282,40 @@ define dump_log_idx
+ printf "%c", $c
+ end
+ end
+- set $idx = $idx + 1
++ set var $idx = $idx + 1
+ end
+ printf "\n"
+ end
+ end
+-document dump_log_idx
+- Dump a single log given its index in the log buffer. The first
+- parameter is the index into log_buf, the second is optional and
+-	specifies the previous log buffer's flags, used for properly
+- formatting continued lines.
++document dump_record
++ Dump a single record. The first parameter is the descriptor
++ sequence number, the second is optional and specifies the
++ previous record's flags, used for properly formatting
++ continued lines.
+ end
+
+ define dmesg
+- set $i = log_first_idx
+- set $end_idx = log_first_idx
+- set $prev_flags = 0
++ set var $desc_committed = 1UL << ((sizeof(long) * 8) - 1)
++ set var $flags_mask = 3UL << ((sizeof(long) * 8) - 2)
++ set var $id_mask = ~$flags_mask
++
++ set var $desc_count = 1U << prb->desc_ring.count_bits
++ set var $prev_flags = 0
++
++ set var $id = prb->desc_ring.tail_id.counter
++ set var $end_id = prb->desc_ring.head_id.counter
+
+ while (1)
+- set $msg = ((struct printk_log *) (log_buf + $i))
+- if ($msg->len == 0)
+- set $i = 0
+- else
+- dump_log_idx $i $prev_flags
+- set $i = $i + $msg->len
+- set $prev_flags = $msg->flags
++ set var $desc = &prb->desc_ring.descs[$id % $desc_count]
++
++ # skip non-committed record
++ if (($desc->state_var.counter & $flags_mask) == $desc_committed)
++ dump_record $desc $prev_flags
++ set var $prev_flags = $desc->info.flags
+ end
+- if ($i == $end_idx)
++
++ set var $id = ($id + 1) & $id_mask
++ if ($id == $end_id)
+ loop_break
+ end
+ end
+--- a/scripts/gdb/linux/dmesg.py
++++ b/scripts/gdb/linux/dmesg.py
+@@ -16,8 +16,13 @@ import sys
+
+ from linux import utils
+
+-printk_log_type = utils.CachedType("struct printk_log")
+-
++printk_info_type = utils.CachedType("struct printk_info")
++prb_data_blk_lpos_type = utils.CachedType("struct prb_data_blk_lpos")
++prb_desc_type = utils.CachedType("struct prb_desc")
++prb_desc_ring_type = utils.CachedType("struct prb_desc_ring")
++prb_data_ring_type = utils.CachedType("struct prb_data_ring")
++printk_ringbuffer_type = utils.CachedType("struct printk_ringbuffer")
++atomic_long_type = utils.CachedType("atomic_long_t")
+
+ class LxDmesg(gdb.Command):
+ """Print Linux kernel log buffer."""
+@@ -26,44 +31,102 @@ printk_log_type = utils.CachedType("stru
+ super(LxDmesg, self).__init__("lx-dmesg", gdb.COMMAND_DATA)
+
+ def invoke(self, arg, from_tty):
+- log_buf_addr = int(str(gdb.parse_and_eval(
+- "(void *)'printk.c'::log_buf")).split()[0], 16)
+- log_first_idx = int(gdb.parse_and_eval("'printk.c'::log_first_idx"))
+- log_next_idx = int(gdb.parse_and_eval("'printk.c'::log_next_idx"))
+- log_buf_len = int(gdb.parse_and_eval("'printk.c'::log_buf_len"))
+-
+ inf = gdb.inferiors()[0]
+- start = log_buf_addr + log_first_idx
+- if log_first_idx < log_next_idx:
+- log_buf_2nd_half = -1
+- length = log_next_idx - log_first_idx
+- log_buf = utils.read_memoryview(inf, start, length).tobytes()
+- else:
+- log_buf_2nd_half = log_buf_len - log_first_idx
+- a = utils.read_memoryview(inf, start, log_buf_2nd_half)
+- b = utils.read_memoryview(inf, log_buf_addr, log_next_idx)
+- log_buf = a.tobytes() + b.tobytes()
+-
+- length_offset = printk_log_type.get_type()['len'].bitpos // 8
+- text_len_offset = printk_log_type.get_type()['text_len'].bitpos // 8
+- time_stamp_offset = printk_log_type.get_type()['ts_nsec'].bitpos // 8
+- text_offset = printk_log_type.get_type().sizeof
+-
+- pos = 0
+- while pos < log_buf.__len__():
+- length = utils.read_u16(log_buf, pos + length_offset)
+- if length == 0:
+- if log_buf_2nd_half == -1:
+- gdb.write("Corrupted log buffer!\n")
++
++ # read in prb structure
++ prb_addr = int(str(gdb.parse_and_eval("(void *)'printk.c'::prb")).split()[0], 16)
++ sz = printk_ringbuffer_type.get_type().sizeof
++ prb = utils.read_memoryview(inf, prb_addr, sz).tobytes()
++
++ # read in descriptor ring structure
++ off = printk_ringbuffer_type.get_type()['desc_ring'].bitpos // 8
++ addr = prb_addr + off
++ sz = prb_desc_ring_type.get_type().sizeof
++ desc_ring = utils.read_memoryview(inf, addr, sz).tobytes()
++
++ # read in descriptor array
++ off = prb_desc_ring_type.get_type()['count_bits'].bitpos // 8
++ desc_ring_count = 1 << utils.read_u32(desc_ring, off)
++ desc_sz = prb_desc_type.get_type().sizeof
++ off = prb_desc_ring_type.get_type()['descs'].bitpos // 8
++ addr = utils.read_ulong(desc_ring, off)
++ descs = utils.read_memoryview(inf, addr, desc_sz * desc_ring_count).tobytes()
++
++ # read in text data ring structure
++ off = printk_ringbuffer_type.get_type()['text_data_ring'].bitpos // 8
++ addr = prb_addr + off
++ sz = prb_data_ring_type.get_type().sizeof
++ text_data_ring = utils.read_memoryview(inf, addr, sz).tobytes()
++
++ # read in text data
++ off = prb_data_ring_type.get_type()['size_bits'].bitpos // 8
++ text_data_sz = 1 << utils.read_u32(text_data_ring, off)
++ off = prb_data_ring_type.get_type()['data'].bitpos // 8
++ addr = utils.read_ulong(text_data_ring, off)
++ text_data = utils.read_memoryview(inf, addr, text_data_sz).tobytes()
++
++ counter_off = atomic_long_type.get_type()['counter'].bitpos // 8
++
++ sv_off = prb_desc_type.get_type()['state_var'].bitpos // 8
++
++ off = prb_desc_type.get_type()['text_blk_lpos'].bitpos // 8
++ begin_off = off + (prb_data_blk_lpos_type.get_type()['begin'].bitpos // 8)
++ next_off = off + (prb_data_blk_lpos_type.get_type()['next'].bitpos // 8)
++
++ off = prb_desc_type.get_type()['info'].bitpos // 8
++ ts_off = off + printk_info_type.get_type()['ts_nsec'].bitpos // 8
++ len_off = off + printk_info_type.get_type()['text_len'].bitpos // 8
++
++ # definitions from kernel/printk/printk_ringbuffer.h
++ desc_sv_bits = utils.get_long_type().sizeof * 8
++ desc_committed_mask = 1 << (desc_sv_bits - 1)
++ desc_reuse_mask = 1 << (desc_sv_bits - 2)
++ desc_flags_mask = desc_committed_mask | desc_reuse_mask
++ desc_id_mask = ~desc_flags_mask
++
++ # read in tail and head descriptor ids
++ off = prb_desc_ring_type.get_type()['tail_id'].bitpos // 8
++ tail_id = utils.read_u64(desc_ring, off + counter_off)
++ off = prb_desc_ring_type.get_type()['head_id'].bitpos // 8
++ head_id = utils.read_u64(desc_ring, off + counter_off)
++
++ did = tail_id
++ while True:
++ ind = did % desc_ring_count
++ desc_off = desc_sz * ind
++
++ # skip non-committed record
++ state = utils.read_u64(descs, desc_off + sv_off + counter_off) & desc_flags_mask
++ if state != desc_committed_mask:
++ if did == head_id:
+ break
+- pos = log_buf_2nd_half
++ did = (did + 1) & desc_id_mask
+ continue
+
+- text_len = utils.read_u16(log_buf, pos + text_len_offset)
+- text_start = pos + text_offset
+- text = log_buf[text_start:text_start + text_len].decode(
+- encoding='utf8', errors='replace')
+- time_stamp = utils.read_u64(log_buf, pos + time_stamp_offset)
++ begin = utils.read_ulong(descs, desc_off + begin_off) % text_data_sz
++ end = utils.read_ulong(descs, desc_off + next_off) % text_data_sz
++
++ # handle data-less record
++ if begin & 1 == 1:
++ text = ""
++ else:
++ # handle wrapping data block
++ if begin > end:
++ begin = 0
++
++ # skip over descriptor id
++ text_start = begin + utils.get_long_type().sizeof
++
++ text_len = utils.read_u16(descs, desc_off + len_off)
++
++ # handle truncated message
++ if end - text_start < text_len:
++ text_len = end - text_start
++
++ text = text_data[text_start:text_start + text_len].decode(
++ encoding='utf8', errors='replace')
++
++ time_stamp = utils.read_u64(descs, desc_off + ts_off)
+
+ for line in text.splitlines():
+ msg = u"[{time:12.6f}] {line}\n".format(
+@@ -75,7 +138,9 @@ printk_log_type = utils.CachedType("stru
+ msg = msg.encode(encoding='utf8', errors='replace')
+ gdb.write(msg)
+
+- pos += length
++ if did == head_id:
++ break
++ did = (did + 1) & desc_id_mask
+
+
+ LxDmesg()
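
As a side note for readers of the script above: the bit layout it decodes by hand comes from kernel/printk/printk_ringbuffer.h. A minimal C sketch of the same state test, under the assumption that the helper names are illustrative and not the kernel's actual inlines:

#include <limits.h>     /* CHAR_BIT */

#define DESC_SV_BITS        (sizeof(unsigned long) * CHAR_BIT)
#define DESC_COMMITTED_MASK (1UL << (DESC_SV_BITS - 1))
#define DESC_REUSE_MASK     (1UL << (DESC_SV_BITS - 2))
#define DESC_FLAGS_MASK     (DESC_COMMITTED_MASK | DESC_REUSE_MASK)
#define DESC_ID_MASK        (~DESC_FLAGS_MASK)

/* A record is printable only if committed and not yet marked for reuse. */
static int desc_is_committed(unsigned long state_val)
{
        return (state_val & DESC_FLAGS_MASK) == DESC_COMMITTED_MASK;
}

/* Descriptor ids wrap within the id portion of the state value. */
static unsigned long next_desc_id(unsigned long id)
{
        return (id + 1) & DESC_ID_MASK;
}

This is exactly the iteration the script performs from tail_id to head_id, skipping any descriptor whose state is not committed.
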
diff --git a/debian/patches-rt/0010-seqlock-Use-unique-prefix-for-seqcount_t-property-ac.patch b/debian/patches-rt/0010-seqlock-Use-unique-prefix-for-seqcount_t-property-ac.patch
index fd626ff48..dd74d3ac8 100644
--- a/debian/patches-rt/0010-seqlock-Use-unique-prefix-for-seqcount_t-property-ac.patch
+++ b/debian/patches-rt/0010-seqlock-Use-unique-prefix-for-seqcount_t-property-ac.patch
@@ -2,7 +2,7 @@ From: "Ahmed S. Darwish" <a.darwish@linutronix.de>
Date: Fri, 4 Sep 2020 17:32:28 +0200
Subject: [PATCH 10/13] seqlock: Use unique prefix for seqcount_t property
accessors
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
At seqlock.h, the following set of functions:
diff --git a/debian/patches-rt/0012-locking-split-out-the-rbtree-definition.patch b/debian/patches-rt/0011-locking-split-out-the-rbtree-definition.patch
index 2d33f0b56..5cc6b67c5 100644
--- a/debian/patches-rt/0012-locking-split-out-the-rbtree-definition.patch
+++ b/debian/patches-rt/0011-locking-split-out-the-rbtree-definition.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 14 Aug 2020 17:08:41 +0200
-Subject: [PATCH 12/23] locking: split out the rbtree definition
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Subject: [PATCH 11/22] locking: split out the rbtree definition
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
rtmutex.h needs the definition for rb_root_cached. By including kernel.h
we will get to spinlock.h which requires rtmutex.h again.
diff --git a/debian/patches-rt/0011-printk-introduce-kernel-sync-mode.patch b/debian/patches-rt/0011-printk-introduce-kernel-sync-mode.patch
new file mode 100644
index 000000000..d2a2254bc
--- /dev/null
+++ b/debian/patches-rt/0011-printk-introduce-kernel-sync-mode.patch
@@ -0,0 +1,325 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Wed, 14 Oct 2020 20:40:05 +0200
+Subject: [PATCH 11/15] printk: introduce kernel sync mode
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+When the kernel performs an OOPS, enter into "sync mode":
+
+- only atomic consoles (write_atomic() callback) will print
+- printing occurs within vprintk_store() instead of console_unlock()
+
+Change @console_seq to atomic64_t for atomic access.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 165 ++++++++++++++++++++++++++++++++++++++++++++-----
+ 1 file changed, 150 insertions(+), 15 deletions(-)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -44,6 +44,7 @@
+ #include <linux/irq_work.h>
+ #include <linux/ctype.h>
+ #include <linux/uio.h>
++#include <linux/clocksource.h>
+ #include <linux/sched/clock.h>
+ #include <linux/sched/debug.h>
+ #include <linux/sched/task_stack.h>
+@@ -78,6 +79,9 @@ EXPORT_SYMBOL(ignore_console_lock_warnin
+ int oops_in_progress;
+ EXPORT_SYMBOL(oops_in_progress);
+
++/* Set to enable sync mode. Once set, it is never cleared. */
++static bool sync_mode;
++
+ /*
+ * console_sem protects the console_drivers list, and also
+ * provides serialisation for access to the entire console
+@@ -370,12 +374,13 @@ static u64 syslog_seq;
+ static size_t syslog_partial;
+ static bool syslog_time;
+
+-/* All 3 protected by @console_sem. */
+-/* the next printk record to write to the console */
+-static u64 console_seq;
++/* Both protected by @console_sem. */
+ static u64 exclusive_console_stop_seq;
+ static unsigned long console_dropped;
+
++/* the next printk record to write to the console */
++static atomic64_t console_seq = ATOMIC64_INIT(0);
++
+ /* the next printk record to read after the last 'clear' command */
+ static atomic64_t clear_seq = ATOMIC64_INIT(0);
+
+@@ -1767,6 +1772,110 @@ static inline void printk_delay(int leve
+ }
+ }
+
++static bool kernel_sync_mode(void)
++{
++ return (oops_in_progress || sync_mode);
++}
++
++static bool console_can_sync(struct console *con)
++{
++ if (!(con->flags & CON_ENABLED))
++ return false;
++ if (con->write_atomic && kernel_sync_mode())
++ return true;
++ return false;
++}
++
++static bool call_sync_console_driver(struct console *con, const char *text, size_t text_len)
++{
++ if (!(con->flags & CON_ENABLED))
++ return false;
++ if (con->write_atomic && kernel_sync_mode())
++ con->write_atomic(con, text, text_len);
++ else
++ return false;
++
++ return true;
++}
++
++static bool any_console_can_sync(void)
++{
++ struct console *con;
++
++ for_each_console(con) {
++ if (console_can_sync(con))
++ return true;
++ }
++ return false;
++}
++
++static bool have_atomic_console(void)
++{
++ struct console *con;
++
++ for_each_console(con) {
++ if (!(con->flags & CON_ENABLED))
++ continue;
++ if (con->write_atomic)
++ return true;
++ }
++ return false;
++}
++
++static bool print_sync(struct console *con, char *buf, size_t buf_size, u64 *seq)
++{
++ struct printk_info info;
++ struct printk_record r;
++ size_t text_len;
++
++ prb_rec_init_rd(&r, &info, buf, buf_size);
++
++ if (!prb_read_valid(prb, *seq, &r))
++ return false;
++
++ text_len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time);
++
++ if (!call_sync_console_driver(con, buf, text_len))
++ return false;
++
++ *seq = r.info->seq;
++
++ touch_softlockup_watchdog_sync();
++ clocksource_touch_watchdog();
++ rcu_cpu_stall_reset();
++ touch_nmi_watchdog();
++
++ if (text_len)
++ printk_delay(r.info->level);
++
++ return true;
++}
++
++static void print_sync_until(u64 seq, struct console *con, char *buf, size_t buf_size)
++{
++ unsigned int flags;
++ u64 printk_seq;
++
++ if (!con) {
++ for_each_console(con) {
++ if (console_can_sync(con))
++ print_sync_until(seq, con, buf, buf_size);
++ }
++ return;
++ }
++
++ console_atomic_lock(&flags);
++ for (;;) {
++ printk_seq = atomic64_read(&console_seq);
++ if (printk_seq >= seq)
++ break;
++ if (!print_sync(con, buf, buf_size, &printk_seq))
++ break;
++ atomic64_set(&console_seq, printk_seq + 1);
++ }
++ console_atomic_unlock(flags);
++}
++
+ /*
+ * Special console_lock variants that help to reduce the risk of soft-lockups.
+ * They allow to pass console_lock to another printk() call using a busy wait.
+@@ -1941,6 +2050,8 @@ static void call_console_drivers(const c
+ if (!cpu_online(smp_processor_id()) &&
+ !(con->flags & CON_ANYTIME))
+ continue;
++ if (kernel_sync_mode())
++ continue;
+ if (con->flags & CON_EXTENDED)
+ con->write(con, ext_text, ext_len);
+ else {
+@@ -1964,6 +2075,7 @@ int vprintk_store(int facility, int leve
+ const u32 caller_id = printk_caller_id();
+ struct prb_reserved_entry e;
+ enum log_flags lflags = 0;
++ bool final_commit = false;
+ unsigned long irqflags;
+ struct printk_record r;
+ u16 trunc_msg_len = 0;
+@@ -2027,6 +2139,7 @@ int vprintk_store(int facility, int leve
+ if (lflags & LOG_NEWLINE) {
+ r.info->flags |= LOG_NEWLINE;
+ prb_final_commit(&e);
++ final_commit = true;
+ } else {
+ prb_commit(&e);
+ }
+@@ -2068,10 +2181,15 @@ int vprintk_store(int facility, int leve
+ prb_commit(&e);
+ } else {
+ prb_final_commit(&e);
++ final_commit = true;
+ }
+
+ ret = text_len + trunc_msg_len;
+ out:
++ /* only the kernel may perform synchronous printing */
++ if (facility == 0 && final_commit && any_console_can_sync())
++ print_sync_until(seq + 1, NULL, text, PREFIX_MAX + LOG_LINE_MAX);
++
+ put_sprint_buf(sprint_id, irqflags);
+ return ret;
+ }
+@@ -2176,7 +2294,7 @@ EXPORT_SYMBOL(printk);
+ #define prb_first_valid_seq(rb) 0
+
+ static u64 syslog_seq;
+-static u64 console_seq;
++static atomic64_t console_seq = ATOMIC64_INIT(0);
+ static u64 exclusive_console_stop_seq;
+ static unsigned long console_dropped;
+
+@@ -2460,6 +2578,8 @@ static int have_callable_console(void)
+ */
+ static inline int can_use_console(void)
+ {
++ if (kernel_sync_mode())
++ return false;
+ return cpu_online(raw_smp_processor_id()) || have_callable_console();
+ }
+
+@@ -2485,6 +2605,7 @@ void console_unlock(void)
+ bool do_cond_resched, retry;
+ struct printk_info info;
+ struct printk_record r;
++ u64 seq;
+
+ if (console_suspended) {
+ up_console_sem();
+@@ -2528,12 +2649,14 @@ void console_unlock(void)
+
+ printk_safe_enter_irqsave(flags);
+ skip:
+- if (!prb_read_valid(prb, console_seq, &r))
++ seq = atomic64_read(&console_seq);
++ if (!prb_read_valid(prb, seq, &r))
+ break;
+
+- if (console_seq != r.info->seq) {
+- console_dropped += r.info->seq - console_seq;
+- console_seq = r.info->seq;
++ if (seq != r.info->seq) {
++ console_dropped += r.info->seq - seq;
++ atomic64_set(&console_seq, r.info->seq);
++ seq = r.info->seq;
+ }
+
+ if (suppress_message_printing(r.info->level)) {
+@@ -2542,13 +2665,13 @@ void console_unlock(void)
+ * directly to the console when we received it, and
+ * record that has level above the console loglevel.
+ */
+- console_seq++;
++ atomic64_set(&console_seq, seq + 1);
+ goto skip;
+ }
+
+ /* Output to all consoles once old messages replayed. */
+ if (unlikely(exclusive_console &&
+- console_seq >= exclusive_console_stop_seq)) {
++ seq >= exclusive_console_stop_seq)) {
+ exclusive_console = NULL;
+ }
+
+@@ -2569,7 +2692,7 @@ void console_unlock(void)
+ len = record_print_text(&r,
+ console_msg_format & MSG_FORMAT_SYSLOG,
+ printk_time);
+- console_seq++;
++ atomic64_set(&console_seq, seq + 1);
+
+ /*
+ * While actively printing out messages, if another printk()
+@@ -2604,7 +2727,7 @@ void console_unlock(void)
+ * there's a new owner and the console_unlock() from them will do the
+ * flush, no worries.
+ */
+- retry = prb_read_valid(prb, console_seq, NULL);
++ retry = prb_read_valid(prb, atomic64_read(&console_seq), NULL);
+ printk_safe_exit_irqrestore(flags);
+
+ if (retry && console_trylock())
+@@ -2669,7 +2792,7 @@ void console_flush_on_panic(enum con_flu
+ console_may_schedule = 0;
+
+ if (mode == CONSOLE_REPLAY_ALL)
+- console_seq = prb_first_valid_seq(prb);
++ atomic64_set(&console_seq, prb_first_valid_seq(prb));
+ console_unlock();
+ }
+
+@@ -2904,8 +3027,8 @@ void register_console(struct console *ne
+ * ignores console_lock.
+ */
+ exclusive_console = newcon;
+- exclusive_console_stop_seq = console_seq;
+- console_seq = syslog_seq;
++ exclusive_console_stop_seq = atomic64_read(&console_seq);
++ atomic64_set(&console_seq, syslog_seq);
+ syslog_unlock_irqrestore(flags);
+ }
+ console_unlock();
+@@ -3273,6 +3396,18 @@ void kmsg_dump(enum kmsg_dump_reason rea
+ {
+ struct kmsg_dumper *dumper;
+
++ if (!oops_in_progress) {
++ /*
++ * If atomic consoles are available, activate kernel sync mode
++ * to make sure any final messages are visible. The trailing
++ * printk message is important to flush any pending messages.
++ */
++ if (have_atomic_console()) {
++ sync_mode = true;
++ pr_info("enabled sync mode\n");
++ }
++ }
++
+ rcu_read_lock();
+ list_for_each_entry_rcu(dumper, &dump_list, list) {
+ enum kmsg_dump_reason max_reason = dumper->max_reason;
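
Sync mode only ever writes through a console's write_atomic() callback, so a driver opts in simply by providing one. A hedged sketch of such a registration; my_uart_write_buffered() and my_uart_poll_put_char() are hypothetical placeholders, not an existing API:

static void my_console_write(struct console *con, const char *s,
                             unsigned int count)
{
        /* Normal output path; may take locks or sleep. */
        my_uart_write_buffered(s, count);               /* hypothetical */
}

static void my_console_write_atomic(struct console *con, const char *s,
                                    unsigned int count)
{
        unsigned int i;

        /* Must work from any context: polled output, no locks, no sleep. */
        for (i = 0; i < count; i++)
                my_uart_poll_put_char(s[i]);            /* hypothetical */
}

static struct console my_console = {
        .name           = "mycon",
        .write          = my_console_write,
        .write_atomic   = my_console_write_atomic,
        .flags          = CON_PRINTBUFFER,
        .index          = -1,
};

With write_atomic set, console_can_sync() accepts the console and OOPS-time messages are emitted directly from vprintk_store() under console_atomic_lock().
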
diff --git a/debian/patches-rt/0011-printk-ringbuffer-fix-setting-state-in-desc_read.patch b/debian/patches-rt/0011-printk-ringbuffer-fix-setting-state-in-desc_read.patch
new file mode 100644
index 000000000..9e3ab7cf8
--- /dev/null
+++ b/debian/patches-rt/0011-printk-ringbuffer-fix-setting-state-in-desc_read.patch
@@ -0,0 +1,75 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 14 Sep 2020 11:54:02 +0206
+Subject: [PATCH 11/25] printk: ringbuffer: fix setting state in desc_read()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+It is expected that desc_read() will always set at least the
+@state_var field. However, if the descriptor is in an inconsistent
+state, no fields are set.
+
+Also, the second load of @state_var is not stored in @desc_out and
+so might not match the state value that is returned.
+
+Always set the last loaded @state_var into @desc_out, regardless of
+the descriptor consistency.
+
+Fixes: b6cf8b3f3312 ("printk: add lockless ringbuffer")
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20200914094803.27365-1-john.ogness@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk_ringbuffer.c | 26 +++++++++++++++++++-------
+ 1 file changed, 19 insertions(+), 7 deletions(-)
+
+--- a/kernel/printk/printk_ringbuffer.c
++++ b/kernel/printk/printk_ringbuffer.c
+@@ -368,9 +368,9 @@ static enum desc_state get_desc_state(un
+ }
+
+ /*
+- * Get a copy of a specified descriptor and its queried state. A descriptor
+- * that is not in the committed or reusable state must be considered garbage
+- * by the reader.
++ * Get a copy of a specified descriptor and return its queried state. If the
++ * descriptor is in an inconsistent state (miss or reserved), the caller can
++ * only expect the descriptor's @state_var field to be valid.
+ */
+ static enum desc_state desc_read(struct prb_desc_ring *desc_ring,
+ unsigned long id, struct prb_desc *desc_out)
+@@ -383,8 +383,14 @@ static enum desc_state desc_read(struct
+ /* Check the descriptor state. */
+ state_val = atomic_long_read(state_var); /* LMM(desc_read:A) */
+ d_state = get_desc_state(id, state_val);
+- if (d_state != desc_committed && d_state != desc_reusable)
+- return d_state;
++ if (d_state == desc_miss || d_state == desc_reserved) {
++ /*
++ * The descriptor is in an inconsistent state. Set at least
++ * @state_var so that the caller can see the details of
++ * the inconsistent state.
++ */
++ goto out;
++ }
+
+ /*
+ * Guarantee the state is loaded before copying the descriptor
+@@ -449,9 +455,15 @@ static enum desc_state desc_read(struct
+ */
+ smp_rmb(); /* LMM(desc_read:D) */
+
+- /* Re-check the descriptor state. */
++ /*
++ * The data has been copied. Return the current descriptor state,
++ * which may have changed since the load above.
++ */
+ state_val = atomic_long_read(state_var); /* LMM(desc_read:E) */
+- return get_desc_state(id, state_val);
++ d_state = get_desc_state(id, state_val);
++out:
++ atomic_long_set(&desc_out->state_var, state_val);
++ return d_state;
+ }
+
+ /*
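
The guarantee matters because callers can now act on @state_var even for inconsistent reads. A simplified sketch of the reader-side pattern this enables; it is a fragment, not standalone code, and the -EAGAIN policy is a hypothetical caller choice:

        struct prb_desc desc;
        enum desc_state d_state;

        d_state = desc_read(desc_ring, id, &desc);
        if (d_state == desc_miss || d_state == desc_reserved) {
                /*
                 * Inconsistent read: only desc.state_var is valid, but it
                 * tells the caller which id/state was actually observed,
                 * so it can decide whether to retry or skip the record.
                 */
                return -EAGAIN;
        }
        /* desc is a consistent snapshot (committed or reusable) here. */
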
diff --git a/debian/patches-rt/0011-sched-core-Make-migrate-disable-and-CPU-hotplug-coop.patch b/debian/patches-rt/0011-sched-core-Make-migrate-disable-and-CPU-hotplug-coop.patch
index c8cfe6995..13a128388 100644
--- a/debian/patches-rt/0011-sched-core-Make-migrate-disable-and-CPU-hotplug-coop.patch
+++ b/debian/patches-rt/0011-sched-core-Make-migrate-disable-and-CPU-hotplug-coop.patch
@@ -1,8 +1,8 @@
From: Thomas Gleixner <tglx@linutronix.de>
-Date: Mon, 5 Oct 2020 16:57:28 +0200
-Subject: [PATCH 11/17] sched/core: Make migrate disable and CPU hotplug
+Date: Fri, 23 Oct 2020 12:12:09 +0200
+Subject: [PATCH 11/19] sched/core: Make migrate disable and CPU hotplug
cooperative
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
On CPU unplug, tasks which are in a migrate-disabled region cannot be pushed
to a different CPU until they have returned to a migratable state.
@@ -60,7 +60,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
/*
-@@ -2637,6 +2650,11 @@ static inline bool is_migration_disabled
+@@ -2695,6 +2708,11 @@ static inline bool is_migration_disabled
return false;
}
@@ -72,7 +72,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
static void
-@@ -7006,15 +7024,20 @@ static bool balance_push(struct rq *rq)
+@@ -7064,15 +7082,20 @@ static void balance_push(struct rq *rq)
* Both the cpu-hotplug and stop task are in this case and are
* required to complete the hotplug process.
*/
@@ -95,7 +95,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
raw_spin_unlock(&rq->lock);
rcuwait_wake_up(&rq->hotplug_wait);
raw_spin_lock(&rq->lock);
-@@ -7063,7 +7086,8 @@ static void balance_hotplug_wait(void)
+@@ -7119,7 +7142,8 @@ static void balance_hotplug_wait(void)
{
struct rq *rq = this_rq();
@@ -105,7 +105,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
TASK_UNINTERRUPTIBLE);
}
-@@ -7309,7 +7333,7 @@ int sched_cpu_dying(unsigned int cpu)
+@@ -7364,7 +7388,7 @@ int sched_cpu_dying(unsigned int cpu)
sched_tick_stop(cpu);
rq_lock_irqsave(rq, &rf);
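
The cooperation described above revolves around the migrate_disable()/migrate_enable() pair: an outgoing CPU must wait until its last migrate-disabled task re-enables migration. A minimal usage sketch, with the per-CPU work left as a placeholder:

static void touch_percpu_state(void)
{
        migrate_disable();      /* task may be preempted, but not moved */

        /* Safe to use this_cpu_ptr() data across preemption here. */
        do_percpu_work();       /* hypothetical per-CPU work */

        migrate_enable();       /* CPU unplug may now push the task away */
}
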
diff --git a/debian/patches-rt/0011-seqlock-seqcount_t-Implement-all-read-APIs-as-statem.patch b/debian/patches-rt/0011-seqlock-seqcount_t-Implement-all-read-APIs-as-statem.patch
index a77f465d1..bbaf6707d 100644
--- a/debian/patches-rt/0011-seqlock-seqcount_t-Implement-all-read-APIs-as-statem.patch
+++ b/debian/patches-rt/0011-seqlock-seqcount_t-Implement-all-read-APIs-as-statem.patch
@@ -2,7 +2,7 @@ From: "Ahmed S. Darwish" <a.darwish@linutronix.de>
Date: Fri, 4 Sep 2020 17:32:29 +0200
Subject: [PATCH 11/13] seqlock: seqcount_t: Implement all read APIs as
statement expressions
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
The sequence counters read APIs are implemented as CPP macros, so they
can take either seqcount_t or any of the seqcount_LOCKNAME_t variants.
diff --git a/debian/patches-rt/0013-locking-rtmutex-Provide-rt_mutex_slowlock_locked.patch b/debian/patches-rt/0012-locking-rtmutex-Provide-rt_mutex_slowlock_locked.patch
index 2b17868e7..9f7415099 100644
--- a/debian/patches-rt/0013-locking-rtmutex-Provide-rt_mutex_slowlock_locked.patch
+++ b/debian/patches-rt/0012-locking-rtmutex-Provide-rt_mutex_slowlock_locked.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 12 Oct 2017 16:14:22 +0200
-Subject: [PATCH 13/23] locking/rtmutex: Provide rt_mutex_slowlock_locked()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Subject: [PATCH 12/22] locking/rtmutex: Provide rt_mutex_slowlock_locked()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
This is the inner-part of rt_mutex_slowlock(), required for rwsem-rt.
diff --git a/debian/patches-rt/0012-printk-minimize-console-locking-implementation.patch b/debian/patches-rt/0012-printk-minimize-console-locking-implementation.patch
deleted file mode 100644
index 4050ef0c3..000000000
--- a/debian/patches-rt/0012-printk-minimize-console-locking-implementation.patch
+++ /dev/null
@@ -1,330 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Tue, 12 Feb 2019 15:29:50 +0100
-Subject: [PATCH 12/25] printk: minimize console locking implementation
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-Since printing of the printk buffer is now handled by the printk
-kthread, minimize the console locking functions to just handle
-locking of the console.
-
-NOTE: With this console_flush_on_panic will no longer flush.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/printk/printk.c | 254 -------------------------------------------------
- 1 file changed, 1 insertion(+), 253 deletions(-)
-
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -226,19 +226,7 @@ static int nr_ext_console_drivers;
-
- static int __down_trylock_console_sem(unsigned long ip)
- {
-- int lock_failed;
-- unsigned long flags;
--
-- /*
-- * Here and in __up_console_sem() we need to be in safe mode,
-- * because spindump/WARN/etc from under console ->lock will
-- * deadlock in printk()->down_trylock_console_sem() otherwise.
-- */
-- printk_safe_enter_irqsave(flags);
-- lock_failed = down_trylock(&console_sem);
-- printk_safe_exit_irqrestore(flags);
--
-- if (lock_failed)
-+ if (down_trylock(&console_sem))
- return 1;
- mutex_acquire(&console_lock_dep_map, 0, 1, ip);
- return 0;
-@@ -247,13 +235,9 @@ static int __down_trylock_console_sem(un
-
- static void __up_console_sem(unsigned long ip)
- {
-- unsigned long flags;
--
- mutex_release(&console_lock_dep_map, ip);
-
-- printk_safe_enter_irqsave(flags);
- up(&console_sem);
-- printk_safe_exit_irqrestore(flags);
- }
- #define up_console_sem() __up_console_sem(_RET_IP_)
-
-@@ -1560,82 +1544,6 @@ static void format_text(struct printk_lo
- }
-
- /*
-- * Special console_lock variants that help to reduce the risk of soft-lockups.
-- * They allow to pass console_lock to another printk() call using a busy wait.
-- */
--
--#ifdef CONFIG_LOCKDEP
--static struct lockdep_map console_owner_dep_map = {
-- .name = "console_owner"
--};
--#endif
--
--static DEFINE_RAW_SPINLOCK(console_owner_lock);
--static struct task_struct *console_owner;
--static bool console_waiter;
--
--/**
-- * console_lock_spinning_enable - mark beginning of code where another
-- * thread might safely busy wait
-- *
-- * This basically converts console_lock into a spinlock. This marks
-- * the section where the console_lock owner can not sleep, because
-- * there may be a waiter spinning (like a spinlock). Also it must be
-- * ready to hand over the lock at the end of the section.
-- */
--static void console_lock_spinning_enable(void)
--{
-- raw_spin_lock(&console_owner_lock);
-- console_owner = current;
-- raw_spin_unlock(&console_owner_lock);
--
-- /* The waiter may spin on us after setting console_owner */
-- spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
--}
--
--/**
-- * console_lock_spinning_disable_and_check - mark end of code where another
-- * thread was able to busy wait and check if there is a waiter
-- *
-- * This is called at the end of the section where spinning is allowed.
-- * It has two functions. First, it is a signal that it is no longer
-- * safe to start busy waiting for the lock. Second, it checks if
-- * there is a busy waiter and passes the lock rights to her.
-- *
-- * Important: Callers lose the lock if there was a busy waiter.
-- * They must not touch items synchronized by console_lock
-- * in this case.
-- *
-- * Return: 1 if the lock rights were passed, 0 otherwise.
-- */
--static int console_lock_spinning_disable_and_check(void)
--{
-- int waiter;
--
-- raw_spin_lock(&console_owner_lock);
-- waiter = READ_ONCE(console_waiter);
-- console_owner = NULL;
-- raw_spin_unlock(&console_owner_lock);
--
-- if (!waiter) {
-- spin_release(&console_owner_dep_map, _THIS_IP_);
-- return 0;
-- }
--
-- /* The waiter is now free to continue */
-- WRITE_ONCE(console_waiter, false);
--
-- spin_release(&console_owner_dep_map, _THIS_IP_);
--
-- /*
-- * Hand off console_lock to waiter. The waiter will perform
-- * the up(). After this, the waiter is the console_lock owner.
-- */
-- mutex_release(&console_lock_dep_map, _THIS_IP_);
-- return 1;
--}
--
--/*
- * Call the console drivers, asking them to write out
- * log_buf[start] to log_buf[end - 1].
- * The console_lock must be held.
-@@ -1883,8 +1791,6 @@ static ssize_t msg_print_ext_header(char
- static ssize_t msg_print_ext_body(char *buf, size_t size,
- char *dict, size_t dict_len,
- char *text, size_t text_len) { return 0; }
--static void console_lock_spinning_enable(void) { }
--static int console_lock_spinning_disable_and_check(void) { return 0; }
- static void call_console_drivers(const char *ext_text, size_t ext_len,
- const char *text, size_t len) {}
- static size_t msg_print_text(const struct printk_log *msg, bool syslog,
-@@ -2125,35 +2031,6 @@ int is_console_locked(void)
- {
- return console_locked;
- }
--EXPORT_SYMBOL(is_console_locked);
--
--/*
-- * Check if we have any console that is capable of printing while cpu is
-- * booting or shutting down. Requires console_sem.
-- */
--static int have_callable_console(void)
--{
-- struct console *con;
--
-- for_each_console(con)
-- if ((con->flags & CON_ENABLED) &&
-- (con->flags & CON_ANYTIME))
-- return 1;
--
-- return 0;
--}
--
--/*
-- * Can we actually use the console at this time on this cpu?
-- *
-- * Console drivers may assume that per-cpu resources have been allocated. So
-- * unless they're explicitly marked as being able to cope (CON_ANYTIME) don't
-- * call them until this CPU is officially up.
-- */
--static inline int can_use_console(void)
--{
-- return cpu_online(raw_smp_processor_id()) || have_callable_console();
--}
-
- /**
- * console_unlock - unlock the console system
-@@ -2161,147 +2038,18 @@ static inline int can_use_console(void)
- * Releases the console_lock which the caller holds on the console system
- * and the console driver list.
- *
-- * While the console_lock was held, console output may have been buffered
-- * by printk(). If this is the case, console_unlock(); emits
-- * the output prior to releasing the lock.
-- *
-- * If there is output waiting, we wake /dev/kmsg and syslog() users.
-- *
- * console_unlock(); may be called from any context.
- */
- void console_unlock(void)
- {
-- static char ext_text[CONSOLE_EXT_LOG_MAX];
-- static char text[LOG_LINE_MAX + PREFIX_MAX];
-- unsigned long flags;
-- bool do_cond_resched, retry;
--
- if (console_suspended) {
- up_console_sem();
- return;
- }
-
-- /*
-- * Console drivers are called with interrupts disabled, so
-- * @console_may_schedule should be cleared before; however, we may
-- * end up dumping a lot of lines, for example, if called from
-- * console registration path, and should invoke cond_resched()
-- * between lines if allowable. Not doing so can cause a very long
-- * scheduling stall on a slow console leading to RCU stall and
-- * softlockup warnings which exacerbate the issue with more
-- * messages practically incapacitating the system.
-- *
-- * console_trylock() is not able to detect the preemptive
-- * context reliably. Therefore the value must be stored before
-- * and cleared after the the "again" goto label.
-- */
-- do_cond_resched = console_may_schedule;
--again:
-- console_may_schedule = 0;
--
-- /*
-- * We released the console_sem lock, so we need to recheck if
-- * cpu is online and (if not) is there at least one CON_ANYTIME
-- * console.
-- */
-- if (!can_use_console()) {
-- console_locked = 0;
-- up_console_sem();
-- return;
-- }
--
-- for (;;) {
-- struct printk_log *msg;
-- size_t ext_len = 0;
-- size_t len;
--
-- printk_safe_enter_irqsave(flags);
-- raw_spin_lock(&logbuf_lock);
-- if (console_seq < log_first_seq) {
-- len = snprintf(text, sizeof(text),
-- "** %llu printk messages dropped **\n",
-- log_first_seq - console_seq);
--
-- /* messages are gone, move to first one */
-- console_seq = log_first_seq;
-- console_idx = log_first_idx;
-- } else {
-- len = 0;
-- }
--skip:
-- if (console_seq == log_next_seq)
-- break;
--
-- msg = log_from_idx(console_idx);
-- if (suppress_message_printing(msg->level)) {
-- /*
-- * Skip record we have buffered and already printed
-- * directly to the console when we received it, and
-- * record that has level above the console loglevel.
-- */
-- console_idx = log_next(console_idx);
-- console_seq++;
-- goto skip;
-- }
--
-- len += msg_print_text(msg,
-- console_msg_format & MSG_FORMAT_SYSLOG,
-- printk_time, text + len, sizeof(text) - len);
-- if (nr_ext_console_drivers) {
-- ext_len = msg_print_ext_header(ext_text,
-- sizeof(ext_text),
-- msg, console_seq);
-- ext_len += msg_print_ext_body(ext_text + ext_len,
-- sizeof(ext_text) - ext_len,
-- log_dict(msg), msg->dict_len,
-- log_text(msg), msg->text_len);
-- }
-- console_idx = log_next(console_idx);
-- console_seq++;
-- raw_spin_unlock(&logbuf_lock);
--
-- /*
-- * While actively printing out messages, if another printk()
-- * were to occur on another CPU, it may wait for this one to
-- * finish. This task can not be preempted if there is a
-- * waiter waiting to take over.
-- */
-- console_lock_spinning_enable();
--
-- stop_critical_timings(); /* don't trace print latency */
-- //call_console_drivers(ext_text, ext_len, text, len);
-- start_critical_timings();
--
-- if (console_lock_spinning_disable_and_check()) {
-- printk_safe_exit_irqrestore(flags);
-- return;
-- }
--
-- printk_safe_exit_irqrestore(flags);
--
-- if (do_cond_resched)
-- cond_resched();
-- }
--
- console_locked = 0;
-
-- raw_spin_unlock(&logbuf_lock);
--
- up_console_sem();
--
-- /*
-- * Someone could have filled up the buffer again, so re-check if there's
-- * something to flush. In case we cannot trylock the console_sem again,
-- * there's a new owner and the console_unlock() from them will do the
-- * flush, no worries.
-- */
-- raw_spin_lock(&logbuf_lock);
-- retry = console_seq != log_next_seq;
-- raw_spin_unlock(&logbuf_lock);
-- printk_safe_exit_irqrestore(flags);
--
-- if (retry && console_trylock())
-- goto again;
- }
- EXPORT_SYMBOL(console_unlock);
-
diff --git a/debian/patches-rt/0012-printk-move-console-printing-to-kthreads.patch b/debian/patches-rt/0012-printk-move-console-printing-to-kthreads.patch
new file mode 100644
index 000000000..7c9aa6cdb
--- /dev/null
+++ b/debian/patches-rt/0012-printk-move-console-printing-to-kthreads.patch
@@ -0,0 +1,800 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 19 Oct 2020 22:30:38 +0206
+Subject: [PATCH 12/15] printk: move console printing to kthreads
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+Create a kthread for each console to perform console printing. Now
+all console printing is fully asynchronous except for the boot
+console and when the kernel enters sync mode (and there are atomic
+consoles available).
+
+The console_lock() and console_unlock() functions now only do what
+their name says... locking and unlocking of the console.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/console.h | 2
+ kernel/printk/printk.c | 586 +++++++++++++++---------------------------------
+ 2 files changed, 190 insertions(+), 398 deletions(-)
+
+--- a/include/linux/console.h
++++ b/include/linux/console.h
+@@ -151,6 +151,8 @@ struct console {
+ short flags;
+ short index;
+ int cflag;
++ atomic64_t printk_seq;
++ struct task_struct *thread;
+ void *data;
+ struct console *next;
+ };
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -44,6 +44,7 @@
+ #include <linux/irq_work.h>
+ #include <linux/ctype.h>
+ #include <linux/uio.h>
++#include <linux/kthread.h>
+ #include <linux/clocksource.h>
+ #include <linux/sched/clock.h>
+ #include <linux/sched/debug.h>
+@@ -271,11 +272,6 @@ static void __up_console_sem(unsigned lo
+ static int console_locked, console_suspended;
+
+ /*
+- * If exclusive_console is non-NULL then only this console is to be printed to.
+- */
+-static struct console *exclusive_console;
+-
+-/*
+ * Array of consoles built from command line options (console=)
+ */
+
+@@ -374,13 +370,6 @@ static u64 syslog_seq;
+ static size_t syslog_partial;
+ static bool syslog_time;
+
+-/* Both protected by @console_sem. */
+-static u64 exclusive_console_stop_seq;
+-static unsigned long console_dropped;
+-
+-/* the next printk record to write to the console */
+-static atomic64_t console_seq = ATOMIC64_INIT(0);
+-
+ /* the next printk record to read after the last 'clear' command */
+ static atomic64_t clear_seq = ATOMIC64_INIT(0);
+
+@@ -1783,6 +1772,8 @@ static bool console_can_sync(struct cons
+ return false;
+ if (con->write_atomic && kernel_sync_mode())
+ return true;
++ if (con->write && (con->flags & CON_BOOT) && !con->thread)
++ return true;
+ return false;
+ }
+
+@@ -1792,6 +1783,8 @@ static bool call_sync_console_driver(str
+ return false;
+ if (con->write_atomic && kernel_sync_mode())
+ con->write_atomic(con, text, text_len);
++ else if (con->write && (con->flags & CON_BOOT) && !con->thread)
++ con->write(con, text, text_len);
+ else
+ return false;
+
+@@ -1866,202 +1859,16 @@ static void print_sync_until(u64 seq, st
+
+ console_atomic_lock(&flags);
+ for (;;) {
+- printk_seq = atomic64_read(&console_seq);
++ printk_seq = atomic64_read(&con->printk_seq);
+ if (printk_seq >= seq)
+ break;
+ if (!print_sync(con, buf, buf_size, &printk_seq))
+ break;
+- atomic64_set(&console_seq, printk_seq + 1);
++ atomic64_set(&con->printk_seq, printk_seq + 1);
+ }
+ console_atomic_unlock(flags);
+ }
+
+-/*
+- * Special console_lock variants that help to reduce the risk of soft-lockups.
+- * They allow to pass console_lock to another printk() call using a busy wait.
+- */
+-
+-#ifdef CONFIG_LOCKDEP
+-static struct lockdep_map console_owner_dep_map = {
+- .name = "console_owner"
+-};
+-#endif
+-
+-static DEFINE_RAW_SPINLOCK(console_owner_lock);
+-static struct task_struct *console_owner;
+-static bool console_waiter;
+-
+-/**
+- * console_lock_spinning_enable - mark beginning of code where another
+- * thread might safely busy wait
+- *
+- * This basically converts console_lock into a spinlock. This marks
+- * the section where the console_lock owner can not sleep, because
+- * there may be a waiter spinning (like a spinlock). Also it must be
+- * ready to hand over the lock at the end of the section.
+- */
+-static void console_lock_spinning_enable(void)
+-{
+- raw_spin_lock(&console_owner_lock);
+- console_owner = current;
+- raw_spin_unlock(&console_owner_lock);
+-
+- /* The waiter may spin on us after setting console_owner */
+- spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
+-}
+-
+-/**
+- * console_lock_spinning_disable_and_check - mark end of code where another
+- * thread was able to busy wait and check if there is a waiter
+- *
+- * This is called at the end of the section where spinning is allowed.
+- * It has two functions. First, it is a signal that it is no longer
+- * safe to start busy waiting for the lock. Second, it checks if
+- * there is a busy waiter and passes the lock rights to her.
+- *
+- * Important: Callers lose the lock if there was a busy waiter.
+- * They must not touch items synchronized by console_lock
+- * in this case.
+- *
+- * Return: 1 if the lock rights were passed, 0 otherwise.
+- */
+-static int console_lock_spinning_disable_and_check(void)
+-{
+- int waiter;
+-
+- raw_spin_lock(&console_owner_lock);
+- waiter = READ_ONCE(console_waiter);
+- console_owner = NULL;
+- raw_spin_unlock(&console_owner_lock);
+-
+- if (!waiter) {
+- spin_release(&console_owner_dep_map, _THIS_IP_);
+- return 0;
+- }
+-
+- /* The waiter is now free to continue */
+- WRITE_ONCE(console_waiter, false);
+-
+- spin_release(&console_owner_dep_map, _THIS_IP_);
+-
+- /*
+- * Hand off console_lock to waiter. The waiter will perform
+- * the up(). After this, the waiter is the console_lock owner.
+- */
+- mutex_release(&console_lock_dep_map, _THIS_IP_);
+- return 1;
+-}
+-
+-/**
+- * console_trylock_spinning - try to get console_lock by busy waiting
+- *
+- * This allows to busy wait for the console_lock when the current
+- * owner is running in specially marked sections. It means that
+- * the current owner is running and cannot reschedule until it
+- * is ready to lose the lock.
+- *
+- * Return: 1 if we got the lock, 0 othrewise
+- */
+-static int console_trylock_spinning(void)
+-{
+- struct task_struct *owner = NULL;
+- bool waiter;
+- bool spin = false;
+- unsigned long flags;
+-
+- if (console_trylock())
+- return 1;
+-
+- printk_safe_enter_irqsave(flags);
+-
+- raw_spin_lock(&console_owner_lock);
+- owner = READ_ONCE(console_owner);
+- waiter = READ_ONCE(console_waiter);
+- if (!waiter && owner && owner != current) {
+- WRITE_ONCE(console_waiter, true);
+- spin = true;
+- }
+- raw_spin_unlock(&console_owner_lock);
+-
+- /*
+- * If there is an active printk() writing to the
+- * consoles, instead of having it write our data too,
+- * see if we can offload that load from the active
+- * printer, and do some printing ourselves.
+- * Go into a spin only if there isn't already a waiter
+- * spinning, and there is an active printer, and
+- * that active printer isn't us (recursive printk?).
+- */
+- if (!spin) {
+- printk_safe_exit_irqrestore(flags);
+- return 0;
+- }
+-
+- /* We spin waiting for the owner to release us */
+- spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
+- /* Owner will clear console_waiter on hand off */
+- while (READ_ONCE(console_waiter))
+- cpu_relax();
+- spin_release(&console_owner_dep_map, _THIS_IP_);
+-
+- printk_safe_exit_irqrestore(flags);
+- /*
+- * The owner passed the console lock to us.
+- * Since we did not spin on console lock, annotate
+- * this as a trylock. Otherwise lockdep will
+- * complain.
+- */
+- mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_);
+-
+- return 1;
+-}
+-
+-/*
+- * Call the console drivers, asking them to write out
+- * log_buf[start] to log_buf[end - 1].
+- * The console_lock must be held.
+- */
+-static void call_console_drivers(const char *ext_text, size_t ext_len,
+- const char *text, size_t len)
+-{
+- static char dropped_text[64];
+- size_t dropped_len = 0;
+- struct console *con;
+-
+- trace_console_rcuidle(text, len);
+-
+- if (!console_drivers)
+- return;
+-
+- if (console_dropped) {
+- dropped_len = snprintf(dropped_text, sizeof(dropped_text),
+- "** %lu printk messages dropped **\n",
+- console_dropped);
+- console_dropped = 0;
+- }
+-
+- for_each_console(con) {
+- if (exclusive_console && con != exclusive_console)
+- continue;
+- if (!(con->flags & CON_ENABLED))
+- continue;
+- if (!con->write)
+- continue;
+- if (!cpu_online(smp_processor_id()) &&
+- !(con->flags & CON_ANYTIME))
+- continue;
+- if (kernel_sync_mode())
+- continue;
+- if (con->flags & CON_EXTENDED)
+- con->write(con, ext_text, ext_len);
+- else {
+- if (dropped_len)
+- con->write(con, dropped_text, dropped_len);
+- con->write(con, text, len);
+- }
+- }
+-}
+-
+ static inline u32 printk_caller_id(void)
+ {
+ return in_task() ? task_pid_nr(current) :
+@@ -2199,40 +2006,16 @@ asmlinkage int vprintk_emit(int facility
+ const char *fmt, va_list args)
+ {
+ int printed_len;
+- bool in_sched = false;
+
+ /* Suppress unimportant messages after panic happens */
+ if (unlikely(suppress_printk))
+ return 0;
+
+- if (level == LOGLEVEL_SCHED) {
++ if (level == LOGLEVEL_SCHED)
+ level = LOGLEVEL_DEFAULT;
+- in_sched = true;
+- }
+-
+- printk_delay(level);
+
+- /* This stops the holder of console_sem just where we want him */
+ printed_len = vprintk_store(facility, level, dev_info, fmt, args);
+
+- /* If called from the scheduler, we can not call up(). */
+- if (!in_sched) {
+- /*
+- * Disable preemption to avoid being preempted while holding
+- * console_sem which would prevent anyone from printing to
+- * console
+- */
+- preempt_disable();
+- /*
+- * Try to acquire and then immediately release the console
+- * semaphore. The release will print out buffers and wake up
+- * /dev/kmsg and syslog() users.
+- */
+- if (console_trylock_spinning())
+- console_unlock();
+- preempt_enable();
+- }
+-
+ wake_up_klogd();
+ return printed_len;
+ }
+@@ -2294,9 +2077,6 @@ EXPORT_SYMBOL(printk);
+ #define prb_first_valid_seq(rb) 0
+
+ static u64 syslog_seq;
+-static atomic64_t console_seq = ATOMIC64_INIT(0);
+-static u64 exclusive_console_stop_seq;
+-static unsigned long console_dropped;
+
+ static size_t record_print_text(const struct printk_record *r,
+ bool syslog, bool time)
+@@ -2311,8 +2091,6 @@ static ssize_t info_print_ext_header(cha
+ static ssize_t msg_print_ext_body(char *buf, size_t size,
+ char *text, size_t text_len,
+ struct dev_printk_info *dev_info) { return 0; }
+-static void console_lock_spinning_enable(void) { }
+-static int console_lock_spinning_disable_and_check(void) { return 0; }
+ static void call_console_drivers(const char *ext_text, size_t ext_len,
+ const char *text, size_t len) {}
+ static bool suppress_message_printing(int level) { return false; }
+@@ -2578,8 +2356,6 @@ static int have_callable_console(void)
+ */
+ static inline int can_use_console(void)
+ {
+- if (kernel_sync_mode())
+- return false;
+ return cpu_online(raw_smp_processor_id()) || have_callable_console();
+ }
+
+@@ -2599,139 +2375,14 @@ static inline int can_use_console(void)
+ */
+ void console_unlock(void)
+ {
+- static char ext_text[CONSOLE_EXT_LOG_MAX];
+- static char text[LOG_LINE_MAX + PREFIX_MAX];
+- unsigned long flags;
+- bool do_cond_resched, retry;
+- struct printk_info info;
+- struct printk_record r;
+- u64 seq;
+-
+ if (console_suspended) {
+ up_console_sem();
+ return;
+ }
+
+- prb_rec_init_rd(&r, &info, text, sizeof(text));
+-
+- /*
+- * Console drivers are called with interrupts disabled, so
+- * @console_may_schedule should be cleared before; however, we may
+- * end up dumping a lot of lines, for example, if called from
+- * console registration path, and should invoke cond_resched()
+- * between lines if allowable. Not doing so can cause a very long
+- * scheduling stall on a slow console leading to RCU stall and
+- * softlockup warnings which exacerbate the issue with more
+- * messages practically incapacitating the system.
+- *
+- * console_trylock() is not able to detect the preemptive
+- * context reliably. Therefore the value must be stored before
+- * and cleared after the the "again" goto label.
+- */
+- do_cond_resched = console_may_schedule;
+-again:
+- console_may_schedule = 0;
+-
+- /*
+- * We released the console_sem lock, so we need to recheck if
+- * cpu is online and (if not) is there at least one CON_ANYTIME
+- * console.
+- */
+- if (!can_use_console()) {
+- console_locked = 0;
+- up_console_sem();
+- return;
+- }
+-
+- for (;;) {
+- size_t ext_len = 0;
+- size_t len;
+-
+- printk_safe_enter_irqsave(flags);
+-skip:
+- seq = atomic64_read(&console_seq);
+- if (!prb_read_valid(prb, seq, &r))
+- break;
+-
+- if (seq != r.info->seq) {
+- console_dropped += r.info->seq - seq;
+- atomic64_set(&console_seq, r.info->seq);
+- seq = r.info->seq;
+- }
+-
+- if (suppress_message_printing(r.info->level)) {
+- /*
+- * Skip record we have buffered and already printed
+- * directly to the console when we received it, and
+- * record that has level above the console loglevel.
+- */
+- atomic64_set(&console_seq, seq + 1);
+- goto skip;
+- }
+-
+- /* Output to all consoles once old messages replayed. */
+- if (unlikely(exclusive_console &&
+- seq >= exclusive_console_stop_seq)) {
+- exclusive_console = NULL;
+- }
+-
+- /*
+- * Handle extended console text first because later
+- * record_print_text() will modify the record buffer in-place.
+- */
+- if (nr_ext_console_drivers) {
+- ext_len = info_print_ext_header(ext_text,
+- sizeof(ext_text),
+- r.info);
+- ext_len += msg_print_ext_body(ext_text + ext_len,
+- sizeof(ext_text) - ext_len,
+- &r.text_buf[0],
+- r.info->text_len,
+- &r.info->dev_info);
+- }
+- len = record_print_text(&r,
+- console_msg_format & MSG_FORMAT_SYSLOG,
+- printk_time);
+- atomic64_set(&console_seq, seq + 1);
+-
+- /*
+- * While actively printing out messages, if another printk()
+- * were to occur on another CPU, it may wait for this one to
+- * finish. This task can not be preempted if there is a
+- * waiter waiting to take over.
+- */
+- console_lock_spinning_enable();
+-
+- stop_critical_timings(); /* don't trace print latency */
+- call_console_drivers(ext_text, ext_len, text, len);
+- start_critical_timings();
+-
+- if (console_lock_spinning_disable_and_check()) {
+- printk_safe_exit_irqrestore(flags);
+- return;
+- }
+-
+- printk_safe_exit_irqrestore(flags);
+-
+- if (do_cond_resched)
+- cond_resched();
+- }
+-
+ console_locked = 0;
+
+ up_console_sem();
+-
+- /*
+- * Someone could have filled up the buffer again, so re-check if there's
+- * something to flush. In case we cannot trylock the console_sem again,
+- * there's a new owner and the console_unlock() from them will do the
+- * flush, no worries.
+- */
+- retry = prb_read_valid(prb, atomic64_read(&console_seq), NULL);
+- printk_safe_exit_irqrestore(flags);
+-
+- if (retry && console_trylock())
+- goto again;
+ }
+ EXPORT_SYMBOL(console_unlock);
+
+@@ -2781,18 +2432,20 @@ void console_unblank(void)
+ */
+ void console_flush_on_panic(enum con_flush_mode mode)
+ {
+- /*
+- * If someone else is holding the console lock, trylock will fail
+- * and may_schedule may be set. Ignore and proceed to unlock so
+- * that messages are flushed out. As this can be called from any
+- * context and we don't want to get preempted while flushing,
+- * ensure may_schedule is cleared.
+- */
+- console_trylock();
++ struct console *c;
++ u64 seq;
++
++ if (!console_trylock())
++ return;
++
+ console_may_schedule = 0;
+
+- if (mode == CONSOLE_REPLAY_ALL)
+- atomic64_set(&console_seq, prb_first_valid_seq(prb));
++ if (mode == CONSOLE_REPLAY_ALL) {
++ seq = prb_first_valid_seq(prb);
++ for_each_console(c)
++ atomic64_set(&c->printk_seq, seq);
++ }
++
+ console_unlock();
+ }
+
+@@ -2906,6 +2559,8 @@ static int try_enable_new_console(struct
+ return -ENOENT;
+ }
+
++static void console_try_thread(struct console *con);
++
+ /*
+ * The console driver calls this routine during kernel initialization
+ * to register the console printing procedure with printk() and to
+@@ -2927,7 +2582,6 @@ static int try_enable_new_console(struct
+ */
+ void register_console(struct console *newcon)
+ {
+- unsigned long flags;
+ struct console *bcon = NULL;
+ int err;
+
+@@ -2951,6 +2605,8 @@ void register_console(struct console *ne
+ }
+ }
+
++ newcon->thread = NULL;
++
+ if (console_drivers && console_drivers->flags & CON_BOOT)
+ bcon = console_drivers;
+
+@@ -3015,22 +2671,12 @@ void register_console(struct console *ne
+ if (newcon->flags & CON_EXTENDED)
+ nr_ext_console_drivers++;
+
+- if (newcon->flags & CON_PRINTBUFFER) {
+- syslog_lock_irqsave(flags);
+- /*
+- * We're about to replay the log buffer. Only do this to the
+- * just-registered console to avoid excessive message spam to
+- * the already-registered consoles.
+- *
+- * Set exclusive_console with disabled interrupts to reduce
+- * race window with eventual console_flush_on_panic() that
+- * ignores console_lock.
+- */
+- exclusive_console = newcon;
+- exclusive_console_stop_seq = atomic64_read(&console_seq);
+- atomic64_set(&console_seq, syslog_seq);
+- syslog_unlock_irqrestore(flags);
+- }
++ if (newcon->flags & CON_PRINTBUFFER)
++ atomic64_set(&newcon->printk_seq, 0);
++ else
++ atomic64_set(&newcon->printk_seq, prb_next_seq(prb));
++
++ console_try_thread(newcon);
+ console_unlock();
+ console_sysfs_notify();
+
+@@ -3104,6 +2750,9 @@ int unregister_console(struct console *c
+ console_unlock();
+ console_sysfs_notify();
+
++ if (console->thread && !IS_ERR(console->thread))
++ kthread_stop(console->thread);
++
+ if (console->exit)
+ res = console->exit(console);
+
+@@ -3147,6 +2796,154 @@ void __init console_init(void)
+ }
+ }
+
++static int printk_kthread_func(void *data)
++{
++ struct console *con = data;
++ unsigned long dropped = 0;
++ struct printk_info info;
++ struct printk_record r;
++ char *ext_text = NULL;
++ size_t dropped_len;
++ char *dropped_text;
++ int ret = -ENOMEM;
++ char *write_text;
++ u64 printk_seq;
++ size_t len;
++ char *text;
++ int error;
++ u64 seq;
++
++ if (con->flags & CON_EXTENDED) {
++ ext_text = kmalloc(CONSOLE_EXT_LOG_MAX, GFP_KERNEL);
++ if (!ext_text)
++ return ret;
++ }
++ text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
++ dropped_text = kmalloc(64, GFP_KERNEL);
++ if (!text || !dropped_text)
++ goto out;
++
++ if (con->flags & CON_EXTENDED)
++ write_text = ext_text;
++ else
++ write_text = text;
++
++ seq = atomic64_read(&con->printk_seq);
++
++ prb_rec_init_rd(&r, &info, text, LOG_LINE_MAX + PREFIX_MAX);
++
++ for (;;) {
++ error = wait_event_interruptible(log_wait,
++ prb_read_valid(prb, seq, &r) || kthread_should_stop());
++
++ if (kthread_should_stop())
++ break;
++
++ if (error)
++ continue;
++
++ if (seq != r.info->seq) {
++ dropped += r.info->seq - seq;
++ seq = r.info->seq;
++ }
++
++ seq++;
++
++ if (!(con->flags & CON_ENABLED))
++ continue;
++
++ if (suppress_message_printing(r.info->level))
++ continue;
++
++ if (con->flags & CON_EXTENDED) {
++ len = info_print_ext_header(ext_text,
++ CONSOLE_EXT_LOG_MAX,
++ r.info);
++ len += msg_print_ext_body(ext_text + len,
++ CONSOLE_EXT_LOG_MAX - len,
++ &r.text_buf[0], r.info->text_len,
++ &r.info->dev_info);
++ } else {
++ len = record_print_text(&r,
++ console_msg_format & MSG_FORMAT_SYSLOG,
++ printk_time);
++ }
++
++ printk_seq = atomic64_read(&con->printk_seq);
++
++ console_lock();
++ console_may_schedule = 0;
++
++ if (kernel_sync_mode() && con->write_atomic) {
++ console_unlock();
++ break;
++ }
++
++ if (!(con->flags & CON_EXTENDED) && dropped) {
++ dropped_len = snprintf(dropped_text, 64,
++ "** %lu printk messages dropped **\n",
++ dropped);
++ dropped = 0;
++
++ con->write(con, dropped_text, dropped_len);
++ printk_delay(r.info->level);
++ }
++
++ con->write(con, write_text, len);
++ if (len)
++ printk_delay(r.info->level);
++
++ atomic64_cmpxchg_relaxed(&con->printk_seq, printk_seq, seq);
++
++ console_unlock();
++ }
++out:
++ kfree(dropped_text);
++ kfree(text);
++ kfree(ext_text);
++ pr_info("%sconsole [%s%d]: printing thread stopped\n",
++ (con->flags & CON_BOOT) ? "boot" : "" ,
++ con->name, con->index);
++ return ret;
++}
++
++static void start_printk_kthread(struct console *con)
++{
++ con->thread = kthread_run(printk_kthread_func, con,
++ "pr/%s%d", con->name, con->index);
++ if (IS_ERR(con->thread)) {
++ pr_err("%sconsole [%s%d]: unable to start printing thread\n",
++ (con->flags & CON_BOOT) ? "boot" : "" ,
++ con->name, con->index);
++ return;
++ }
++ pr_info("%sconsole [%s%d]: printing thread started\n",
++ (con->flags & CON_BOOT) ? "boot" : "" ,
++ con->name, con->index);
++}
++
++static bool kthreads_started;
++
++static void console_try_thread(struct console *con)
++{
++ unsigned long irqflags;
++ int sprint_id;
++ char *buf;
++
++ if (kthreads_started) {
++ start_printk_kthread(con);
++ return;
++ }
++
++ buf = get_sprint_buf(&sprint_id, &irqflags);
++ if (!buf)
++ return;
++
++ print_sync_until(prb_next_seq(prb), con, buf, PREFIX_MAX + LOG_LINE_MAX);
++
++ put_sprint_buf(sprint_id, irqflags);
++}
++
+ /*
+ * Some boot consoles access data that is in the init section and which will
+ * be discarded after the initcalls have been run. To make sure that no code
+@@ -3186,6 +2983,13 @@ static int __init printk_late_init(void)
+ unregister_console(con);
+ }
+ }
++
++ console_lock();
++ for_each_console(con)
++ start_printk_kthread(con);
++ kthreads_started = true;
++ console_unlock();
++
+ ret = cpuhp_setup_state_nocalls(CPUHP_PRINTK_DEAD, "printk:dead", NULL,
+ console_cpu_notify);
+ WARN_ON(ret < 0);
+@@ -3201,7 +3005,6 @@ late_initcall(printk_late_init);
+ * Delayed printk version, for scheduler-internal messages:
+ */
+ #define PRINTK_PENDING_WAKEUP 0x01
+-#define PRINTK_PENDING_OUTPUT 0x02
+
+ static DEFINE_PER_CPU(int, printk_pending);
+
+@@ -3209,12 +3012,6 @@ static void wake_up_klogd_work_func(stru
+ {
+ int pending = __this_cpu_xchg(printk_pending, 0);
+
+- if (pending & PRINTK_PENDING_OUTPUT) {
+- /* If trylock fails, someone else is doing the printing */
+- if (console_trylock())
+- console_unlock();
+- }
+-
+ if (pending & PRINTK_PENDING_WAKEUP)
+ wake_up_interruptible(&log_wait);
+ }
+@@ -3239,13 +3036,6 @@ void wake_up_klogd(void)
+
+ void defer_console_output(void)
+ {
+- if (!printk_percpu_data_ready())
+- return;
+-
+- preempt_disable();
+- __this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT);
+- irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
+- preempt_enable();
+ }
+
+ int vprintk_deferred(const char *fmt, va_list args)
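
Stripped of allocation, drop accounting and sync-mode handling, each per-console thread added above reduces to a wait/read/emit loop. Roughly, as a simplified fragment of the patch's printk_kthread_func(), not standalone code (the real loop updates printk_seq with a relaxed cmpxchg rather than a plain set):

        for (;;) {
                wait_event_interruptible(log_wait,
                        prb_read_valid(prb, seq, &r) || kthread_should_stop());
                if (kthread_should_stop())
                        break;

                seq = r.info->seq + 1;  /* resync past dropped records */
                len = record_print_text(&r,
                        console_msg_format & MSG_FORMAT_SYSLOG, printk_time);

                console_lock();
                con->write(con, text, len);     /* this console only */
                atomic64_set(&con->printk_seq, seq);
                console_unlock();
        }
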
diff --git a/debian/patches-rt/0012-printk-ringbuffer-avoid-memcpy-on-state_var.patch b/debian/patches-rt/0012-printk-ringbuffer-avoid-memcpy-on-state_var.patch
new file mode 100644
index 000000000..2f72b4953
--- /dev/null
+++ b/debian/patches-rt/0012-printk-ringbuffer-avoid-memcpy-on-state_var.patch
@@ -0,0 +1,42 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 14 Sep 2020 11:54:03 +0206
+Subject: [PATCH 12/25] printk: ringbuffer: avoid memcpy() on state_var
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+@state_var is copied as part of the descriptor copying via
+memcpy(). This is not allowed because @state_var is an atomic type,
+which in some implementations may contain a spinlock.
+
+Avoid using memcpy() with @state_var by explicitly copying the other
+fields of the descriptor. @state_var is set using atomic set
+operator before returning.
+
+Fixes: b6cf8b3f3312 ("printk: add lockless ringbuffer")
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20200914094803.27365-2-john.ogness@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk_ringbuffer.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/kernel/printk/printk_ringbuffer.c
++++ b/kernel/printk/printk_ringbuffer.c
+@@ -412,9 +412,14 @@ static enum desc_state desc_read(struct
+
+ /*
+ * Copy the descriptor data. The data is not valid until the
+- * state has been re-checked.
++ * state has been re-checked. A memcpy() for all of @desc
++ * cannot be used because of the atomic_t @state_var field.
+ */
+- memcpy(desc_out, desc, sizeof(*desc_out)); /* LMM(desc_read:C) */
++ memcpy(&desc_out->info, &desc->info, sizeof(desc_out->info)); /* LMM(desc_read:C) */
++ memcpy(&desc_out->text_blk_lpos, &desc->text_blk_lpos,
++ sizeof(desc_out->text_blk_lpos)); /* also part of desc_read:C */
++ memcpy(&desc_out->dict_blk_lpos, &desc->dict_blk_lpos,
++ sizeof(desc_out->dict_blk_lpos)); /* also part of desc_read:C */
+
+ /*
+ * 1. Guarantee the descriptor content is loaded before re-checking
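
The same rule applies anywhere an atomic type lives inside a struct that gets copied: copy the plain members directly and move the atomic value through the atomic accessors. A generic illustration, not the ringbuffer code itself:

struct sample {
        long a;
        long b;
        atomic_long_t state;
};

static void sample_copy(struct sample *dst, const struct sample *src)
{
        /* Plain members may be copied directly. */
        dst->a = src->a;
        dst->b = src->b;

        /*
         * The atomic member must go through the atomic API; in some
         * implementations an atomic type carries a spinlock, so a raw
         * memcpy() of it is not allowed.
         */
        atomic_long_set(&dst->state, atomic_long_read(&src->state));
}
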
diff --git a/debian/patches-rt/0012-sched-rt-Use-cpumask_any-_distribute.patch b/debian/patches-rt/0012-sched-rt-Use-cpumask_any-_distribute.patch
index 9a84c18c6..0e512a95d 100644
--- a/debian/patches-rt/0012-sched-rt-Use-cpumask_any-_distribute.patch
+++ b/debian/patches-rt/0012-sched-rt-Use-cpumask_any-_distribute.patch
@@ -1,7 +1,7 @@
From: Peter Zijlstra <peterz@infradead.org>
-Date: Mon, 5 Oct 2020 16:57:29 +0200
-Subject: [PATCH 12/17] sched,rt: Use cpumask_any*_distribute()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Date: Fri, 23 Oct 2020 12:12:10 +0200
+Subject: [PATCH 12/19] sched,rt: Use cpumask_any*_distribute()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Replace a bunch of cpumask_any*() instances with
cpumask_any*_distribute(), by injecting this little bit of random in
@@ -12,10 +12,10 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/cpumask.h | 6 ++++++
- kernel/sched/deadline.c | 2 +-
+ kernel/sched/deadline.c | 6 +++---
kernel/sched/rt.c | 6 +++---
lib/cpumask.c | 18 ++++++++++++++++++
- 4 files changed, 28 insertions(+), 4 deletions(-)
+ 4 files changed, 30 insertions(+), 6 deletions(-)
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -41,6 +41,17 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* for_each_cpu - iterate over every cpu in a mask
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
+@@ -1978,8 +1978,8 @@ static int find_later_rq(struct task_str
+ return this_cpu;
+ }
+
+- best_cpu = cpumask_first_and(later_mask,
+- sched_domain_span(sd));
++ best_cpu = cpumask_any_and_distribute(later_mask,
++ sched_domain_span(sd));
+ /*
+ * Last chance: if a CPU being in both later_mask
+ * and current sd span is valid, that becomes our
@@ -2001,7 +2001,7 @@ static int find_later_rq(struct task_str
if (this_cpu != -1)
return this_cpu;
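
As a rough sketch of what the cpumask_any*_distribute() helpers named
above do (see lib/cpumask.c for the real version; the per-CPU variable
name below is an assumption): the previously chosen CPU is remembered
and the next search starts after it, so repeated calls spread the
picks across the mask instead of always returning the first bit:

	static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);

	int cpumask_any_and_distribute(const struct cpumask *src1p,
				       const struct cpumask *src2p)
	{
		int next, prev;

		/* Start scanning after the CPU picked last time. */
		prev = __this_cpu_read(distribute_cpu_mask_prev);

		next = cpumask_next_and(prev, src1p, src2p);
		if (next >= nr_cpu_ids)
			next = cpumask_first_and(src1p, src2p);

		if (next < nr_cpu_ids)
			__this_cpu_write(distribute_cpu_mask_prev, next);

		return next;
	}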
diff --git a/debian/patches-rt/0012-seqlock-seqcount_LOCKNAME_t-Introduce-PREEMPT_RT-sup.patch b/debian/patches-rt/0012-seqlock-seqcount_LOCKNAME_t-Introduce-PREEMPT_RT-sup.patch
index 0e3b5929e..1acee52b5 100644
--- a/debian/patches-rt/0012-seqlock-seqcount_LOCKNAME_t-Introduce-PREEMPT_RT-sup.patch
+++ b/debian/patches-rt/0012-seqlock-seqcount_LOCKNAME_t-Introduce-PREEMPT_RT-sup.patch
@@ -2,7 +2,7 @@ From: "Ahmed S. Darwish" <a.darwish@linutronix.de>
Date: Fri, 4 Sep 2020 17:32:30 +0200
Subject: [PATCH 12/13] seqlock: seqcount_LOCKNAME_t: Introduce PREEMPT_RT
support
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Preemption must be disabled before entering a sequence counter write
side critical section. Otherwise the read side section can preempt the
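
A minimal sketch of the write-side rule described above (demo
variables, not kernel code): the writer disables preemption around the
sequence counter update, otherwise a reader spinning on the count
could preempt the writer and livelock:

	static seqcount_t demo_seq;
	static u64 demo_data;

	static void demo_write(u64 val)
	{
		preempt_disable();	/* keep readers from spinning forever */
		write_seqcount_begin(&demo_seq);
		demo_data = val;
		write_seqcount_end(&demo_seq);
		preempt_enable();
	}

	static u64 demo_read(void)
	{
		unsigned int seq;
		u64 val;

		do {
			seq = read_seqcount_begin(&demo_seq);
			val = demo_data;
		} while (read_seqcount_retry(&demo_seq, seq));

		return val;
	}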
diff --git a/debian/patches-rt/0014-locking-rtmutex-export-lockdep-less-version-of-rt_mu.patch b/debian/patches-rt/0013-locking-rtmutex-export-lockdep-less-version-of-rt_mu.patch
index 6483750fa..bffe0529b 100644
--- a/debian/patches-rt/0014-locking-rtmutex-export-lockdep-less-version-of-rt_mu.patch
+++ b/debian/patches-rt/0013-locking-rtmutex-export-lockdep-less-version-of-rt_mu.patch
@@ -1,17 +1,17 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 12 Oct 2017 16:36:39 +0200
-Subject: [PATCH 14/23] locking/rtmutex: export lockdep-less version of
+Subject: [PATCH 13/22] locking/rtmutex: export lockdep-less version of
rt_mutex's lock, trylock and unlock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Required for lock implementations on top of rtmutex.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- kernel/locking/rtmutex.c | 58 ++++++++++++++++++++++++++--------------
+ kernel/locking/rtmutex.c | 54 ++++++++++++++++++++++++++++------------
kernel/locking/rtmutex_common.h | 3 ++
- 2 files changed, 42 insertions(+), 19 deletions(-)
+ 2 files changed, 41 insertions(+), 16 deletions(-)
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -69,16 +69,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
-@@ -1552,12 +1564,18 @@ int __sched __rt_mutex_futex_trylock(str
- */
- int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
- {
-- might_sleep();
--
-- return rt_mutex_fastlock(lock, TASK_KILLABLE, rt_mutex_slowlock);
-+ return rt_mutex_lock_state(lock, 0, TASK_KILLABLE);
+@@ -1541,6 +1553,14 @@ int __sched __rt_mutex_futex_trylock(str
+ return __rt_mutex_slowtrylock(lock);
}
- EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
+int __sched __rt_mutex_trylock(struct rt_mutex *lock)
+{
@@ -91,7 +84,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* rt_mutex_trylock - try to lock a rt_mutex
*
-@@ -1573,10 +1591,7 @@ int __sched rt_mutex_trylock(struct rt_m
+@@ -1556,10 +1576,7 @@ int __sched rt_mutex_trylock(struct rt_m
{
int ret;
@@ -103,7 +96,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (ret)
mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
-@@ -1584,6 +1599,11 @@ int __sched rt_mutex_trylock(struct rt_m
+@@ -1567,6 +1584,11 @@ int __sched rt_mutex_trylock(struct rt_m
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);
diff --git a/debian/patches-rt/0013-printk-remove-deferred-printing.patch b/debian/patches-rt/0013-printk-remove-deferred-printing.patch
new file mode 100644
index 000000000..f52a3f727
--- /dev/null
+++ b/debian/patches-rt/0013-printk-remove-deferred-printing.patch
@@ -0,0 +1,360 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 19 Oct 2020 22:53:30 +0206
+Subject: [PATCH 13/15] printk: remove deferred printing
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+Since printing occurs either atomically or from the printing
+kthread, there is no need to defer printing or to track possible
+recursion paths. Remove all printk context tracking.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/arm/kernel/smp.c | 2 -
+ arch/powerpc/kexec/crash.c | 3 -
+ include/linux/hardirq.h | 2 -
+ include/linux/printk.h | 12 ------
+ kernel/printk/Makefile | 1
+ kernel/printk/internal.h | 70 -----------------------------------
+ kernel/printk/printk.c | 40 ++++++--------------
+ kernel/printk/printk_safe.c | 86 --------------------------------------------
+ kernel/trace/trace.c | 2 -
+ 9 files changed, 12 insertions(+), 206 deletions(-)
+ delete mode 100644 kernel/printk/internal.h
+ delete mode 100644 kernel/printk/printk_safe.c
+
+--- a/arch/arm/kernel/smp.c
++++ b/arch/arm/kernel/smp.c
+@@ -680,11 +680,9 @@ void handle_IPI(int ipinr, struct pt_reg
+ break;
+
+ case IPI_CPU_BACKTRACE:
+- printk_nmi_enter();
+ irq_enter();
+ nmi_cpu_backtrace(regs);
+ irq_exit();
+- printk_nmi_exit();
+ break;
+
+ default:
+--- a/arch/powerpc/kexec/crash.c
++++ b/arch/powerpc/kexec/crash.c
+@@ -311,9 +311,6 @@ void default_machine_crash_shutdown(stru
+ unsigned int i;
+ int (*old_handler)(struct pt_regs *regs);
+
+- /* Avoid hardlocking with irresponsive CPU holding logbuf_lock */
+- printk_nmi_enter();
+-
+ /*
+ * This function is only called after the system
+ * has panicked or is otherwise in a critical state.
+--- a/include/linux/hardirq.h
++++ b/include/linux/hardirq.h
+@@ -115,7 +115,6 @@ extern void rcu_nmi_exit(void);
+ do { \
+ lockdep_off(); \
+ arch_nmi_enter(); \
+- printk_nmi_enter(); \
+ BUG_ON(in_nmi() == NMI_MASK); \
+ __preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
+ } while (0)
+@@ -134,7 +133,6 @@ extern void rcu_nmi_exit(void);
+ do { \
+ BUG_ON(!in_nmi()); \
+ __preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
+- printk_nmi_exit(); \
+ arch_nmi_exit(); \
+ lockdep_on(); \
+ } while (0)
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -147,18 +147,6 @@ static inline __printf(1, 2) __cold
+ void early_printk(const char *s, ...) { }
+ #endif
+
+-#ifdef CONFIG_PRINTK_NMI
+-extern void printk_nmi_enter(void);
+-extern void printk_nmi_exit(void);
+-extern void printk_nmi_direct_enter(void);
+-extern void printk_nmi_direct_exit(void);
+-#else
+-static inline void printk_nmi_enter(void) { }
+-static inline void printk_nmi_exit(void) { }
+-static inline void printk_nmi_direct_enter(void) { }
+-static inline void printk_nmi_direct_exit(void) { }
+-#endif /* PRINTK_NMI */
+-
+ struct dev_printk_info;
+
+ #ifdef CONFIG_PRINTK
+--- a/kernel/printk/Makefile
++++ b/kernel/printk/Makefile
+@@ -1,5 +1,4 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ obj-y = printk.o
+-obj-$(CONFIG_PRINTK) += printk_safe.o
+ obj-$(CONFIG_A11Y_BRAILLE_CONSOLE) += braille.o
+ obj-$(CONFIG_PRINTK) += printk_ringbuffer.o
+--- a/kernel/printk/internal.h
++++ /dev/null
+@@ -1,70 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- * internal.h - printk internal definitions
+- */
+-#include <linux/percpu.h>
+-
+-#ifdef CONFIG_PRINTK
+-
+-#define PRINTK_SAFE_CONTEXT_MASK 0x007ffffff
+-#define PRINTK_NMI_DIRECT_CONTEXT_MASK 0x008000000
+-#define PRINTK_NMI_CONTEXT_MASK 0xff0000000
+-
+-#define PRINTK_NMI_CONTEXT_OFFSET 0x010000000
+-
+-__printf(4, 0)
+-int vprintk_store(int facility, int level,
+- const struct dev_printk_info *dev_info,
+- const char *fmt, va_list args);
+-
+-__printf(1, 0) int vprintk_default(const char *fmt, va_list args);
+-__printf(1, 0) int vprintk_deferred(const char *fmt, va_list args);
+-__printf(1, 0) int vprintk_func(const char *fmt, va_list args);
+-void __printk_safe_enter(void);
+-void __printk_safe_exit(void);
+-
+-bool printk_percpu_data_ready(void);
+-
+-#define printk_safe_enter_irqsave(flags) \
+- do { \
+- local_irq_save(flags); \
+- __printk_safe_enter(); \
+- } while (0)
+-
+-#define printk_safe_exit_irqrestore(flags) \
+- do { \
+- __printk_safe_exit(); \
+- local_irq_restore(flags); \
+- } while (0)
+-
+-#define printk_safe_enter_irq() \
+- do { \
+- local_irq_disable(); \
+- __printk_safe_enter(); \
+- } while (0)
+-
+-#define printk_safe_exit_irq() \
+- do { \
+- __printk_safe_exit(); \
+- local_irq_enable(); \
+- } while (0)
+-
+-void defer_console_output(void);
+-
+-#else
+-
+-__printf(1, 0) int vprintk_func(const char *fmt, va_list args) { return 0; }
+-
+-/*
+- * In !PRINTK builds we still export console_sem
+- * semaphore and some of console functions (console_unlock()/etc.), so
+- * printk-safe must preserve the existing local IRQ guarantees.
+- */
+-#define printk_safe_enter_irqsave(flags) local_irq_save(flags)
+-#define printk_safe_exit_irqrestore(flags) local_irq_restore(flags)
+-
+-#define printk_safe_enter_irq() local_irq_disable()
+-#define printk_safe_exit_irq() local_irq_enable()
+-
+-static inline bool printk_percpu_data_ready(void) { return false; }
+-#endif /* CONFIG_PRINTK */
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -60,7 +60,6 @@
+ #include "printk_ringbuffer.h"
+ #include "console_cmdline.h"
+ #include "braille.h"
+-#include "internal.h"
+
+ int console_printk[4] = {
+ CONSOLE_LOGLEVEL_DEFAULT, /* console_loglevel */
+@@ -230,19 +229,7 @@ static int nr_ext_console_drivers;
+
+ static int __down_trylock_console_sem(unsigned long ip)
+ {
+- int lock_failed;
+- unsigned long flags;
+-
+- /*
+- * Here and in __up_console_sem() we need to be in safe mode,
+- * because spindump/WARN/etc from under console ->lock will
+- * deadlock in printk()->down_trylock_console_sem() otherwise.
+- */
+- printk_safe_enter_irqsave(flags);
+- lock_failed = down_trylock(&console_sem);
+- printk_safe_exit_irqrestore(flags);
+-
+- if (lock_failed)
++ if (down_trylock(&console_sem))
+ return 1;
+ mutex_acquire(&console_lock_dep_map, 0, 1, ip);
+ return 0;
+@@ -251,13 +238,9 @@ static int __down_trylock_console_sem(un
+
+ static void __up_console_sem(unsigned long ip)
+ {
+- unsigned long flags;
+-
+ mutex_release(&console_lock_dep_map, ip);
+
+- printk_safe_enter_irqsave(flags);
+ up(&console_sem);
+- printk_safe_exit_irqrestore(flags);
+ }
+ #define up_console_sem() __up_console_sem(_RET_IP_)
+
+@@ -2027,6 +2010,16 @@ int vprintk_default(const char *fmt, va_
+ }
+ EXPORT_SYMBOL_GPL(vprintk_default);
+
++__printf(1, 0) int vprintk_func(const char *fmt, va_list args)
++{
++#ifdef CONFIG_KGDB_KDB
++ /* Allow to pass printk() to kdb but avoid a recursion. */
++ if (unlikely(kdb_trap_printk && kdb_printf_cpu < 0))
++ return vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args);
++#endif
++ return vprintk_default(fmt, args);
++}
++
+ asmlinkage int vprintk(const char *fmt, va_list args)
+ {
+ return vprintk_func(fmt, args);
+@@ -3034,18 +3027,9 @@ void wake_up_klogd(void)
+ preempt_enable();
+ }
+
+-void defer_console_output(void)
+-{
+-}
+-
+ int vprintk_deferred(const char *fmt, va_list args)
+ {
+- int r;
+-
+- r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, fmt, args);
+- defer_console_output();
+-
+- return r;
++ return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
+ }
+
+ int printk_deferred(const char *fmt, ...)
+--- a/kernel/printk/printk_safe.c
++++ /dev/null
+@@ -1,86 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- * printk_safe.c - Safe printk for printk-deadlock-prone contexts
+- */
+-
+-#include <linux/preempt.h>
+-#include <linux/spinlock.h>
+-#include <linux/debug_locks.h>
+-#include <linux/kdb.h>
+-#include <linux/smp.h>
+-#include <linux/cpumask.h>
+-#include <linux/irq_work.h>
+-#include <linux/printk.h>
+-#include <linux/kprobes.h>
+-
+-#include "internal.h"
+-
+-static DEFINE_PER_CPU(int, printk_context);
+-
+-#ifdef CONFIG_PRINTK_NMI
+-void noinstr printk_nmi_enter(void)
+-{
+- this_cpu_add(printk_context, PRINTK_NMI_CONTEXT_OFFSET);
+-}
+-
+-void noinstr printk_nmi_exit(void)
+-{
+- this_cpu_sub(printk_context, PRINTK_NMI_CONTEXT_OFFSET);
+-}
+-
+-/*
+- * Marks a code that might produce many messages in NMI context
+- * and the risk of losing them is more critical than eventual
+- * reordering.
+- */
+-void printk_nmi_direct_enter(void)
+-{
+- if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
+- this_cpu_or(printk_context, PRINTK_NMI_DIRECT_CONTEXT_MASK);
+-}
+-
+-void printk_nmi_direct_exit(void)
+-{
+- this_cpu_and(printk_context, ~PRINTK_NMI_DIRECT_CONTEXT_MASK);
+-}
+-
+-#endif /* CONFIG_PRINTK_NMI */
+-
+-/* Can be preempted by NMI. */
+-void __printk_safe_enter(void)
+-{
+- this_cpu_inc(printk_context);
+-}
+-
+-/* Can be preempted by NMI. */
+-void __printk_safe_exit(void)
+-{
+- this_cpu_dec(printk_context);
+-}
+-
+-__printf(1, 0) int vprintk_func(const char *fmt, va_list args)
+-{
+-#ifdef CONFIG_KGDB_KDB
+- /* Allow to pass printk() to kdb but avoid a recursion. */
+- if (unlikely(kdb_trap_printk && kdb_printf_cpu < 0))
+- return vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args);
+-#endif
+-
+- /*
+- * Store to the ringbuffer, even in NMI. But avoid calling console
+- * drivers that might have their own locks.
+- */
+- if (this_cpu_read(printk_context) &
+- (PRINTK_NMI_DIRECT_CONTEXT_MASK |
+- PRINTK_NMI_CONTEXT_MASK |
+- PRINTK_SAFE_CONTEXT_MASK)) {
+- int len;
+-
+- len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
+- defer_console_output();
+- return len;
+- }
+-
+- /* No obstacles. */
+- return vprintk_default(fmt, args);
+-}
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -9249,7 +9249,6 @@ void ftrace_dump(enum ftrace_dump_mode o
+ tracing_off();
+
+ local_irq_save(flags);
+- printk_nmi_direct_enter();
+
+ /* Simulate the iterator */
+ trace_init_global_iter(&iter);
+@@ -9329,7 +9328,6 @@ void ftrace_dump(enum ftrace_dump_mode o
+ atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
+ }
+ atomic_dec(&dump_running);
+- printk_nmi_direct_exit();
+ local_irq_restore(flags);
+ }
+ EXPORT_SYMBOL_GPL(ftrace_dump);
diff --git a/debian/patches-rt/0013-printk-ringbuffer-relocate-get_data.patch b/debian/patches-rt/0013-printk-ringbuffer-relocate-get_data.patch
new file mode 100644
index 000000000..c092067ac
--- /dev/null
+++ b/debian/patches-rt/0013-printk-ringbuffer-relocate-get_data.patch
@@ -0,0 +1,149 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 14 Sep 2020 14:39:49 +0206
+Subject: [PATCH 13/25] printk: ringbuffer: relocate get_data()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+Move the internal get_data() function as-is above prb_reserve() so
+that a later change can make use of the static function.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20200914123354.832-2-john.ogness@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk_ringbuffer.c | 116 +++++++++++++++++++-------------------
+ 1 file changed, 58 insertions(+), 58 deletions(-)
+
+--- a/kernel/printk/printk_ringbuffer.c
++++ b/kernel/printk/printk_ringbuffer.c
+@@ -1055,6 +1055,64 @@ static unsigned int space_used(struct pr
+ DATA_SIZE(data_ring) - DATA_INDEX(data_ring, blk_lpos->begin));
+ }
+
++/*
++ * Given @blk_lpos, return a pointer to the writer data from the data block
++ * and calculate the size of the data part. A NULL pointer is returned if
++ * @blk_lpos specifies values that could never be legal.
++ *
++ * This function (used by readers) performs strict validation on the lpos
++ * values to possibly detect bugs in the writer code. A WARN_ON_ONCE() is
++ * triggered if an internal error is detected.
++ */
++static const char *get_data(struct prb_data_ring *data_ring,
++ struct prb_data_blk_lpos *blk_lpos,
++ unsigned int *data_size)
++{
++ struct prb_data_block *db;
++
++ /* Data-less data block description. */
++ if (LPOS_DATALESS(blk_lpos->begin) && LPOS_DATALESS(blk_lpos->next)) {
++ if (blk_lpos->begin == NO_LPOS && blk_lpos->next == NO_LPOS) {
++ *data_size = 0;
++ return "";
++ }
++ return NULL;
++ }
++
++ /* Regular data block: @begin less than @next and in same wrap. */
++ if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next) &&
++ blk_lpos->begin < blk_lpos->next) {
++ db = to_block(data_ring, blk_lpos->begin);
++ *data_size = blk_lpos->next - blk_lpos->begin;
++
++ /* Wrapping data block: @begin is one wrap behind @next. */
++ } else if (DATA_WRAPS(data_ring, blk_lpos->begin + DATA_SIZE(data_ring)) ==
++ DATA_WRAPS(data_ring, blk_lpos->next)) {
++ db = to_block(data_ring, 0);
++ *data_size = DATA_INDEX(data_ring, blk_lpos->next);
++
++ /* Illegal block description. */
++ } else {
++ WARN_ON_ONCE(1);
++ return NULL;
++ }
++
++ /* A valid data block will always be aligned to the ID size. */
++ if (WARN_ON_ONCE(blk_lpos->begin != ALIGN(blk_lpos->begin, sizeof(db->id))) ||
++ WARN_ON_ONCE(blk_lpos->next != ALIGN(blk_lpos->next, sizeof(db->id)))) {
++ return NULL;
++ }
++
++ /* A valid data block will always have at least an ID. */
++ if (WARN_ON_ONCE(*data_size < sizeof(db->id)))
++ return NULL;
++
++ /* Subtract block ID space from size to reflect data size. */
++ *data_size -= sizeof(db->id);
++
++ return &db->data[0];
++}
++
+ /**
+ * prb_reserve() - Reserve space in the ringbuffer.
+ *
+@@ -1210,64 +1268,6 @@ void prb_commit(struct prb_reserved_entr
+ }
+
+ /*
+- * Given @blk_lpos, return a pointer to the writer data from the data block
+- * and calculate the size of the data part. A NULL pointer is returned if
+- * @blk_lpos specifies values that could never be legal.
+- *
+- * This function (used by readers) performs strict validation on the lpos
+- * values to possibly detect bugs in the writer code. A WARN_ON_ONCE() is
+- * triggered if an internal error is detected.
+- */
+-static const char *get_data(struct prb_data_ring *data_ring,
+- struct prb_data_blk_lpos *blk_lpos,
+- unsigned int *data_size)
+-{
+- struct prb_data_block *db;
+-
+- /* Data-less data block description. */
+- if (LPOS_DATALESS(blk_lpos->begin) && LPOS_DATALESS(blk_lpos->next)) {
+- if (blk_lpos->begin == NO_LPOS && blk_lpos->next == NO_LPOS) {
+- *data_size = 0;
+- return "";
+- }
+- return NULL;
+- }
+-
+- /* Regular data block: @begin less than @next and in same wrap. */
+- if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next) &&
+- blk_lpos->begin < blk_lpos->next) {
+- db = to_block(data_ring, blk_lpos->begin);
+- *data_size = blk_lpos->next - blk_lpos->begin;
+-
+- /* Wrapping data block: @begin is one wrap behind @next. */
+- } else if (DATA_WRAPS(data_ring, blk_lpos->begin + DATA_SIZE(data_ring)) ==
+- DATA_WRAPS(data_ring, blk_lpos->next)) {
+- db = to_block(data_ring, 0);
+- *data_size = DATA_INDEX(data_ring, blk_lpos->next);
+-
+- /* Illegal block description. */
+- } else {
+- WARN_ON_ONCE(1);
+- return NULL;
+- }
+-
+- /* A valid data block will always be aligned to the ID size. */
+- if (WARN_ON_ONCE(blk_lpos->begin != ALIGN(blk_lpos->begin, sizeof(db->id))) ||
+- WARN_ON_ONCE(blk_lpos->next != ALIGN(blk_lpos->next, sizeof(db->id)))) {
+- return NULL;
+- }
+-
+- /* A valid data block will always have at least an ID. */
+- if (WARN_ON_ONCE(*data_size < sizeof(db->id)))
+- return NULL;
+-
+- /* Subtract block ID space from size to reflect data size. */
+- *data_size -= sizeof(db->id);
+-
+- return &db->data[0];
+-}
+-
+-/*
+ * Count the number of lines in provided text. All text has at least 1 line
+ * (even if @text_size is 0). Each '\n' processed is counted as an additional
+ * line.
diff --git a/debian/patches-rt/0013-printk-track-seq-per-console.patch b/debian/patches-rt/0013-printk-track-seq-per-console.patch
deleted file mode 100644
index c3f3bb19b..000000000
--- a/debian/patches-rt/0013-printk-track-seq-per-console.patch
+++ /dev/null
@@ -1,93 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Tue, 12 Feb 2019 15:29:51 +0100
-Subject: [PATCH 13/25] printk: track seq per console
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-Allow each console to track which seq record was last printed. This
-simplifies identifying dropped records.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/console.h | 1 +
- kernel/printk/printk.c | 30 +++++++++++++++++++++++++++---
- 2 files changed, 28 insertions(+), 3 deletions(-)
-
---- a/include/linux/console.h
-+++ b/include/linux/console.h
-@@ -150,6 +150,7 @@ struct console {
- short flags;
- short index;
- int cflag;
-+ unsigned long printk_seq;
- void *data;
- struct console *next;
- };
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -1515,6 +1515,16 @@ SYSCALL_DEFINE3(syslog, int, type, char
- return do_syslog(type, buf, len, SYSLOG_FROM_READER);
- }
-
-+static void print_console_dropped(struct console *con, u64 count)
-+{
-+ char text[64];
-+ int len;
-+
-+ len = sprintf(text, "** %llu printk message%s dropped **\n",
-+ count, count > 1 ? "s" : "");
-+ con->write(con, text, len);
-+}
-+
- static void format_text(struct printk_log *msg, u64 seq,
- char *ext_text, size_t *ext_len,
- char *text, size_t *len, bool time)
-@@ -1548,7 +1558,7 @@ static void format_text(struct printk_lo
- * log_buf[start] to log_buf[end - 1].
- * The console_lock must be held.
- */
--static void call_console_drivers(const char *ext_text, size_t ext_len,
-+static void call_console_drivers(u64 seq, const char *ext_text, size_t ext_len,
- const char *text, size_t len)
- {
- struct console *con;
-@@ -1563,6 +1573,19 @@ static void call_console_drivers(const c
- if (!cpu_online(raw_smp_processor_id()) &&
- !(con->flags & CON_ANYTIME))
- continue;
-+ if (con->printk_seq >= seq)
-+ continue;
-+
-+ con->printk_seq++;
-+ if (con->printk_seq < seq) {
-+ print_console_dropped(con, seq - con->printk_seq);
-+ con->printk_seq = seq;
-+ }
-+
-+ /* for supressed messages, only seq is updated */
-+ if (len == 0 && ext_len == 0)
-+ continue;
-+
- if (con->flags & CON_EXTENDED)
- con->write(con, ext_text, ext_len);
- else
-@@ -1791,7 +1814,7 @@ static ssize_t msg_print_ext_header(char
- static ssize_t msg_print_ext_body(char *buf, size_t size,
- char *dict, size_t dict_len,
- char *text, size_t text_len) { return 0; }
--static void call_console_drivers(const char *ext_text, size_t ext_len,
-+static void call_console_drivers(u64 seq, const char *ext_text, size_t ext_len,
- const char *text, size_t len) {}
- static size_t msg_print_text(const struct printk_log *msg, bool syslog,
- bool time, char *buf, size_t size) { return 0; }
-@@ -2594,8 +2617,9 @@ static int printk_kthread_func(void *dat
-
- console_lock();
- console_may_schedule = 0;
-+ call_console_drivers(master_seq, ext_text,
-+ ext_len, text, len);
- if (len > 0 || ext_len > 0) {
-- call_console_drivers(ext_text, ext_len, text, len);
- boot_delay_msec(msg->level);
- printk_delay();
- }
diff --git a/debian/patches-rt/0013-sched-rt-Use-the-full-cpumask-for-balancing.patch b/debian/patches-rt/0013-sched-rt-Use-the-full-cpumask-for-balancing.patch
index 4bd131666..402d7733e 100644
--- a/debian/patches-rt/0013-sched-rt-Use-the-full-cpumask-for-balancing.patch
+++ b/debian/patches-rt/0013-sched-rt-Use-the-full-cpumask-for-balancing.patch
@@ -1,7 +1,7 @@
From: Peter Zijlstra <peterz@infradead.org>
-Date: Mon, 5 Oct 2020 16:57:30 +0200
-Subject: [PATCH 13/17] sched,rt: Use the full cpumask for balancing
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Date: Fri, 23 Oct 2020 12:12:11 +0200
+Subject: [PATCH 13/19] sched,rt: Use the full cpumask for balancing
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
We want migrate_disable() tasks to get PULLs in order for them to PUSH
away the higher priority task.
diff --git a/debian/patches-rt/0013-seqlock-PREEMPT_RT-Do-not-starve-seqlock_t-writers.patch b/debian/patches-rt/0013-seqlock-PREEMPT_RT-Do-not-starve-seqlock_t-writers.patch
index 64a0d8320..3d65c613c 100644
--- a/debian/patches-rt/0013-seqlock-PREEMPT_RT-Do-not-starve-seqlock_t-writers.patch
+++ b/debian/patches-rt/0013-seqlock-PREEMPT_RT-Do-not-starve-seqlock_t-writers.patch
@@ -1,7 +1,7 @@
From: "Ahmed S. Darwish" <a.darwish@linutronix.de>
Date: Fri, 4 Sep 2020 17:32:31 +0200
Subject: [PATCH 13/13] seqlock: PREEMPT_RT: Do not starve seqlock_t writers
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
On PREEMPT_RT, seqlock_t is transformed to a sleeping lock that does not
disable preemption. A seqlock_t reader can thus preempt its write side
diff --git a/debian/patches-rt/0014-printk-add-console-handover.patch b/debian/patches-rt/0014-printk-add-console-handover.patch
new file mode 100644
index 000000000..453d38ee8
--- /dev/null
+++ b/debian/patches-rt/0014-printk-add-console-handover.patch
@@ -0,0 +1,68 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 19 Oct 2020 23:03:44 +0206
+Subject: [PATCH 14/15] printk: add console handover
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+If earlyprintk is used, a boot console will print directly to the
+console immediately. The boot console will unregister itself as soon
+as a non-boot console registers. However, the non-boot console does
+not begin printing until its kthread has started. Since this happens
+much later, there is a long pause in the console output. If the
+ringbuffer is small, messages could even be dropped during the
+pause.
+
+Add a new CON_HANDOVER console flag to be used internally by printk
+in order to track which non-boot console took over from a boot
+console. If handover consoles have implemented write_atomic(), they
+are allowed to print directly to the console until their kthread can
+take over.
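+
+As a sketch, a console driver able to take part in such a handover
+provides an atomic write callback (the driver name, demo_write() and
+the demo_hw_putchar() helper are invented for illustration):
+
+	static void demo_write_atomic(struct console *con, const char *s,
+				      unsigned int n)
+	{
+		/* Must be callable from any context, no sleeping locks. */
+		while (n--)
+			demo_hw_putchar(*s++);
+	}
+
+	static struct console demo_console = {
+		.name		= "demo",
+		.write		= demo_write,
+		.write_atomic	= demo_write_atomic,
+		.flags		= CON_PRINTBUFFER,
+	};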
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/console.h | 1 +
+ kernel/printk/printk.c | 8 +++++++-
+ 2 files changed, 8 insertions(+), 1 deletion(-)
+
+--- a/include/linux/console.h
++++ b/include/linux/console.h
+@@ -137,6 +137,7 @@ static inline int con_debug_leave(void)
+ #define CON_ANYTIME (16) /* Safe to call when cpu is offline */
+ #define CON_BRL (32) /* Used for a braille device */
+ #define CON_EXTENDED (64) /* Use the extended output format a la /dev/kmsg */
++#define CON_HANDOVER (128) /* Device was previously a boot console. */
+
+ struct console {
+ char name[16];
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1755,6 +1755,8 @@ static bool console_can_sync(struct cons
+ return false;
+ if (con->write_atomic && kernel_sync_mode())
+ return true;
++ if (con->write_atomic && (con->flags & CON_HANDOVER) && !con->thread)
++ return true;
+ if (con->write && (con->flags & CON_BOOT) && !con->thread)
+ return true;
+ return false;
+@@ -1766,6 +1768,8 @@ static bool call_sync_console_driver(str
+ return false;
+ if (con->write_atomic && kernel_sync_mode())
+ con->write_atomic(con, text, text_len);
++ else if (con->write_atomic && (con->flags & CON_HANDOVER) && !con->thread)
++ con->write_atomic(con, text, text_len);
+ else if (con->write && (con->flags & CON_BOOT) && !con->thread)
+ con->write(con, text, text_len);
+ else
+@@ -2641,8 +2645,10 @@ void register_console(struct console *ne
+ * the real console are the same physical device, it's annoying to
+ * see the beginning boot messages twice
+ */
+- if (bcon && ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV))
++ if (bcon && ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV)) {
+ newcon->flags &= ~CON_PRINTBUFFER;
++ newcon->flags |= CON_HANDOVER;
++ }
+
+ /*
+ * Put this console in the list - keep the
diff --git a/debian/patches-rt/0014-printk-do-boot_delay_msec-inside-printk_delay.patch b/debian/patches-rt/0014-printk-do-boot_delay_msec-inside-printk_delay.patch
deleted file mode 100644
index 60b82ac22..000000000
--- a/debian/patches-rt/0014-printk-do-boot_delay_msec-inside-printk_delay.patch
+++ /dev/null
@@ -1,72 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Tue, 12 Feb 2019 15:29:52 +0100
-Subject: [PATCH 14/25] printk: do boot_delay_msec inside printk_delay
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-Both functions needed to be called one after the other, so just
-integrate boot_delay_msec into printk_delay for simplification.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/printk/printk.c | 35 +++++++++++++++++------------------
- 1 file changed, 17 insertions(+), 18 deletions(-)
-
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -1515,6 +1515,21 @@ SYSCALL_DEFINE3(syslog, int, type, char
- return do_syslog(type, buf, len, SYSLOG_FROM_READER);
- }
-
-+int printk_delay_msec __read_mostly;
-+
-+static inline void printk_delay(int level)
-+{
-+ boot_delay_msec(level);
-+ if (unlikely(printk_delay_msec)) {
-+ int m = printk_delay_msec;
-+
-+ while (m--) {
-+ mdelay(1);
-+ touch_nmi_watchdog();
-+ }
-+ }
-+}
-+
- static void print_console_dropped(struct console *con, u64 count)
- {
- char text[64];
-@@ -1593,20 +1608,6 @@ static void call_console_drivers(u64 seq
- }
- }
-
--int printk_delay_msec __read_mostly;
--
--static inline void printk_delay(void)
--{
-- if (unlikely(printk_delay_msec)) {
-- int m = printk_delay_msec;
--
-- while (m--) {
-- mdelay(1);
-- touch_nmi_watchdog();
-- }
-- }
--}
--
- static inline u32 printk_caller_id(void)
- {
- return in_task() ? task_pid_nr(current) :
-@@ -2619,10 +2620,8 @@ static int printk_kthread_func(void *dat
- console_may_schedule = 0;
- call_console_drivers(master_seq, ext_text,
- ext_len, text, len);
-- if (len > 0 || ext_len > 0) {
-- boot_delay_msec(msg->level);
-- printk_delay();
-- }
-+ if (len > 0 || ext_len > 0)
-+ printk_delay(msg->level);
- console_unlock();
- }
-
diff --git a/debian/patches-rt/0014-printk-ringbuffer-add-BLK_DATALESS-macro.patch b/debian/patches-rt/0014-printk-ringbuffer-add-BLK_DATALESS-macro.patch
new file mode 100644
index 000000000..e6789a432
--- /dev/null
+++ b/debian/patches-rt/0014-printk-ringbuffer-add-BLK_DATALESS-macro.patch
@@ -0,0 +1,47 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 14 Sep 2020 14:39:50 +0206
+Subject: [PATCH 14/25] printk: ringbuffer: add BLK_DATALESS() macro
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+Rather than continually needing to explicitly check @begin and @next
+to identify a dataless block, introduce and use a BLK_DATALESS()
+macro.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20200914123354.832-3-john.ogness@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk_ringbuffer.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/kernel/printk/printk_ringbuffer.c
++++ b/kernel/printk/printk_ringbuffer.c
+@@ -266,6 +266,8 @@
+
+ /* Determine if a logical position refers to a data-less block. */
+ #define LPOS_DATALESS(lpos) ((lpos) & 1UL)
++#define BLK_DATALESS(blk) (LPOS_DATALESS((blk)->begin) && \
++ LPOS_DATALESS((blk)->next))
+
+ /* Get the logical position at index 0 of the current wrap. */
+ #define DATA_THIS_WRAP_START_LPOS(data_ring, lpos) \
+@@ -1038,7 +1040,7 @@ static unsigned int space_used(struct pr
+ struct prb_data_blk_lpos *blk_lpos)
+ {
+ /* Data-less blocks take no space. */
+- if (LPOS_DATALESS(blk_lpos->begin))
++ if (BLK_DATALESS(blk_lpos))
+ return 0;
+
+ if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next)) {
+@@ -1071,7 +1073,7 @@ static const char *get_data(struct prb_d
+ struct prb_data_block *db;
+
+ /* Data-less data block description. */
+- if (LPOS_DATALESS(blk_lpos->begin) && LPOS_DATALESS(blk_lpos->next)) {
++ if (BLK_DATALESS(blk_lpos)) {
+ if (blk_lpos->begin == NO_LPOS && blk_lpos->next == NO_LPOS) {
+ *data_size = 0;
+ return "";
diff --git a/debian/patches-rt/0015-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch b/debian/patches-rt/0014-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch
index 9adb27b21..5cc2a85ca 100644
--- a/debian/patches-rt/0015-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch
+++ b/debian/patches-rt/0014-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch
@@ -1,8 +1,8 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 25 Jun 2011 09:21:04 +0200
-Subject: [PATCH 15/23] sched: Add saved_state for tasks blocked on sleeping
+Subject: [PATCH 14/22] sched: Add saved_state for tasks blocked on sleeping
locks
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Spinlocks are state preserving in !RT. RT changes the state when a
task gets blocked on a lock. So we need to remember the state before
@@ -38,7 +38,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3134,7 +3134,7 @@ try_to_wake_up(struct task_struct *p, un
+@@ -3278,7 +3278,7 @@ try_to_wake_up(struct task_struct *p, un
int cpu, success = 0;
preempt_disable();
@@ -47,7 +47,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* We're waking current, this means 'p->on_rq' and 'task_cpu(p)
* == smp_processor_id()'. Together this means we can special
-@@ -3164,8 +3164,26 @@ try_to_wake_up(struct task_struct *p, un
+@@ -3308,8 +3308,26 @@ try_to_wake_up(struct task_struct *p, un
*/
raw_spin_lock_irqsave(&p->pi_lock, flags);
smp_mb__after_spinlock();
@@ -75,7 +75,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
trace_sched_waking(p);
-@@ -3355,6 +3373,18 @@ int wake_up_process(struct task_struct *
+@@ -3499,6 +3517,18 @@ int wake_up_process(struct task_struct *
}
EXPORT_SYMBOL(wake_up_process);
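
A conceptual sketch of the saved_state mechanism this patch adds
(simplified, not the actual kernel code): the task state is stashed
before blocking on a sleeping spinlock and restored once the lock is
acquired, so an intervening regular wakeup cannot destroy it:

	raw_spin_lock(&p->pi_lock);
	p->saved_state = p->state;		/* e.g. TASK_INTERRUPTIBLE */
	p->state = TASK_UNINTERRUPTIBLE;	/* sleep on the lock */
	raw_spin_unlock(&p->pi_lock);

	/* ... block until the rtmutex is acquired ... */

	raw_spin_lock(&p->pi_lock);
	p->state = p->saved_state;		/* restore the caller's state */
	p->saved_state = TASK_RUNNING;
	raw_spin_unlock(&p->pi_lock);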
diff --git a/debian/patches-rt/0014-sched-lockdep-Annotate-pi_lock-recursion.patch b/debian/patches-rt/0014-sched-lockdep-Annotate-pi_lock-recursion.patch
index 9fa2b217c..4020cf46e 100644
--- a/debian/patches-rt/0014-sched-lockdep-Annotate-pi_lock-recursion.patch
+++ b/debian/patches-rt/0014-sched-lockdep-Annotate-pi_lock-recursion.patch
@@ -1,7 +1,7 @@
From: Peter Zijlstra <peterz@infradead.org>
-Date: Mon, 5 Oct 2020 16:57:31 +0200
-Subject: [PATCH 14/17] sched, lockdep: Annotate ->pi_lock recursion
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Date: Fri, 23 Oct 2020 12:12:12 +0200
+Subject: [PATCH 14/19] sched, lockdep: Annotate ->pi_lock recursion
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
There's a valid ->pi_lock recursion issue where the actual PI code
tries to wake up the stop task. Make lockdep aware so it doesn't
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2602,6 +2602,7 @@ int select_task_rq(struct task_struct *p
+@@ -2660,6 +2660,7 @@ int select_task_rq(struct task_struct *p
void sched_set_stop_task(int cpu, struct task_struct *stop)
{
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
struct task_struct *old_stop = cpu_rq(cpu)->stop;
-@@ -2617,6 +2618,20 @@ void sched_set_stop_task(int cpu, struct
+@@ -2675,6 +2676,20 @@ void sched_set_stop_task(int cpu, struct
sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
stop->sched_class = &stop_sched_class;
diff --git a/debian/patches-rt/0016-locking-rtmutex-add-sleeping-lock-implementation.patch b/debian/patches-rt/0015-locking-rtmutex-add-sleeping-lock-implementation.patch
index 2880c2568..0cd531152 100644
--- a/debian/patches-rt/0016-locking-rtmutex-add-sleeping-lock-implementation.patch
+++ b/debian/patches-rt/0015-locking-rtmutex-add-sleeping-lock-implementation.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 12 Oct 2017 17:11:19 +0200
-Subject: [PATCH 16/23] locking/rtmutex: add sleeping lock implementation
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Subject: [PATCH 15/22] locking/rtmutex: add sleeping lock implementation
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
@@ -937,7 +937,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
int __sched __rt_mutex_lock_state(struct rt_mutex *lock, int state)
-@@ -1612,16 +1941,13 @@ void __sched __rt_mutex_unlock(struct rt
+@@ -1597,16 +1926,13 @@ void __sched __rt_mutex_unlock(struct rt
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
mutex_release(&lock->dep_map, _RET_IP_);
@@ -958,7 +958,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
lockdep_assert_held(&lock->wait_lock);
-@@ -1638,23 +1964,35 @@ bool __sched __rt_mutex_futex_unlock(str
+@@ -1623,23 +1949,35 @@ bool __sched __rt_mutex_futex_unlock(str
* avoid inversion prior to the wakeup. preempt_disable()
* therein pairs with rt_mutex_postunlock().
*/
@@ -997,7 +997,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/**
-@@ -1690,7 +2028,7 @@ void __rt_mutex_init(struct rt_mutex *lo
+@@ -1675,7 +2013,7 @@ void __rt_mutex_init(struct rt_mutex *lo
if (name && key)
debug_rt_mutex_init(lock, name, key);
}
@@ -1006,7 +1006,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
-@@ -1710,6 +2048,14 @@ void rt_mutex_init_proxy_locked(struct r
+@@ -1695,6 +2033,14 @@ void rt_mutex_init_proxy_locked(struct r
struct task_struct *proxy_owner)
{
__rt_mutex_init(lock, NULL, NULL);
@@ -1021,7 +1021,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
debug_rt_mutex_proxy_lock(lock, proxy_owner);
rt_mutex_set_owner(lock, proxy_owner);
}
-@@ -1733,6 +2079,26 @@ void rt_mutex_proxy_unlock(struct rt_mut
+@@ -1718,6 +2064,26 @@ void rt_mutex_proxy_unlock(struct rt_mut
rt_mutex_set_owner(lock, NULL);
}
@@ -1048,7 +1048,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
* @lock: the rt_mutex to take
-@@ -1805,6 +2171,9 @@ int __rt_mutex_start_proxy_lock(struct r
+@@ -1790,6 +2156,9 @@ int __rt_mutex_start_proxy_lock(struct r
ret = 0;
}
@@ -1058,7 +1058,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ret;
}
-@@ -1894,6 +2263,9 @@ int rt_mutex_wait_proxy_lock(struct rt_m
+@@ -1879,6 +2248,9 @@ int rt_mutex_wait_proxy_lock(struct rt_m
* have to fix that up.
*/
fixup_rt_mutex_waiters(lock);
diff --git a/debian/patches-rt/0015-printk-print-history-for-new-consoles.patch b/debian/patches-rt/0015-printk-print-history-for-new-consoles.patch
deleted file mode 100644
index f6eefd8d2..000000000
--- a/debian/patches-rt/0015-printk-print-history-for-new-consoles.patch
+++ /dev/null
@@ -1,119 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Tue, 12 Feb 2019 15:29:53 +0100
-Subject: [PATCH 15/25] printk: print history for new consoles
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-When new consoles register, they currently print how many messages
-they have missed. However, many (or all) of those messages may still
-be in the ring buffer. Add functionality to print as much of the
-history as available. This is a clean replacement of the old
-exclusive console hack.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/console.h | 1
- kernel/printk/printk.c | 75 ++++++++++++++++++++++++++++++++++++++++++++++++
- 2 files changed, 76 insertions(+)
-
---- a/include/linux/console.h
-+++ b/include/linux/console.h
-@@ -151,6 +151,7 @@ struct console {
- short index;
- int cflag;
- unsigned long printk_seq;
-+ int wrote_history;
- void *data;
- struct console *next;
- };
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -1568,6 +1568,77 @@ static void format_text(struct printk_lo
- }
- }
-
-+static void printk_write_history(struct console *con, u64 master_seq)
-+{
-+ struct prb_iterator iter;
-+ bool time = printk_time;
-+ static char *ext_text;
-+ static char *text;
-+ static char *buf;
-+ u64 seq;
-+
-+ ext_text = kmalloc(CONSOLE_EXT_LOG_MAX, GFP_KERNEL);
-+ text = kmalloc(PRINTK_SPRINT_MAX, GFP_KERNEL);
-+ buf = kmalloc(PRINTK_RECORD_MAX, GFP_KERNEL);
-+ if (!ext_text || !text || !buf)
-+ return;
-+
-+ if (!(con->flags & CON_ENABLED))
-+ goto out;
-+
-+ if (!con->write)
-+ goto out;
-+
-+ if (!cpu_online(raw_smp_processor_id()) &&
-+ !(con->flags & CON_ANYTIME))
-+ goto out;
-+
-+ prb_iter_init(&iter, &printk_rb, NULL);
-+
-+ for (;;) {
-+ struct printk_log *msg;
-+ size_t ext_len;
-+ size_t len;
-+ int ret;
-+
-+ ret = prb_iter_next(&iter, buf, PRINTK_RECORD_MAX, &seq);
-+ if (ret == 0) {
-+ break;
-+ } else if (ret < 0) {
-+ prb_iter_init(&iter, &printk_rb, NULL);
-+ continue;
-+ }
-+
-+ if (seq > master_seq)
-+ break;
-+
-+ con->printk_seq++;
-+ if (con->printk_seq < seq) {
-+ print_console_dropped(con, seq - con->printk_seq);
-+ con->printk_seq = seq;
-+ }
-+
-+ msg = (struct printk_log *)buf;
-+ format_text(msg, master_seq, ext_text, &ext_len, text,
-+ &len, time);
-+
-+ if (len == 0 && ext_len == 0)
-+ continue;
-+
-+ if (con->flags & CON_EXTENDED)
-+ con->write(con, ext_text, ext_len);
-+ else
-+ con->write(con, text, len);
-+
-+ printk_delay(msg->level);
-+ }
-+out:
-+ con->wrote_history = 1;
-+ kfree(ext_text);
-+ kfree(text);
-+ kfree(buf);
-+}
-+
- /*
- * Call the console drivers, asking them to write out
- * log_buf[start] to log_buf[end - 1].
-@@ -1583,6 +1654,10 @@ static void call_console_drivers(u64 seq
- for_each_console(con) {
- if (!(con->flags & CON_ENABLED))
- continue;
-+ if (!con->wrote_history) {
-+ printk_write_history(con, seq);
-+ continue;
-+ }
- if (!con->write)
- continue;
- if (!cpu_online(raw_smp_processor_id()) &&
diff --git a/debian/patches-rt/0015-printk-ringbuffer-clear-initial-reserved-fields.patch b/debian/patches-rt/0015-printk-ringbuffer-clear-initial-reserved-fields.patch
new file mode 100644
index 000000000..3c576294c
--- /dev/null
+++ b/debian/patches-rt/0015-printk-ringbuffer-clear-initial-reserved-fields.patch
@@ -0,0 +1,136 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 14 Sep 2020 14:39:51 +0206
+Subject: [PATCH 15/25] printk: ringbuffer: clear initial reserved fields
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+prb_reserve() will set some meta data values and leave others
+uninitialized (or rather, containing the values of the previous
+wrap). Simplify the API by always clearing out all the fields.
+Only the sequence number is filled in. The caller is now
+responsible for filling in the rest of the meta data fields.
+In particular, the writer must correctly fill in the text and dict lengths.
+
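+After this change a writer follows the pattern from the updated
+kerneldoc example below (textstr and test_rb are taken from that
+example):
+
+	if (prb_reserve(&e, &test_rb, &r)) {
+		snprintf(r.text_buf, r.text_buf_size, "%s", textstr);
+		/* The writer must now set the length itself. */
+		r.info->text_len = strlen(textstr);
+		prb_commit(&e);
+	}
+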
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20200914123354.832-4-john.ogness@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 12 ++++++++----
+ kernel/printk/printk_ringbuffer.c | 30 ++++++++++++++++++------------
+ 2 files changed, 26 insertions(+), 16 deletions(-)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -520,8 +520,11 @@ static int log_store(u32 caller_id, int
+ memcpy(&r.text_buf[0], text, text_len);
+ if (trunc_msg_len)
+ memcpy(&r.text_buf[text_len], trunc_msg, trunc_msg_len);
+- if (r.dict_buf)
++ r.info->text_len = text_len + trunc_msg_len;
++ if (r.dict_buf) {
+ memcpy(&r.dict_buf[0], dict, dict_len);
++ r.info->dict_len = dict_len;
++ }
+ r.info->facility = facility;
+ r.info->level = level & 7;
+ r.info->flags = flags & 0x1f;
+@@ -1077,10 +1080,11 @@ static unsigned int __init add_to_rb(str
+ if (!prb_reserve(&e, rb, &dest_r))
+ return 0;
+
+- memcpy(&dest_r.text_buf[0], &r->text_buf[0], dest_r.text_buf_size);
++ memcpy(&dest_r.text_buf[0], &r->text_buf[0], r->info->text_len);
++ dest_r.info->text_len = r->info->text_len;
+ if (dest_r.dict_buf) {
+- memcpy(&dest_r.dict_buf[0], &r->dict_buf[0],
+- dest_r.dict_buf_size);
++ memcpy(&dest_r.dict_buf[0], &r->dict_buf[0], r->info->dict_len);
++ dest_r.info->dict_len = r->info->dict_len;
+ }
+ dest_r.info->facility = r->info->facility;
+ dest_r.info->level = r->info->level;
+--- a/kernel/printk/printk_ringbuffer.c
++++ b/kernel/printk/printk_ringbuffer.c
+@@ -146,10 +146,13 @@
+ *
+ * if (prb_reserve(&e, &test_rb, &r)) {
+ * snprintf(r.text_buf, r.text_buf_size, "%s", textstr);
++ * r.info->text_len = strlen(textstr);
+ *
+ * // dictionary allocation may have failed
+- * if (r.dict_buf)
++ * if (r.dict_buf) {
+ * snprintf(r.dict_buf, r.dict_buf_size, "%s", dictstr);
++ * r.info->dict_len = strlen(dictstr);
++ * }
+ *
+ * r.info->ts_nsec = local_clock();
+ *
+@@ -1142,9 +1145,9 @@ static const char *get_data(struct prb_d
+ * @dict_buf_size is set to 0. Writers must check this before writing to
+ * dictionary space.
+ *
+- * @info->text_len and @info->dict_len will already be set to @text_buf_size
+- * and @dict_buf_size, respectively. If dictionary space reservation fails,
+- * @info->dict_len is set to 0.
++ * Important: @info->text_len and @info->dict_len need to be set correctly by
++ * the writer in order for data to be readable and/or extended.
++ * Their values are initialized to 0.
+ */
+ bool prb_reserve(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
+ struct printk_record *r)
+@@ -1152,6 +1155,7 @@ bool prb_reserve(struct prb_reserved_ent
+ struct prb_desc_ring *desc_ring = &rb->desc_ring;
+ struct prb_desc *d;
+ unsigned long id;
++ u64 seq;
+
+ if (!data_check_size(&rb->text_data_ring, r->text_buf_size))
+ goto fail;
+@@ -1177,6 +1181,14 @@ bool prb_reserve(struct prb_reserved_ent
+ d = to_desc(desc_ring, id);
+
+ /*
++ * All @info fields (except @seq) are cleared and must be filled in
++ * by the writer. Save @seq before clearing because it is used to
++ * determine the new sequence number.
++ */
++ seq = d->info.seq;
++ memset(&d->info, 0, sizeof(d->info));
++
++ /*
+ * Set the @e fields here so that prb_commit() can be used if
+ * text data allocation fails.
+ */
+@@ -1194,17 +1206,15 @@ bool prb_reserve(struct prb_reserved_ent
+ * See the "Bootstrap" comment block in printk_ringbuffer.h for
+ * details about how the initializer bootstraps the descriptors.
+ */
+- if (d->info.seq == 0 && DESC_INDEX(desc_ring, id) != 0)
++ if (seq == 0 && DESC_INDEX(desc_ring, id) != 0)
+ d->info.seq = DESC_INDEX(desc_ring, id);
+ else
+- d->info.seq += DESCS_COUNT(desc_ring);
++ d->info.seq = seq + DESCS_COUNT(desc_ring);
+
+ r->text_buf = data_alloc(rb, &rb->text_data_ring, r->text_buf_size,
+ &d->text_blk_lpos, id);
+ /* If text data allocation fails, a data-less record is committed. */
+ if (r->text_buf_size && !r->text_buf) {
+- d->info.text_len = 0;
+- d->info.dict_len = 0;
+ prb_commit(e);
+ /* prb_commit() re-enabled interrupts. */
+ goto fail;
+@@ -1221,10 +1231,6 @@ bool prb_reserve(struct prb_reserved_ent
+
+ r->info = &d->info;
+
+- /* Set default values for the sizes. */
+- d->info.text_len = r->text_buf_size;
+- d->info.dict_len = r->dict_buf_size;
+-
+ /* Record full text space used by record. */
+ e->text_space = space_used(&rb->text_data_ring, &d->text_blk_lpos);
+
diff --git a/debian/patches-rt/0015-sched-Fix-migrate_disable-vs-rt-dl-balancing.patch b/debian/patches-rt/0015-sched-Fix-migrate_disable-vs-rt-dl-balancing.patch
index 49e0a90d3..fccd49ebe 100644
--- a/debian/patches-rt/0015-sched-Fix-migrate_disable-vs-rt-dl-balancing.patch
+++ b/debian/patches-rt/0015-sched-Fix-migrate_disable-vs-rt-dl-balancing.patch
@@ -1,7 +1,7 @@
From: Peter Zijlstra <peterz@infradead.org>
-Date: Mon, 5 Oct 2020 16:57:32 +0200
-Subject: [PATCH 15/17] sched: Fix migrate_disable() vs rt/dl balancing
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Date: Fri, 23 Oct 2020 12:12:13 +0200
+Subject: [PATCH 15/19] sched: Fix migrate_disable() vs rt/dl balancing
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
In order to minimize the interference of migrate_disable() on lower
priority tasks, which can be deprived of runtime due to being stuck
@@ -28,10 +28,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
include/linux/preempt.h | 38 +++++++++++++++------------
include/linux/sched.h | 3 +-
kernel/sched/core.c | 67 ++++++++++++++++++++++++++++++++++++++++--------
- kernel/sched/deadline.c | 26 +++++++++++++-----
+ kernel/sched/deadline.c | 29 +++++++++++++++-----
kernel/sched/rt.c | 63 ++++++++++++++++++++++++++++++++++++---------
kernel/sched/sched.h | 32 ++++++++++++++++++++++
- 6 files changed, 182 insertions(+), 47 deletions(-)
+ 6 files changed, 185 insertions(+), 47 deletions(-)
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -108,7 +108,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline bool rq_has_pinned_tasks(struct rq *rq)
{
return rq->nr_pinned;
-@@ -1917,6 +1912,49 @@ static int migration_cpu_stop(void *data
+@@ -1976,6 +1971,49 @@ static int migration_cpu_stop(void *data
return 0;
}
@@ -158,7 +158,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* sched_class::set_cpus_allowed must do the below, but is not required to
* actually call this function.
-@@ -2004,6 +2042,14 @@ static int affine_move_task(struct rq *r
+@@ -2056,6 +2094,14 @@ static int affine_move_task(struct rq *r
/* Can the task run on the task's current CPU? If so, we're done */
if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
@@ -172,8 +172,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
pending = p->migration_pending;
if (pending) {
- p->migration_pending = NULL;
-@@ -2011,6 +2057,11 @@ static int affine_move_task(struct rq *r
+ refcount_inc(&pending->refs);
+@@ -2064,6 +2110,11 @@ static int affine_move_task(struct rq *r
}
task_rq_unlock(rq, p, rf);
@@ -185,15 +185,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (complete)
goto do_complete;
-@@ -2048,6 +2099,7 @@ static int affine_move_task(struct rq *r
-
+@@ -2100,6 +2151,7 @@ static int affine_move_task(struct rq *r
if (flags & SCA_MIGRATE_ENABLE) {
+ refcount_inc(&pending->refs); /* pending->{arg,stop_work} */
+ p->migration_flags &= ~MDF_PUSH;
task_rq_unlock(rq, p, rf);
- pending->arg = arg;
- stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
-@@ -2660,11 +2712,6 @@ static inline int __set_cpus_allowed_ptr
+
+ pending->arg = (struct migration_arg) {
+@@ -2718,11 +2770,6 @@ static inline int __set_cpus_allowed_ptr
static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { }
@@ -207,7 +207,17 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return false;
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
-@@ -2182,7 +2182,7 @@ static void push_dl_tasks(struct rq *rq)
+@@ -2105,6 +2105,9 @@ static int push_dl_task(struct rq *rq)
+ return 0;
+
+ retry:
++ if (is_migration_disabled(next_task))
++ return 0;
++
+ if (WARN_ON(next_task == rq->curr))
+ return 0;
+
+@@ -2182,7 +2185,7 @@ static void push_dl_tasks(struct rq *rq)
static void pull_dl_task(struct rq *this_rq)
{
int this_cpu = this_rq->cpu, cpu;
@@ -216,7 +226,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
bool resched = false;
struct rq *src_rq;
u64 dmin = LONG_MAX;
-@@ -2212,6 +2212,7 @@ static void pull_dl_task(struct rq *this
+@@ -2212,6 +2215,7 @@ static void pull_dl_task(struct rq *this
continue;
/* Might drop this_rq->lock */
@@ -224,7 +234,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
double_lock_balance(this_rq, src_rq);
/*
-@@ -2243,17 +2244,27 @@ static void pull_dl_task(struct rq *this
+@@ -2243,17 +2247,27 @@ static void pull_dl_task(struct rq *this
src_rq->curr->dl.deadline))
goto skip;
@@ -258,7 +268,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
if (resched)
-@@ -2497,6 +2508,7 @@ const struct sched_class dl_sched_class
+@@ -2500,6 +2514,7 @@ const struct sched_class dl_sched_class
.rq_online = rq_online_dl,
.rq_offline = rq_offline_dl,
.task_woken = task_woken_dl,
@@ -396,7 +406,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
if (resched)
-@@ -2446,6 +2484,7 @@ const struct sched_class rt_sched_class
+@@ -2449,6 +2487,7 @@ const struct sched_class rt_sched_class
.rq_offline = rq_offline_rt,
.task_woken = task_woken_rt,
.switched_from = switched_from_rt,
diff --git a/debian/patches-rt/0017-locking-rtmutex-Allow-rt_mutex_trylock-on-PREEMPT_RT.patch b/debian/patches-rt/0016-locking-rtmutex-Allow-rt_mutex_trylock-on-PREEMPT_RT.patch
index b3e5443df..75af4952d 100644
--- a/debian/patches-rt/0017-locking-rtmutex-Allow-rt_mutex_trylock-on-PREEMPT_RT.patch
+++ b/debian/patches-rt/0016-locking-rtmutex-Allow-rt_mutex_trylock-on-PREEMPT_RT.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 2 Dec 2015 11:34:07 +0100
-Subject: [PATCH 17/23] locking/rtmutex: Allow rt_mutex_trylock() on PREEMPT_RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Subject: [PATCH 16/22] locking/rtmutex: Allow rt_mutex_trylock() on PREEMPT_RT
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
A non-PREEMPT_RT kernel can deadlock on rt_mutex_trylock() in softirq
context.
@@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1899,7 +1899,11 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock_killable
+@@ -1884,7 +1884,11 @@ int __sched __rt_mutex_futex_trylock(str
int __sched __rt_mutex_trylock(struct rt_mutex *lock)
{
diff --git a/debian/patches-rt/0016-printk-implement-CON_PRINTBUFFER.patch b/debian/patches-rt/0016-printk-implement-CON_PRINTBUFFER.patch
deleted file mode 100644
index b19900289..000000000
--- a/debian/patches-rt/0016-printk-implement-CON_PRINTBUFFER.patch
+++ /dev/null
@@ -1,92 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Tue, 12 Feb 2019 15:29:54 +0100
-Subject: [PATCH 16/25] printk: implement CON_PRINTBUFFER
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-If the CON_PRINTBUFFER flag is not set, do not replay the history
-for that console.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/printk/printk.c | 34 ++++++----------------------------
- 1 file changed, 6 insertions(+), 28 deletions(-)
-
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -419,10 +419,6 @@ static u32 log_first_idx;
- static u64 log_next_seq;
- static u32 log_next_idx;
-
--/* the next printk record to write to the console */
--static u64 console_seq;
--static u32 console_idx;
--
- /* the next printk record to read after the last 'clear' command */
- static u64 clear_seq;
- static u32 clear_idx;
-@@ -1655,8 +1651,12 @@ static void call_console_drivers(u64 seq
- if (!(con->flags & CON_ENABLED))
- continue;
- if (!con->wrote_history) {
-- printk_write_history(con, seq);
-- continue;
-+ if (con->flags & CON_PRINTBUFFER) {
-+ printk_write_history(con, seq);
-+ continue;
-+ }
-+ con->wrote_history = 1;
-+ con->printk_seq = seq - 1;
- }
- if (!con->write)
- continue;
-@@ -1875,8 +1875,6 @@ EXPORT_SYMBOL(printk);
-
- static u64 syslog_seq;
- static u32 syslog_idx;
--static u64 console_seq;
--static u32 console_idx;
- static u64 log_first_seq;
- static u32 log_first_idx;
- static u64 log_next_seq;
-@@ -2207,15 +2205,6 @@ void console_flush_on_panic(enum con_flu
- */
- console_trylock();
- console_may_schedule = 0;
--
-- if (mode == CONSOLE_REPLAY_ALL) {
-- unsigned long flags;
--
-- logbuf_lock_irqsave(flags);
-- console_seq = log_first_seq;
-- console_idx = log_first_idx;
-- logbuf_unlock_irqrestore(flags);
-- }
- console_unlock();
- }
-
-@@ -2350,7 +2339,6 @@ static int try_enable_new_console(struct
- */
- void register_console(struct console *newcon)
- {
-- unsigned long flags;
- struct console *bcon = NULL;
- int err;
-
-@@ -2438,16 +2426,6 @@ void register_console(struct console *ne
- if (newcon->flags & CON_EXTENDED)
- nr_ext_console_drivers++;
-
-- if (newcon->flags & CON_PRINTBUFFER) {
-- /*
-- * console_unlock(); will print out the buffered messages
-- * for us.
-- */
-- logbuf_lock_irqsave(flags);
-- console_seq = syslog_seq;
-- console_idx = syslog_idx;
-- logbuf_unlock_irqrestore(flags);
-- }
- console_unlock();
- console_sysfs_notify();
-
diff --git a/debian/patches-rt/0016-printk-ringbuffer-change-representation-of-states.patch b/debian/patches-rt/0016-printk-ringbuffer-change-representation-of-states.patch
new file mode 100644
index 000000000..425504dbc
--- /dev/null
+++ b/debian/patches-rt/0016-printk-ringbuffer-change-representation-of-states.patch
@@ -0,0 +1,207 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 14 Sep 2020 14:39:52 +0206
+Subject: [PATCH 16/25] printk: ringbuffer: change representation of states
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+Rather than deriving the state by evaluating bits within the flags
+area of the state variable, assign the states explicit values and
+set those values in the flags area. Introduce macros to make it
+simple to read and write state values for the state variable.
+
+Although the functionality is preserved, the binary representation
+for the states is changed.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20200914123354.832-5-john.ogness@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ Documentation/admin-guide/kdump/gdbmacros.txt | 12 ++++++----
+ kernel/printk/printk_ringbuffer.c | 28 +++++------------------
+ kernel/printk/printk_ringbuffer.h | 31 ++++++++++++++++----------
+ scripts/gdb/linux/dmesg.py | 11 +++++----
+ 4 files changed, 41 insertions(+), 41 deletions(-)
+
+--- a/Documentation/admin-guide/kdump/gdbmacros.txt
++++ b/Documentation/admin-guide/kdump/gdbmacros.txt
+@@ -295,9 +295,12 @@ document dump_record
+ end
+
+ define dmesg
+- set var $desc_committed = 1UL << ((sizeof(long) * 8) - 1)
+- set var $flags_mask = 3UL << ((sizeof(long) * 8) - 2)
+- set var $id_mask = ~$flags_mask
++ # definitions from kernel/printk/printk_ringbuffer.h
++ set var $desc_committed = 1
++ set var $desc_sv_bits = sizeof(long) * 8
++ set var $desc_flags_shift = $desc_sv_bits - 2
++ set var $desc_flags_mask = 3 << $desc_flags_shift
++ set var $id_mask = ~$desc_flags_mask
+
+ set var $desc_count = 1U << prb->desc_ring.count_bits
+ set var $prev_flags = 0
+@@ -309,7 +312,8 @@ define dmesg
+ set var $desc = &prb->desc_ring.descs[$id % $desc_count]
+
+ # skip non-committed record
+- if (($desc->state_var.counter & $flags_mask) == $desc_committed)
++ set var $state = 3 & ($desc->state_var.counter >> $desc_flags_shift)
++ if ($state == $desc_committed)
+ dump_record $desc $prev_flags
+ set var $prev_flags = $desc->info.flags
+ end
+--- a/kernel/printk/printk_ringbuffer.c
++++ b/kernel/printk/printk_ringbuffer.c
+@@ -348,14 +348,6 @@ static bool data_check_size(struct prb_d
+ return true;
+ }
+
+-/* The possible responses of a descriptor state-query. */
+-enum desc_state {
+- desc_miss, /* ID mismatch */
+- desc_reserved, /* reserved, in use by writer */
+- desc_committed, /* committed, writer is done */
+- desc_reusable, /* free, not yet used by any writer */
+-};
+-
+ /* Query the state of a descriptor. */
+ static enum desc_state get_desc_state(unsigned long id,
+ unsigned long state_val)
+@@ -363,13 +355,7 @@ static enum desc_state get_desc_state(un
+ if (id != DESC_ID(state_val))
+ return desc_miss;
+
+- if (state_val & DESC_REUSE_MASK)
+- return desc_reusable;
+-
+- if (state_val & DESC_COMMITTED_MASK)
+- return desc_committed;
+-
+- return desc_reserved;
++ return DESC_STATE(state_val);
+ }
+
+ /*
+@@ -484,8 +470,8 @@ static enum desc_state desc_read(struct
+ static void desc_make_reusable(struct prb_desc_ring *desc_ring,
+ unsigned long id)
+ {
+- unsigned long val_committed = id | DESC_COMMITTED_MASK;
+- unsigned long val_reusable = val_committed | DESC_REUSE_MASK;
++ unsigned long val_committed = DESC_SV(id, desc_committed);
++ unsigned long val_reusable = DESC_SV(id, desc_reusable);
+ struct prb_desc *desc = to_desc(desc_ring, id);
+ atomic_long_t *state_var = &desc->state_var;
+
+@@ -921,7 +907,7 @@ static bool desc_reserve(struct printk_r
+ */
+ prev_state_val = atomic_long_read(&desc->state_var); /* LMM(desc_reserve:E) */
+ if (prev_state_val &&
+- prev_state_val != (id_prev_wrap | DESC_COMMITTED_MASK | DESC_REUSE_MASK)) {
++ get_desc_state(id_prev_wrap, prev_state_val) != desc_reusable) {
+ WARN_ON_ONCE(1);
+ return false;
+ }
+@@ -935,7 +921,7 @@ static bool desc_reserve(struct printk_r
+ * This pairs with desc_read:D.
+ */
+ if (!atomic_long_try_cmpxchg(&desc->state_var, &prev_state_val,
+- id | 0)) { /* LMM(desc_reserve:F) */
++ DESC_SV(id, desc_reserved))) { /* LMM(desc_reserve:F) */
+ WARN_ON_ONCE(1);
+ return false;
+ }
+@@ -1254,7 +1240,7 @@ void prb_commit(struct prb_reserved_entr
+ {
+ struct prb_desc_ring *desc_ring = &e->rb->desc_ring;
+ struct prb_desc *d = to_desc(desc_ring, e->id);
+- unsigned long prev_state_val = e->id | 0;
++ unsigned long prev_state_val = DESC_SV(e->id, desc_reserved);
+
+ /* Now the writer has finished all writing: LMM(prb_commit:A) */
+
+@@ -1267,7 +1253,7 @@ void prb_commit(struct prb_reserved_entr
+ * this. This pairs with desc_read:B.
+ */
+ if (!atomic_long_try_cmpxchg(&d->state_var, &prev_state_val,
+- e->id | DESC_COMMITTED_MASK)) { /* LMM(prb_commit:B) */
++ DESC_SV(e->id, desc_committed))) { /* LMM(prb_commit:B) */
+ WARN_ON_ONCE(1);
+ }
+
+--- a/kernel/printk/printk_ringbuffer.h
++++ b/kernel/printk/printk_ringbuffer.h
+@@ -112,16 +112,25 @@ struct prb_reserved_entry {
+ unsigned int text_space;
+ };
+
+-#define _DATA_SIZE(sz_bits) (1UL << (sz_bits))
+-#define _DESCS_COUNT(ct_bits) (1U << (ct_bits))
+-#define DESC_SV_BITS (sizeof(unsigned long) * 8)
+-#define DESC_COMMITTED_MASK (1UL << (DESC_SV_BITS - 1))
+-#define DESC_REUSE_MASK (1UL << (DESC_SV_BITS - 2))
+-#define DESC_FLAGS_MASK (DESC_COMMITTED_MASK | DESC_REUSE_MASK)
+-#define DESC_ID_MASK (~DESC_FLAGS_MASK)
+-#define DESC_ID(sv) ((sv) & DESC_ID_MASK)
+-#define FAILED_LPOS 0x1
+-#define NO_LPOS 0x3
++/* The possible responses of a descriptor state-query. */
++enum desc_state {
++ desc_miss = -1, /* ID mismatch (pseudo state) */
++ desc_reserved = 0x0, /* reserved, in use by writer */
++ desc_committed = 0x1, /* committed by writer */
++ desc_reusable = 0x3, /* free, not yet used by any writer */
++};
++
++#define _DATA_SIZE(sz_bits) (1UL << (sz_bits))
++#define _DESCS_COUNT(ct_bits) (1U << (ct_bits))
++#define DESC_SV_BITS (sizeof(unsigned long) * 8)
++#define DESC_FLAGS_SHIFT (DESC_SV_BITS - 2)
++#define DESC_FLAGS_MASK (3UL << DESC_FLAGS_SHIFT)
++#define DESC_STATE(sv) (3UL & (sv >> DESC_FLAGS_SHIFT))
++#define DESC_SV(id, state) (((unsigned long)state << DESC_FLAGS_SHIFT) | id)
++#define DESC_ID_MASK (~DESC_FLAGS_MASK)
++#define DESC_ID(sv) ((sv) & DESC_ID_MASK)
++#define FAILED_LPOS 0x1
++#define NO_LPOS 0x3
+
+ #define FAILED_BLK_LPOS \
+ { \
+@@ -213,7 +222,7 @@ struct prb_reserved_entry {
+ */
+ #define BLK0_LPOS(sz_bits) (-(_DATA_SIZE(sz_bits)))
+ #define DESC0_ID(ct_bits) DESC_ID(-(_DESCS_COUNT(ct_bits) + 1))
+-#define DESC0_SV(ct_bits) (DESC_COMMITTED_MASK | DESC_REUSE_MASK | DESC0_ID(ct_bits))
++#define DESC0_SV(ct_bits) DESC_SV(DESC0_ID(ct_bits), desc_reusable)
+
+ /*
+ * Define a ringbuffer with an external text data buffer. The same as
+--- a/scripts/gdb/linux/dmesg.py
++++ b/scripts/gdb/linux/dmesg.py
+@@ -78,10 +78,10 @@ atomic_long_type = utils.CachedType("ato
+ len_off = off + printk_info_type.get_type()['text_len'].bitpos // 8
+
+ # definitions from kernel/printk/printk_ringbuffer.h
++ desc_committed = 1
+ desc_sv_bits = utils.get_long_type().sizeof * 8
+- desc_committed_mask = 1 << (desc_sv_bits - 1)
+- desc_reuse_mask = 1 << (desc_sv_bits - 2)
+- desc_flags_mask = desc_committed_mask | desc_reuse_mask
++ desc_flags_shift = desc_sv_bits - 2
++ desc_flags_mask = 3 << desc_flags_shift
+ desc_id_mask = ~desc_flags_mask
+
+ # read in tail and head descriptor ids
+@@ -96,8 +96,9 @@ atomic_long_type = utils.CachedType("ato
+ desc_off = desc_sz * ind
+
+ # skip non-committed record
+- state = utils.read_u64(descs, desc_off + sv_off + counter_off) & desc_flags_mask
+- if state != desc_committed_mask:
++ state = 3 & (utils.read_u64(descs, desc_off + sv_off +
++ counter_off) >> desc_flags_shift)
++ if state != desc_committed:
+ if did == head_id:
+ break
+ did = (did + 1) & desc_id_mask
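The new encoding packs the state into the top two bits of the descriptor's
state variable and the record ID into the remaining bits. A minimal
stand-alone C sketch (not part of the patch; it only mirrors the DESC_SV()
and DESC_STATE() macros added above) shows how an (id, state) pair
round-trips through a state value:

  #include <stdio.h>

  /* Mirrors enum desc_state and the macros from printk_ringbuffer.h. */
  enum desc_state {
          desc_miss      = -1,    /* ID mismatch (pseudo state) */
          desc_reserved  = 0x0,   /* reserved, in use by writer */
          desc_committed = 0x1,   /* committed by writer */
          desc_reusable  = 0x3,   /* free, not yet used by any writer */
  };

  #define DESC_SV_BITS       (sizeof(unsigned long) * 8)
  #define DESC_FLAGS_SHIFT   (DESC_SV_BITS - 2)
  #define DESC_FLAGS_MASK    (3UL << DESC_FLAGS_SHIFT)
  #define DESC_ID_MASK       (~DESC_FLAGS_MASK)
  #define DESC_STATE(sv)     (3UL & ((sv) >> DESC_FLAGS_SHIFT))
  #define DESC_SV(id, state) (((unsigned long)(state) << DESC_FLAGS_SHIFT) | (id))

  int main(void)
  {
          unsigned long sv = DESC_SV(42UL, desc_committed);

          /* prints: id=42 state=1 */
          printf("id=%lu state=%lu\n", sv & DESC_ID_MASK, DESC_STATE(sv));
          return 0;
  }

Extracting the state with 3 & (counter >> desc_flags_shift) is the same
calculation the updated gdbmacros.txt and dmesg.py helpers above perform.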
diff --git a/debian/patches-rt/0016-sched-proc-Print-accurate-cpumask-vs-migrate_disable.patch b/debian/patches-rt/0016-sched-proc-Print-accurate-cpumask-vs-migrate_disable.patch
index 28d081dbb..48bc1852d 100644
--- a/debian/patches-rt/0016-sched-proc-Print-accurate-cpumask-vs-migrate_disable.patch
+++ b/debian/patches-rt/0016-sched-proc-Print-accurate-cpumask-vs-migrate_disable.patch
@@ -1,7 +1,7 @@
From: Peter Zijlstra <peterz@infradead.org>
-Date: Mon, 5 Oct 2020 16:57:33 +0200
-Subject: [PATCH 16/17] sched/proc: Print accurate cpumask vs migrate_disable()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Date: Fri, 23 Oct 2020 12:12:14 +0200
+Subject: [PATCH 16/19] sched/proc: Print accurate cpumask vs migrate_disable()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Ensure /proc/*/status doesn't print 'random' cpumasks due to
migrate_disable().
diff --git a/debian/patches-rt/0018-locking-rtmutex-add-mutex-implementation-based-on-rt.patch b/debian/patches-rt/0017-locking-rtmutex-add-mutex-implementation-based-on-rt.patch
index 1754f7527..6c28f789b 100644
--- a/debian/patches-rt/0018-locking-rtmutex-add-mutex-implementation-based-on-rt.patch
+++ b/debian/patches-rt/0017-locking-rtmutex-add-mutex-implementation-based-on-rt.patch
@@ -1,21 +1,21 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 12 Oct 2017 17:17:03 +0200
-Subject: [PATCH 18/23] locking/rtmutex: add mutex implementation based on
+Subject: [PATCH 17/22] locking/rtmutex: add mutex implementation based on
rtmutex
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- include/linux/mutex_rt.h | 131 +++++++++++++++++++++++++++
- kernel/locking/mutex-rt.c | 222 ++++++++++++++++++++++++++++++++++++++++++++++
- 2 files changed, 353 insertions(+)
+ include/linux/mutex_rt.h | 130 ++++++++++++++++++++++++++
+ kernel/locking/mutex-rt.c | 224 ++++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 354 insertions(+)
create mode 100644 include/linux/mutex_rt.h
create mode 100644 kernel/locking/mutex-rt.c
--- /dev/null
+++ b/include/linux/mutex_rt.h
-@@ -0,0 +1,131 @@
+@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef __LINUX_MUTEX_RT_H
+#define __LINUX_MUTEX_RT_H
@@ -47,7 +47,6 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key);
+extern void __lockfunc _mutex_lock(struct mutex *lock);
-+extern void __lockfunc _mutex_lock_io(struct mutex *lock);
+extern void __lockfunc _mutex_lock_io_nested(struct mutex *lock, int subclass);
+extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock);
+extern int __lockfunc _mutex_lock_killable(struct mutex *lock);
@@ -64,7 +63,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#define mutex_lock_killable(l) _mutex_lock_killable(l)
+#define mutex_trylock(l) _mutex_trylock(l)
+#define mutex_unlock(l) _mutex_unlock(l)
-+#define mutex_lock_io(l) _mutex_lock_io(l);
++#define mutex_lock_io(l) _mutex_lock_io_nested(l, 0);
+
+#define __mutex_owner(l) ((l)->lock.owner)
+
@@ -95,7 +94,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+# define mutex_lock_killable_nested(l, s) \
+ _mutex_lock_killable(l)
+# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
-+# define mutex_lock_io_nested(l, s) _mutex_lock_io(l)
++# define mutex_lock_io_nested(l, s) _mutex_lock_io_nested(l, s)
+#endif
+
+# define mutex_init(mutex) \
@@ -149,7 +148,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#endif
--- /dev/null
+++ b/kernel/locking/mutex-rt.c
-@@ -0,0 +1,222 @@
+@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Real-Time Preemption Support
@@ -217,6 +216,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#include <linux/fs.h>
+#include <linux/futex.h>
+#include <linux/hrtimer.h>
++#include <linux/blkdev.h>
+
+#include "rtmutex_common.h"
+
@@ -237,29 +237,43 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+}
+EXPORT_SYMBOL(__mutex_do_init);
+
++static int _mutex_lock_blk_flush(struct mutex *lock, int state)
++{
++ /*
++ * Flush blk before ->pi_blocked_on is set. At schedule() time it is too
++ * late if one of the callbacks needs to acquire a sleeping lock.
++ */
++ if (blk_needs_flush_plug(current))
++ blk_schedule_flush_plug(current);
++ return __rt_mutex_lock_state(&lock->lock, state);
++}
++
+void __lockfunc _mutex_lock(struct mutex *lock)
+{
+ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-+ __rt_mutex_lock_state(&lock->lock, TASK_UNINTERRUPTIBLE);
++ _mutex_lock_blk_flush(lock, TASK_UNINTERRUPTIBLE);
+}
+EXPORT_SYMBOL(_mutex_lock);
+
-+void __lockfunc _mutex_lock_io(struct mutex *lock)
++void __lockfunc _mutex_lock_io_nested(struct mutex *lock, int subclass)
+{
+ int token;
+
+ token = io_schedule_prepare();
-+ _mutex_lock(lock);
++
++ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
++ __rt_mutex_lock_state(&lock->lock, TASK_UNINTERRUPTIBLE);
++
+ io_schedule_finish(token);
+}
-+EXPORT_SYMBOL_GPL(_mutex_lock_io);
++EXPORT_SYMBOL_GPL(_mutex_lock_io_nested);
+
+int __lockfunc _mutex_lock_interruptible(struct mutex *lock)
+{
+ int ret;
+
+ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-+ ret = __rt_mutex_lock_state(&lock->lock, TASK_INTERRUPTIBLE);
++ ret = _mutex_lock_blk_flush(lock, TASK_INTERRUPTIBLE);
+ if (ret)
+ mutex_release(&lock->dep_map, _RET_IP_);
+ return ret;
@@ -271,7 +285,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ int ret;
+
+ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-+ ret = __rt_mutex_lock_state(&lock->lock, TASK_KILLABLE);
++ ret = _mutex_lock_blk_flush(lock, TASK_KILLABLE);
+ if (ret)
+ mutex_release(&lock->dep_map, _RET_IP_);
+ return ret;
@@ -282,27 +296,14 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass)
+{
+ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
-+ __rt_mutex_lock_state(&lock->lock, TASK_UNINTERRUPTIBLE);
++ _mutex_lock_blk_flush(lock, TASK_UNINTERRUPTIBLE);
+}
+EXPORT_SYMBOL(_mutex_lock_nested);
+
-+void __lockfunc _mutex_lock_io_nested(struct mutex *lock, int subclass)
-+{
-+ int token;
-+
-+ token = io_schedule_prepare();
-+
-+ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
-+ __rt_mutex_lock_state(&lock->lock, TASK_UNINTERRUPTIBLE);
-+
-+ io_schedule_finish(token);
-+}
-+EXPORT_SYMBOL_GPL(_mutex_lock_io_nested);
-+
+void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
+{
+ mutex_acquire_nest(&lock->dep_map, 0, 0, nest, _RET_IP_);
-+ __rt_mutex_lock_state(&lock->lock, TASK_UNINTERRUPTIBLE);
++ _mutex_lock_blk_flush(lock, TASK_UNINTERRUPTIBLE);
+}
+EXPORT_SYMBOL(_mutex_lock_nest_lock);
+
@@ -311,7 +312,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ int ret;
+
+ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
-+ ret = __rt_mutex_lock_state(&lock->lock, TASK_INTERRUPTIBLE);
++ ret = _mutex_lock_blk_flush(lock, TASK_INTERRUPTIBLE);
+ if (ret)
+ mutex_release(&lock->dep_map, _RET_IP_);
+ return ret;
@@ -323,7 +324,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ int ret;
+
+ mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-+ ret = __rt_mutex_lock_state(&lock->lock, TASK_KILLABLE);
++ ret = _mutex_lock_blk_flush(lock, TASK_KILLABLE);
+ if (ret)
+ mutex_release(&lock->dep_map, _RET_IP_);
+ return ret;
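The recurring change in this refresh is that the sleeping lock paths now
funnel through the new _mutex_lock_blk_flush() helper, which drains the
task's block plug before the task can be marked blocked. A rough user-space
sketch of that wrapper shape (plug_pending, flush_plug and rt_lock_state are
hypothetical stand-ins for blk_needs_flush_plug(), blk_schedule_flush_plug()
and __rt_mutex_lock_state(); this is not the kernel code itself):

  #include <stdbool.h>
  #include <stdio.h>

  static bool plug_pending;       /* stand-in for blk_needs_flush_plug() */

  static void flush_plug(void)    /* stand-in for blk_schedule_flush_plug() */
  {
          plug_pending = false;
          puts("flushed queued I/O");
  }

  static int rt_lock_state(void *lock, int state) /* __rt_mutex_lock_state() stand-in */
  {
          (void)lock;
          (void)state;
          return 0;               /* pretend the lock was acquired */
  }

  /*
   * Mirror of _mutex_lock_blk_flush(): flush queued I/O *before* blocking,
   * because at schedule() time it is too late if a flush callback itself
   * needs to acquire a sleeping lock.
   */
  static int lock_blk_flush(void *lock, int state)
  {
          if (plug_pending)
                  flush_plug();
          return rt_lock_state(lock, state);
  }

  int main(void)
  {
          int lock;

          plug_pending = true;
          return lock_blk_flush(&lock, 0);
  }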
diff --git a/debian/patches-rt/0017-printk-add-processor-number-to-output.patch b/debian/patches-rt/0017-printk-add-processor-number-to-output.patch
deleted file mode 100644
index 8ee3b0d95..000000000
--- a/debian/patches-rt/0017-printk-add-processor-number-to-output.patch
+++ /dev/null
@@ -1,100 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Tue, 12 Feb 2019 15:29:55 +0100
-Subject: [PATCH 17/25] printk: add processor number to output
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-It can be difficult to sort printk out if multiple processors are
-printing simultaneously. Add the processor number to the printk
-output to allow the messages to be sorted.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/printk/printk.c | 19 +++++++++++++++----
- 1 file changed, 15 insertions(+), 4 deletions(-)
-
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -348,6 +348,7 @@ enum log_flags {
-
- struct printk_log {
- u64 ts_nsec; /* timestamp in nanoseconds */
-+ u16 cpu; /* cpu that generated record */
- u16 len; /* length of entire record */
- u16 text_len; /* length of text buffer */
- u16 dict_len; /* length of dictionary buffer */
-@@ -499,7 +500,7 @@ static u32 log_next(u32 idx)
-
- /* insert record into the buffer, discard old ones, update heads */
- static int log_store(u32 caller_id, int facility, int level,
-- enum log_flags flags, u64 ts_nsec,
-+ enum log_flags flags, u64 ts_nsec, u16 cpu,
- const char *dict, u16 dict_len,
- const char *text, u16 text_len)
- {
-@@ -533,6 +534,7 @@ static int log_store(u32 caller_id, int
- #ifdef CONFIG_PRINTK_CALLER
- msg->caller_id = caller_id;
- #endif
-+ msg->cpu = cpu;
- msg->len = size;
-
- /* insert message */
-@@ -606,9 +608,9 @@ static ssize_t msg_print_ext_header(char
-
- do_div(ts_usec, 1000);
-
-- return scnprintf(buf, size, "%u,%llu,%llu,%c%s;",
-+ return scnprintf(buf, size, "%u,%llu,%llu,%c%s,%hu;",
- (msg->facility << 3) | msg->level, seq, ts_usec,
-- msg->flags & LOG_CONT ? 'c' : '-', caller);
-+ msg->flags & LOG_CONT ? 'c' : '-', caller, msg->cpu);
- }
-
- static ssize_t msg_print_ext_body(char *buf, size_t size,
-@@ -1150,6 +1152,11 @@ static inline void boot_delay_msec(int l
- static bool printk_time = IS_ENABLED(CONFIG_PRINTK_TIME);
- module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
-
-+static size_t print_cpu(u16 cpu, char *buf)
-+{
-+ return sprintf(buf, "%03hu: ", cpu);
-+}
-+
- static size_t print_syslog(unsigned int level, char *buf)
- {
- return sprintf(buf, "<%u>", level);
-@@ -1193,6 +1200,7 @@ static size_t print_prefix(const struct
- buf[len++] = ' ';
- buf[len] = '\0';
- }
-+ len += print_cpu(msg->cpu, buf + len);
-
- return len;
- }
-@@ -1765,6 +1773,7 @@ asmlinkage int vprintk_emit(int facility
- u64 ts_nsec;
- char *text;
- char *rbuf;
-+ int cpu;
-
- ts_nsec = local_clock();
-
-@@ -1774,6 +1783,8 @@ asmlinkage int vprintk_emit(int facility
- return printed_len;
- }
-
-+ cpu = raw_smp_processor_id();
-+
- text = rbuf;
- text_len = vscnprintf(text, PRINTK_SPRINT_MAX, fmt, args);
-
-@@ -1808,7 +1819,7 @@ asmlinkage int vprintk_emit(int facility
- if (dict)
- lflags |= LOG_NEWLINE;
-
-- printed_len = log_store(caller_id, facility, level, lflags, ts_nsec,
-+ printed_len = log_store(caller_id, facility, level, lflags, ts_nsec, cpu,
- dict, dictlen, text, text_len);
-
- prb_commit(&h);
diff --git a/debian/patches-rt/0017-printk-ringbuffer-add-finalization-extension-support.patch b/debian/patches-rt/0017-printk-ringbuffer-add-finalization-extension-support.patch
new file mode 100644
index 000000000..fc33a8f44
--- /dev/null
+++ b/debian/patches-rt/0017-printk-ringbuffer-add-finalization-extension-support.patch
@@ -0,0 +1,898 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 14 Sep 2020 14:39:53 +0206
+Subject: [PATCH 17/25] printk: ringbuffer: add finalization/extension support
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+Add support for extending the newest data block. For this, introduce
+a new finalization state (desc_finalized) denoting a committed
+descriptor that cannot be extended.
+
+Until a record is finalized, a writer can reopen that record to
+append new data. Reopening a record means transitioning from the
+desc_committed state back to the desc_reserved state.
+
+A writer can explicitly finalize a record if there is no intention
+of extending it. Also, records are automatically finalized when a
+new record is reserved. This relieves writers of needing to
+explicitly finalize while also making such records available to
+readers sooner. (Readers can only traverse finalized records.)
+
+Four new memory barrier pairs are introduced. Two of them are
+insignificant additions (data_realloc:A/desc_read:D and
+data_realloc:A/data_push_tail:B) because they are alternate path
+memory barriers that exactly match the purpose, pairing, and
+context of the two existing memory barrier pairs they provide an
+alternate path for. The other two new memory barrier pairs are
+significant additions:
+
+desc_reopen_last:A / _prb_commit:B - When reopening a descriptor,
+ ensure the state transitions back to desc_reserved before
+ fully trusting the descriptor data.
+
+_prb_commit:B / desc_reserve:D - When committing a descriptor,
+ ensure the state transitions to desc_committed before checking
+ the head ID to see if the descriptor needs to be finalized.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20200914123354.832-6-john.ogness@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ Documentation/admin-guide/kdump/gdbmacros.txt | 3
+ kernel/printk/printk_ringbuffer.c | 525 +++++++++++++++++++++++---
+ kernel/printk/printk_ringbuffer.h | 6
+ scripts/gdb/linux/dmesg.py | 3
+ 4 files changed, 480 insertions(+), 57 deletions(-)
+
+--- a/Documentation/admin-guide/kdump/gdbmacros.txt
++++ b/Documentation/admin-guide/kdump/gdbmacros.txt
+@@ -297,6 +297,7 @@ end
+ define dmesg
+ # definitions from kernel/printk/printk_ringbuffer.h
+ set var $desc_committed = 1
++ set var $desc_finalized = 2
+ set var $desc_sv_bits = sizeof(long) * 8
+ set var $desc_flags_shift = $desc_sv_bits - 2
+ set var $desc_flags_mask = 3 << $desc_flags_shift
+@@ -313,7 +314,7 @@ define dmesg
+
+ # skip non-committed record
+ set var $state = 3 & ($desc->state_var.counter >> $desc_flags_shift)
+- if ($state == $desc_committed)
++ if ($state == $desc_committed || $state == $desc_finalized)
+ dump_record $desc $prev_flags
+ set var $prev_flags = $desc->info.flags
+ end
+--- a/kernel/printk/printk_ringbuffer.c
++++ b/kernel/printk/printk_ringbuffer.c
+@@ -46,20 +46,26 @@
+ * into a single descriptor field named @state_var, allowing ID and state to
+ * be synchronously and atomically updated.
+ *
+- * Descriptors have three states:
++ * Descriptors have four states:
+ *
+ * reserved
+ * A writer is modifying the record.
+ *
+ * committed
+- * The record and all its data are complete and available for reading.
++ * The record and all its data are written. A writer can reopen the
++ * descriptor (transitioning it back to reserved), but in the committed
++ * state the data is consistent.
++ *
++ * finalized
++ * The record and all its data are complete and available for reading. A
++ * writer cannot reopen the descriptor.
+ *
+ * reusable
+ * The record exists, but its text and/or dictionary data may no longer
+ * be available.
+ *
+ * Querying the @state_var of a record requires providing the ID of the
+- * descriptor to query. This can yield a possible fourth (pseudo) state:
++ * descriptor to query. This can yield a possible fifth (pseudo) state:
+ *
+ * miss
+ * The descriptor being queried has an unexpected ID.
+@@ -79,6 +85,28 @@
+ * committed or reusable queried state. This makes it possible that a valid
+ * sequence number of the tail is always available.
+ *
++ * Descriptor Finalization
++ * ~~~~~~~~~~~~~~~~~~~~~~~
++ * When a writer calls the commit function prb_commit(), record data is
++ * fully stored and is consistent within the ringbuffer. However, a writer can
++ * reopen that record, claiming exclusive access (as with prb_reserve()), and
++ * modify that record. When finished, the writer must again commit the record.
++ *
++ * In order for a record to be made available to readers (and also become
++ * recyclable for writers), it must be finalized. A finalized record cannot be
++ * reopened and can never become "unfinalized". Record finalization can occur
++ * in three different scenarios:
++ *
++ * 1) A writer can simultaneously commit and finalize its record by calling
++ * prb_final_commit() instead of prb_commit().
++ *
++ * 2) When a new record is reserved and the previous record has been
++ * committed via prb_commit(), that previous record is automatically
++ * finalized.
++ *
++ * 3) When a record is committed via prb_commit() and a newer record
++ * already exists, the record being committed is automatically finalized.
++ *
+ * Data Rings
+ * ~~~~~~~~~~
+ * The two data rings (text and dictionary) function identically. They exist
+@@ -97,7 +125,7 @@
+ * are met:
+ *
+ * 1) The descriptor associated with the data block is in the committed
+- * queried state.
++ * or finalized queried state.
+ *
+ * 2) The blk_lpos struct within the descriptor associated with the data
+ * block references back to the same data block.
+@@ -156,9 +184,38 @@
+ *
+ * r.info->ts_nsec = local_clock();
+ *
++ * prb_final_commit(&e);
++ * }
++ *
++ * Note that additional writer functions are available to extend a record
++ * after it has been committed but not yet finalized. This can be done as
++ * long as no new records have been reserved and the caller is the same.
++ *
++ * Sample writer code (record extending)::
++ *
++ * // alternate rest of previous example
++ * r.info->ts_nsec = local_clock();
++ * r.info->text_len = strlen(textstr);
++ * r.info->caller_id = printk_caller_id();
++ *
++ * // commit the record (but do not finalize yet)
+ * prb_commit(&e);
+ * }
+ *
++ * ...
++ *
++ * // specify additional 5 bytes text space to extend
++ * prb_rec_init_wr(&r, 5, 0);
++ *
++ * if (prb_reserve_in_last(&e, &test_rb, &r, printk_caller_id())) {
++ * snprintf(&r.text_buf[r.info->text_len],
++ * r.text_buf_size - r.info->text_len, "hello");
++ *
++ * r.info->text_len += 5;
++ *
++ * prb_final_commit(&e);
++ * }
++ *
+ * Sample reader code::
+ *
+ * struct printk_info info;
+@@ -236,15 +293,21 @@
+ * desc_reserve:F / desc_read:D
+ * set new descriptor id and reserved (state), then allow writer changes
+ *
+- * data_alloc:A / desc_read:D
++ * data_alloc:A (or data_realloc:A) / desc_read:D
+ * set old descriptor reusable (state), then modify new data block area
+ *
+- * data_alloc:A / data_push_tail:B
++ * data_alloc:A (or data_realloc:A) / data_push_tail:B
+ * push data tail (lpos), then modify new data block area
+ *
+- * prb_commit:B / desc_read:B
++ * _prb_commit:B / desc_read:B
+ * store writer changes, then set new descriptor committed (state)
+ *
++ * desc_reopen_last:A / _prb_commit:B
++ * set descriptor reserved (state), then read descriptor data
++ *
++ * _prb_commit:B / desc_reserve:D
++ * set new descriptor committed (state), then check descriptor head (id)
++ *
+ * data_push_tail:D / data_push_tail:A
+ * set descriptor reusable (state), then push data tail (lpos)
+ *
+@@ -386,16 +449,16 @@ static enum desc_state desc_read(struct
+ /*
+ * Guarantee the state is loaded before copying the descriptor
+ * content. This avoids copying obsolete descriptor content that might
+- * not apply to the descriptor state. This pairs with prb_commit:B.
++ * not apply to the descriptor state. This pairs with _prb_commit:B.
+ *
+ * Memory barrier involvement:
+ *
+- * If desc_read:A reads from prb_commit:B, then desc_read:C reads
+- * from prb_commit:A.
++ * If desc_read:A reads from _prb_commit:B, then desc_read:C reads
++ * from _prb_commit:A.
+ *
+ * Relies on:
+ *
+- * WMB from prb_commit:A to prb_commit:B
++ * WMB from _prb_commit:A to _prb_commit:B
+ * matching
+ * RMB from desc_read:A to desc_read:C
+ */
+@@ -431,7 +494,8 @@ static enum desc_state desc_read(struct
+ *
+ * 2. Guarantee the record data is loaded before re-checking the
+ * state. This avoids reading an obsolete descriptor state that may
+- * not apply to the copied data. This pairs with data_alloc:A.
++ * not apply to the copied data. This pairs with data_alloc:A and
++ * data_realloc:A.
+ *
+ * Memory barrier involvement:
+ *
+@@ -463,19 +527,19 @@ static enum desc_state desc_read(struct
+ }
+
+ /*
+- * Take a specified descriptor out of the committed state by attempting
+- * the transition from committed to reusable. Either this context or some
++ * Take a specified descriptor out of the finalized state by attempting
++ * the transition from finalized to reusable. Either this context or some
+ * other context will have been successful.
+ */
+ static void desc_make_reusable(struct prb_desc_ring *desc_ring,
+ unsigned long id)
+ {
+- unsigned long val_committed = DESC_SV(id, desc_committed);
++ unsigned long val_finalized = DESC_SV(id, desc_finalized);
+ unsigned long val_reusable = DESC_SV(id, desc_reusable);
+ struct prb_desc *desc = to_desc(desc_ring, id);
+ atomic_long_t *state_var = &desc->state_var;
+
+- atomic_long_cmpxchg_relaxed(state_var, val_committed,
++ atomic_long_cmpxchg_relaxed(state_var, val_finalized,
+ val_reusable); /* LMM(desc_make_reusable:A) */
+ }
+
+@@ -484,7 +548,7 @@ static void desc_make_reusable(struct pr
+ * data block from @lpos_begin until @lpos_end into the reusable state.
+ *
+ * If there is any problem making the associated descriptor reusable, either
+- * the descriptor has not yet been committed or another writer context has
++ * the descriptor has not yet been finalized or another writer context has
+ * already pushed the tail lpos past the problematic data block. Regardless,
+ * on error the caller can re-load the tail lpos to determine the situation.
+ */
+@@ -528,10 +592,10 @@ static bool data_make_reusable(struct pr
+
+ switch (d_state) {
+ case desc_miss:
+- return false;
+ case desc_reserved:
+- return false;
+ case desc_committed:
++ return false;
++ case desc_finalized:
+ /*
+ * This data block is invalid if the descriptor
+ * does not point back to it.
+@@ -616,7 +680,7 @@ static bool data_push_tail(struct printk
+ * data_make_reusable() may be due to a newly
+ * recycled data area causing the tail lpos to
+ * have been previously pushed. This pairs with
+- * data_alloc:A.
++ * data_alloc:A and data_realloc:A.
+ *
+ * Memory barrier involvement:
+ *
+@@ -729,8 +793,9 @@ static bool desc_push_tail(struct printk
+ */
+ return true;
+ case desc_reserved:
+- return false;
+ case desc_committed:
++ return false;
++ case desc_finalized:
+ desc_make_reusable(desc_ring, tail_id);
+ break;
+ case desc_reusable:
+@@ -751,7 +816,7 @@ static bool desc_push_tail(struct printk
+
+ /*
+ * Check the next descriptor after @tail_id before pushing the tail
+- * to it because the tail must always be in a committed or reusable
++ * to it because the tail must always be in a finalized or reusable
+ * state. The implementation of prb_first_seq() relies on this.
+ *
+ * A successful read implies that the next descriptor is less than or
+@@ -760,7 +825,7 @@ static bool desc_push_tail(struct printk
+ */
+ d_state = desc_read(desc_ring, DESC_ID(tail_id + 1), &desc); /* LMM(desc_push_tail:A) */
+
+- if (d_state == desc_committed || d_state == desc_reusable) {
++ if (d_state == desc_finalized || d_state == desc_reusable) {
+ /*
+ * Guarantee any descriptor states that have transitioned to
+ * reusable are stored before pushing the tail ID. This allows
+@@ -895,6 +960,10 @@ static bool desc_reserve(struct printk_r
+ * another CPU may have pushed the tail ID. This pairs
+ * with desc_push_tail:C and this also pairs with
+ * prb_first_seq:C.
++ *
++ * 5. Guarantee the head ID is stored before trying to
++ * finalize the previous descriptor. This pairs with
++ * _prb_commit:B.
+ */
+ } while (!atomic_long_try_cmpxchg(&desc_ring->head_id, &head_id,
+ id)); /* LMM(desc_reserve:D) */
+@@ -1024,6 +1093,84 @@ static char *data_alloc(struct printk_ri
+ return &blk->data[0];
+ }
+
++/*
++ * Try to resize an existing data block associated with the descriptor
++ * specified by @id. If the resized data block should become wrapped, it
++ * copies the old data to the new data block. If @size yields a data block
++ * with the same or less size, the data block is left as is.
++ *
++ * Fail if this is not the last allocated data block or if there is not
++ * enough space or it is not possible to make enough space.
++ *
++ * Return a pointer to the beginning of the entire data buffer or NULL on
++ * failure.
++ */
++static char *data_realloc(struct printk_ringbuffer *rb,
++ struct prb_data_ring *data_ring, unsigned int size,
++ struct prb_data_blk_lpos *blk_lpos, unsigned long id)
++{
++ struct prb_data_block *blk;
++ unsigned long head_lpos;
++ unsigned long next_lpos;
++ bool wrapped;
++
++ /* Reallocation only works if @blk_lpos is the newest data block. */
++ head_lpos = atomic_long_read(&data_ring->head_lpos);
++ if (head_lpos != blk_lpos->next)
++ return NULL;
++
++ /* Keep track if @blk_lpos was a wrapping data block. */
++ wrapped = (DATA_WRAPS(data_ring, blk_lpos->begin) != DATA_WRAPS(data_ring, blk_lpos->next));
++
++ size = to_blk_size(size);
++
++ next_lpos = get_next_lpos(data_ring, blk_lpos->begin, size);
++
++ /* If the data block does not increase, there is nothing to do. */
++ if (head_lpos - next_lpos < DATA_SIZE(data_ring)) {
++ blk = to_block(data_ring, blk_lpos->begin);
++ return &blk->data[0];
++ }
++
++ if (!data_push_tail(rb, data_ring, next_lpos - DATA_SIZE(data_ring)))
++ return NULL;
++
++ /* The memory barrier involvement is the same as data_alloc:A. */
++ if (!atomic_long_try_cmpxchg(&data_ring->head_lpos, &head_lpos,
++ next_lpos)) { /* LMM(data_realloc:A) */
++ return NULL;
++ }
++
++ blk = to_block(data_ring, blk_lpos->begin);
++
++ if (DATA_WRAPS(data_ring, blk_lpos->begin) != DATA_WRAPS(data_ring, next_lpos)) {
++ struct prb_data_block *old_blk = blk;
++
++ /* Wrapping data blocks store their data at the beginning. */
++ blk = to_block(data_ring, 0);
++
++ /*
++ * Store the ID on the wrapped block for consistency.
++ * The printk_ringbuffer does not actually use it.
++ */
++ blk->id = id;
++
++ if (!wrapped) {
++ /*
++ * Since the allocated space is now in the newly
++ * created wrapping data block, copy the content
++ * from the old data block.
++ */
++ memcpy(&blk->data[0], &old_blk->data[0],
++ (blk_lpos->next - blk_lpos->begin) - sizeof(blk->id));
++ }
++ }
++
++ blk_lpos->next = next_lpos;
++
++ return &blk->data[0];
++}
++
+ /* Return the number of bytes used by a data block. */
+ static unsigned int space_used(struct prb_data_ring *data_ring,
+ struct prb_data_blk_lpos *blk_lpos)
+@@ -1104,6 +1251,206 @@ static const char *get_data(struct prb_d
+ return &db->data[0];
+ }
+
++/*
++ * Attempt to transition the newest descriptor from committed back to reserved
++ * so that the record can be modified by a writer again. This is only possible
++ * if the descriptor is not yet finalized and the provided @caller_id matches.
++ */
++static struct prb_desc *desc_reopen_last(struct prb_desc_ring *desc_ring,
++ u32 caller_id, unsigned long *id_out)
++{
++ unsigned long prev_state_val;
++ enum desc_state d_state;
++ struct prb_desc desc;
++ struct prb_desc *d;
++ unsigned long id;
++
++ id = atomic_long_read(&desc_ring->head_id);
++
++ /*
++ * To reduce unnecessarily reopening, first check if the descriptor
++ * state and caller ID are correct.
++ */
++ d_state = desc_read(desc_ring, id, &desc);
++ if (d_state != desc_committed || desc.info.caller_id != caller_id)
++ return NULL;
++
++ d = to_desc(desc_ring, id);
++
++ prev_state_val = DESC_SV(id, desc_committed);
++
++ /*
++ * Guarantee the reserved state is stored before reading any
++ * record data. A full memory barrier is needed because @state_var
++ * modification is followed by reading. This pairs with _prb_commit:B.
++ *
++ * Memory barrier involvement:
++ *
++ * If desc_reopen_last:A reads from _prb_commit:B, then
++ * prb_reserve_in_last:A reads from _prb_commit:A.
++ *
++ * Relies on:
++ *
++ * WMB from _prb_commit:A to _prb_commit:B
++ * matching
++ * MB from desc_reopen_last:A to prb_reserve_in_last:A
++ */
++ if (!atomic_long_try_cmpxchg(&d->state_var, &prev_state_val,
++ DESC_SV(id, desc_reserved))) { /* LMM(desc_reopen_last:A) */
++ return NULL;
++ }
++
++ *id_out = id;
++ return d;
++}
++
++/**
++ * prb_reserve_in_last() - Re-reserve and extend the space in the ringbuffer
++ * used by the newest record.
++ *
++ * @e: The entry structure to setup.
++ * @rb: The ringbuffer to re-reserve and extend data in.
++ * @r: The record structure to allocate buffers for.
++ * @caller_id: The caller ID of the caller (reserving writer).
++ *
++ * This is the public function available to writers to re-reserve and extend
++ * data.
++ *
++ * The writer specifies the text size to extend (not the new total size) by
++ * setting the @text_buf_size field of @r. Extending dictionaries is not
++ * supported, so @dict_buf_size of @r should be set to 0. To ensure proper
++ * initialization of @r, prb_rec_init_wr() should be used.
++ *
++ * This function will fail if @caller_id does not match the caller ID of the
++ * newest record. In that case the caller must reserve new data using
++ * prb_reserve().
++ *
++ * Context: Any context. Disables local interrupts on success.
++ * Return: true if text data could be extended, otherwise false.
++ *
++ * On success:
++ *
++ * - @r->text_buf points to the beginning of the entire text buffer.
++ *
++ * - @r->text_buf_size is set to the new total size of the buffer.
++ *
++ * - @r->dict_buf and @r->dict_buf_size are cleared because extending
++ * the dict buffer is not supported.
++ *
++ * - @r->info is not touched so that @r->info->text_len could be used
++ * to append the text.
++ *
++ * - prb_record_text_space() can be used on @e to query the new
++ * actually used space.
++ *
++ * Important: All @r->info fields will already be set with the current values
++ * for the record. I.e. @r->info->text_len will be less than
++ * @text_buf_size and @r->info->dict_len may be set, even though
++ * @dict_buf_size is 0. Writers can use @r->info->text_len to know
++ * where concatenation begins and writers should update
++ * @r->info->text_len after concatenating.
++ */
++bool prb_reserve_in_last(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
++ struct printk_record *r, u32 caller_id)
++{
++ unsigned int data_size;
++ struct prb_desc *d;
++ unsigned long id;
++
++ local_irq_save(e->irqflags);
++
++ /* Transition the newest descriptor back to the reserved state. */
++ d = desc_reopen_last(&rb->desc_ring, caller_id, &id);
++ if (!d) {
++ local_irq_restore(e->irqflags);
++ goto fail_reopen;
++ }
++
++ /* Now the writer has exclusive access: LMM(prb_reserve_in_last:A) */
++
++ /*
++ * Set the @e fields here so that prb_commit() can be used if
++ * anything fails from now on.
++ */
++ e->rb = rb;
++ e->id = id;
++
++ /*
++ * desc_reopen_last() checked the caller_id, but there was no
++ * exclusive access at that point. The descriptor may have
++ * changed since then.
++ */
++ if (caller_id != d->info.caller_id)
++ goto fail;
++
++ if (BLK_DATALESS(&d->text_blk_lpos)) {
++ if (WARN_ON_ONCE(d->info.text_len != 0)) {
++ pr_warn_once("wrong text_len value (%hu, expecting 0)\n",
++ d->info.text_len);
++ d->info.text_len = 0;
++ }
++
++ if (!data_check_size(&rb->text_data_ring, r->text_buf_size))
++ goto fail;
++
++ r->text_buf = data_alloc(rb, &rb->text_data_ring, r->text_buf_size,
++ &d->text_blk_lpos, id);
++ } else {
++ if (!get_data(&rb->text_data_ring, &d->text_blk_lpos, &data_size))
++ goto fail;
++
++ /*
++ * Increase the buffer size to include the original size. If
++ * the meta data (@text_len) is not sane, use the full data
++ * block size.
++ */
++ if (WARN_ON_ONCE(d->info.text_len > data_size)) {
++ pr_warn_once("wrong text_len value (%hu, expecting <=%u)\n",
++ d->info.text_len, data_size);
++ d->info.text_len = data_size;
++ }
++ r->text_buf_size += d->info.text_len;
++
++ if (!data_check_size(&rb->text_data_ring, r->text_buf_size))
++ goto fail;
++
++ r->text_buf = data_realloc(rb, &rb->text_data_ring, r->text_buf_size,
++ &d->text_blk_lpos, id);
++ }
++ if (r->text_buf_size && !r->text_buf)
++ goto fail;
++
++ /* Although dictionary data may be in use, it cannot be extended. */
++ r->dict_buf = NULL;
++ r->dict_buf_size = 0;
++
++ r->info = &d->info;
++
++ e->text_space = space_used(&rb->text_data_ring, &d->text_blk_lpos);
++
++ return true;
++fail:
++ prb_commit(e);
++ /* prb_commit() re-enabled interrupts. */
++fail_reopen:
++ /* Make it clear to the caller that the re-reserve failed. */
++ memset(r, 0, sizeof(*r));
++ return false;
++}
++
++/*
++ * Attempt to finalize a specified descriptor. If this fails, the descriptor
++ * is either already final or it will finalize itself when the writer commits.
++ */
++static void desc_make_final(struct prb_desc_ring *desc_ring, unsigned long id)
++{
++ unsigned long prev_state_val = DESC_SV(id, desc_committed);
++ struct prb_desc *d = to_desc(desc_ring, id);
++
++ atomic_long_cmpxchg_relaxed(&d->state_var, prev_state_val,
++ DESC_SV(id, desc_finalized)); /* LMM(desc_make_final:A) */
++}
++
+ /**
+ * prb_reserve() - Reserve space in the ringbuffer.
+ *
+@@ -1197,6 +1544,15 @@ bool prb_reserve(struct prb_reserved_ent
+ else
+ d->info.seq = seq + DESCS_COUNT(desc_ring);
+
++ /*
++ * New data is about to be reserved. Once that happens, previous
++ * descriptors are no longer able to be extended. Finalize the
++ * previous descriptor now so that it can be made available to
++ * readers. (For seq==0 there is no previous descriptor.)
++ */
++ if (d->info.seq > 0)
++ desc_make_final(desc_ring, DESC_ID(id - 1));
++
+ r->text_buf = data_alloc(rb, &rb->text_data_ring, r->text_buf_size,
+ &d->text_blk_lpos, id);
+ /* If text data allocation fails, a data-less record is committed. */
+@@ -1227,33 +1583,40 @@ bool prb_reserve(struct prb_reserved_ent
+ return false;
+ }
+
+-/**
+- * prb_commit() - Commit (previously reserved) data to the ringbuffer.
+- *
+- * @e: The entry containing the reserved data information.
+- *
+- * This is the public function available to writers to commit data.
+- *
+- * Context: Any context. Enables local interrupts.
+- */
+-void prb_commit(struct prb_reserved_entry *e)
++/* Commit the data (possibly finalizing it) and restore interrupts. */
++static void _prb_commit(struct prb_reserved_entry *e, unsigned long state_val)
+ {
+ struct prb_desc_ring *desc_ring = &e->rb->desc_ring;
+ struct prb_desc *d = to_desc(desc_ring, e->id);
+ unsigned long prev_state_val = DESC_SV(e->id, desc_reserved);
+
+- /* Now the writer has finished all writing: LMM(prb_commit:A) */
++ /* Now the writer has finished all writing: LMM(_prb_commit:A) */
+
+ /*
+ * Set the descriptor as committed. See "ABA Issues" about why
+ * cmpxchg() instead of set() is used.
+ *
+- * Guarantee all record data is stored before the descriptor state
+- * is stored as committed. A write memory barrier is sufficient for
+- * this. This pairs with desc_read:B.
++ * 1. Guarantee all record data is stored before the descriptor state
++ * is stored as committed. A write memory barrier is sufficient
++ * for this. This pairs with desc_read:B and desc_reopen_last:A.
++ *
++ * 2. Guarantee the descriptor state is stored as committed before
++ * re-checking the head ID in order to possibly finalize this
++ * descriptor. This pairs with desc_reserve:D.
++ *
++ * Memory barrier involvement:
++ *
++ * If prb_commit:A reads from desc_reserve:D, then
++ * desc_make_final:A reads from _prb_commit:B.
++ *
++ * Relies on:
++ *
++ * MB _prb_commit:B to prb_commit:A
++ * matching
++ * MB desc_reserve:D to desc_make_final:A
+ */
+ if (!atomic_long_try_cmpxchg(&d->state_var, &prev_state_val,
+- DESC_SV(e->id, desc_committed))) { /* LMM(prb_commit:B) */
++ DESC_SV(e->id, state_val))) { /* LMM(_prb_commit:B) */
+ WARN_ON_ONCE(1);
+ }
+
+@@ -1261,6 +1624,59 @@ void prb_commit(struct prb_reserved_entr
+ local_irq_restore(e->irqflags);
+ }
+
++/**
++ * prb_commit() - Commit (previously reserved) data to the ringbuffer.
++ *
++ * @e: The entry containing the reserved data information.
++ *
++ * This is the public function available to writers to commit data.
++ *
++ * Note that the data is not yet available to readers until it is finalized.
++ * Finalizing happens automatically when space for the next record is
++ * reserved.
++ *
++ * See prb_final_commit() for a version of this function that finalizes
++ * immediately.
++ *
++ * Context: Any context. Enables local interrupts.
++ */
++void prb_commit(struct prb_reserved_entry *e)
++{
++ struct prb_desc_ring *desc_ring = &e->rb->desc_ring;
++ unsigned long head_id;
++
++ _prb_commit(e, desc_committed);
++
++ /*
++ * If this descriptor is no longer the head (i.e. a new record has
++ * been allocated), extending the data for this record is no longer
++ * allowed and therefore it must be finalized.
++ */
++ head_id = atomic_long_read(&desc_ring->head_id); /* LMM(prb_commit:A) */
++ if (head_id != e->id)
++ desc_make_final(desc_ring, e->id);
++}
++
++/**
++ * prb_final_commit() - Commit and finalize (previously reserved) data to
++ * the ringbuffer.
++ *
++ * @e: The entry containing the reserved data information.
++ *
++ * This is the public function available to writers to commit+finalize data.
++ *
++ * By finalizing, the data is made immediately available to readers.
++ *
++ * This function should only be used if there are no intentions of extending
++ * this data using prb_reserve_in_last().
++ *
++ * Context: Any context. Enables local interrupts.
++ */
++void prb_final_commit(struct prb_reserved_entry *e)
++{
++ _prb_commit(e, desc_finalized);
++}
++
+ /*
+ * Count the number of lines in provided text. All text has at least 1 line
+ * (even if @text_size is 0). Each '\n' processed is counted as an additional
+@@ -1312,7 +1728,7 @@ static bool copy_data(struct prb_data_ri
+ * because of the trailing alignment padding.
+ */
+ if (WARN_ON_ONCE(data_size < (unsigned int)len)) {
+- pr_warn_once("wrong data size (%u, expecting %hu) for data: %.*s\n",
++ pr_warn_once("wrong data size (%u, expecting >=%hu) for data: %.*s\n",
+ data_size, len, data_size, data);
+ return false;
+ }
+@@ -1333,16 +1749,16 @@ static bool copy_data(struct prb_data_ri
+
+ /*
+ * This is an extended version of desc_read(). It gets a copy of a specified
+- * descriptor. However, it also verifies that the record is committed and has
++ * descriptor. However, it also verifies that the record is finalized and has
+ * the sequence number @seq. On success, 0 is returned.
+ *
+ * Error return values:
+- * -EINVAL: A committed record with sequence number @seq does not exist.
+- * -ENOENT: A committed record with sequence number @seq exists, but its data
++ * -EINVAL: A finalized record with sequence number @seq does not exist.
++ * -ENOENT: A finalized record with sequence number @seq exists, but its data
+ * is not available. This is a valid record, so readers should
+ * continue with the next record.
+ */
+-static int desc_read_committed_seq(struct prb_desc_ring *desc_ring,
++static int desc_read_finalized_seq(struct prb_desc_ring *desc_ring,
+ unsigned long id, u64 seq,
+ struct prb_desc *desc_out)
+ {
+@@ -1353,11 +1769,12 @@ static int desc_read_committed_seq(struc
+
+ /*
+ * An unexpected @id (desc_miss) or @seq mismatch means the record
+- * does not exist. A descriptor in the reserved state means the
+- * record does not yet exist for the reader.
++ * does not exist. A descriptor in the reserved or committed state
++ * means the record does not yet exist for the reader.
+ */
+ if (d_state == desc_miss ||
+ d_state == desc_reserved ||
++ d_state == desc_committed ||
+ desc_out->info.seq != seq) {
+ return -EINVAL;
+ }
+@@ -1379,7 +1796,7 @@ static int desc_read_committed_seq(struc
+ * Copy the ringbuffer data from the record with @seq to the provided
+ * @r buffer. On success, 0 is returned.
+ *
+- * See desc_read_committed_seq() for error return values.
++ * See desc_read_finalized_seq() for error return values.
+ */
+ static int prb_read(struct printk_ringbuffer *rb, u64 seq,
+ struct printk_record *r, unsigned int *line_count)
+@@ -1395,7 +1812,7 @@ static int prb_read(struct printk_ringbu
+ id = DESC_ID(atomic_long_read(state_var));
+
+ /* Get a local copy of the correct descriptor (if available). */
+- err = desc_read_committed_seq(desc_ring, id, seq, &desc);
++ err = desc_read_finalized_seq(desc_ring, id, seq, &desc);
+
+ /*
+ * If @r is NULL, the caller is only interested in the availability
+@@ -1425,8 +1842,8 @@ static int prb_read(struct printk_ringbu
+ r->info->dict_len = 0;
+ }
+
+- /* Ensure the record is still committed and has the same @seq. */
+- return desc_read_committed_seq(desc_ring, id, seq, &desc);
++ /* Ensure the record is still finalized and has the same @seq. */
++ return desc_read_finalized_seq(desc_ring, id, seq, &desc);
+ }
+
+ /* Get the sequence number of the tail descriptor. */
+@@ -1444,9 +1861,9 @@ static u64 prb_first_seq(struct printk_r
+
+ /*
+ * This loop will not be infinite because the tail is
+- * _always_ in the committed or reusable state.
++ * _always_ in the finalized or reusable state.
+ */
+- if (d_state == desc_committed || d_state == desc_reusable)
++ if (d_state == desc_finalized || d_state == desc_reusable)
+ break;
+
+ /*
+@@ -1473,8 +1890,8 @@ static u64 prb_first_seq(struct printk_r
+ }
+
+ /*
+- * Non-blocking read of a record. Updates @seq to the last committed record
+- * (which may have no data).
++ * Non-blocking read of a record. Updates @seq to the last finalized record
++ * (which may have no data available).
+ *
+ * See the description of prb_read_valid() and prb_read_valid_info()
+ * for details.
+@@ -1500,7 +1917,7 @@ static bool _prb_read_valid(struct print
+ (*seq)++;
+
+ } else {
+- /* Non-existent/non-committed record. Must stop. */
++ /* Non-existent/non-finalized record. Must stop. */
+ return false;
+ }
+ }
+--- a/kernel/printk/printk_ringbuffer.h
++++ b/kernel/printk/printk_ringbuffer.h
+@@ -116,7 +116,8 @@ struct prb_reserved_entry {
+ enum desc_state {
+ desc_miss = -1, /* ID mismatch (pseudo state) */
+ desc_reserved = 0x0, /* reserved, in use by writer */
+- desc_committed = 0x1, /* committed by writer */
++ desc_committed = 0x1, /* committed by writer, could get reopened */
++ desc_finalized = 0x2, /* committed, no further modification allowed */
+ desc_reusable = 0x3, /* free, not yet used by any writer */
+ };
+
+@@ -327,7 +328,10 @@ static inline void prb_rec_init_wr(struc
+
+ bool prb_reserve(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
+ struct printk_record *r);
++bool prb_reserve_in_last(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
++ struct printk_record *r, u32 caller_id);
+ void prb_commit(struct prb_reserved_entry *e);
++void prb_final_commit(struct prb_reserved_entry *e);
+
+ void prb_init(struct printk_ringbuffer *rb,
+ char *text_buf, unsigned int text_buf_size,
+--- a/scripts/gdb/linux/dmesg.py
++++ b/scripts/gdb/linux/dmesg.py
+@@ -79,6 +79,7 @@ atomic_long_type = utils.CachedType("ato
+
+ # definitions from kernel/printk/printk_ringbuffer.h
+ desc_committed = 1
++ desc_finalized = 2
+ desc_sv_bits = utils.get_long_type().sizeof * 8
+ desc_flags_shift = desc_sv_bits - 2
+ desc_flags_mask = 3 << desc_flags_shift
+@@ -98,7 +99,7 @@ atomic_long_type = utils.CachedType("ato
+ # skip non-committed record
+ state = 3 & (utils.read_u64(descs, desc_off + sv_off +
+ counter_off) >> desc_flags_shift)
+- if state != desc_committed:
++ if state != desc_committed and state != desc_finalized:
+ if did == head_id:
+ break
+ did = (did + 1) & desc_id_mask
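The core of the finalization scheme is one opportunistic transition:
desc_make_final() tries to move a record from desc_committed to
desc_finalized with a relaxed cmpxchg and simply gives up if the record was
already finalized or reopened. A small stand-alone C11 approximation of that
transition (user-space sketch, not the kernel code):

  #include <stdatomic.h>
  #include <stdio.h>

  enum desc_state { desc_reserved = 0x0, desc_committed = 0x1,
                    desc_finalized = 0x2, desc_reusable = 0x3 };

  #define DESC_FLAGS_SHIFT   ((sizeof(unsigned long) * 8) - 2)
  #define DESC_SV(id, state) (((unsigned long)(state) << DESC_FLAGS_SHIFT) | (id))

  /* Approximation of desc_make_final(): only a committed record is finalized. */
  static void make_final(atomic_ulong *state_var, unsigned long id)
  {
          unsigned long expect = DESC_SV(id, desc_committed);

          /* Failure is harmless: the record was already finalized or reopened. */
          atomic_compare_exchange_strong_explicit(state_var, &expect,
                                                  DESC_SV(id, desc_finalized),
                                                  memory_order_relaxed,
                                                  memory_order_relaxed);
  }

  int main(void)
  {
          atomic_ulong sv = DESC_SV(7UL, desc_committed);

          make_final(&sv, 7UL);
          /* prints: state=2 (desc_finalized) */
          printf("state=%lu\n", 3UL & (atomic_load(&sv) >> DESC_FLAGS_SHIFT));
          return 0;
  }

This is also why the gdb helpers above now accept both desc_committed and
desc_finalized when walking a crash dump, while live readers only traverse
finalized records.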
diff --git a/debian/patches-rt/0017-sched-Add-migrate_disable-tracepoints.patch b/debian/patches-rt/0017-sched-Add-migrate_disable-tracepoints.patch
index 5ab884f47..bb55f5d23 100644
--- a/debian/patches-rt/0017-sched-Add-migrate_disable-tracepoints.patch
+++ b/debian/patches-rt/0017-sched-Add-migrate_disable-tracepoints.patch
@@ -1,7 +1,7 @@
From: Peter Zijlstra <peterz@infradead.org>
-Date: Mon, 5 Oct 2020 16:57:34 +0200
-Subject: [PATCH 17/17] sched: Add migrate_disable() tracepoints
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Date: Fri, 23 Oct 2020 12:12:15 +0200
+Subject: [PATCH 17/19] sched: Add migrate_disable() tracepoints
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
XXX write a tracer:
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
-@@ -2245,6 +2245,7 @@ static void pull_dl_task(struct rq *this
+@@ -2248,6 +2248,7 @@ static void pull_dl_task(struct rq *this
goto skip;
if (is_migration_disabled(p)) {
diff --git a/debian/patches-rt/0018-console-add-write_atomic-interface.patch b/debian/patches-rt/0018-console-add-write_atomic-interface.patch
deleted file mode 100644
index b4613c5e6..000000000
--- a/debian/patches-rt/0018-console-add-write_atomic-interface.patch
+++ /dev/null
@@ -1,65 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Tue, 12 Feb 2019 15:29:56 +0100
-Subject: [PATCH 18/25] console: add write_atomic interface
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-Add a write_atomic callback to the console. This is an optional
-function for console drivers. The function must be atomic (including
-NMI safe) for writing to the console.
-
-Console drivers must still implement the write callback. The
-write_atomic callback will only be used for emergency messages.
-
-Creating an NMI safe write_atomic that must synchronize with write
-requires a careful implementation of the console driver. To aid with
-the implementation, a set of console_atomic_* functions are provided:
-
- void console_atomic_lock(unsigned int *flags);
- void console_atomic_unlock(unsigned int flags);
-
-These functions synchronize using the processor-reentrant cpu lock of
-the printk buffer.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/console.h | 4 ++++
- kernel/printk/printk.c | 12 ++++++++++++
- 2 files changed, 16 insertions(+)
-
---- a/include/linux/console.h
-+++ b/include/linux/console.h
-@@ -141,6 +141,7 @@ static inline int con_debug_leave(void)
- struct console {
- char name[16];
- void (*write)(struct console *, const char *, unsigned);
-+ void (*write_atomic)(struct console *, const char *, unsigned);
- int (*read)(struct console *, char *, unsigned);
- struct tty_driver *(*device)(struct console *, int *);
- void (*unblank)(void);
-@@ -232,4 +233,7 @@ extern void console_init(void);
- void dummycon_register_output_notifier(struct notifier_block *nb);
- void dummycon_unregister_output_notifier(struct notifier_block *nb);
-
-+extern void console_atomic_lock(unsigned int *flags);
-+extern void console_atomic_unlock(unsigned int flags);
-+
- #endif /* _LINUX_CONSOLE_H */
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -3112,3 +3112,15 @@ void kmsg_dump_rewind(struct kmsg_dumper
- }
- EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
- #endif
-+
-+void console_atomic_lock(unsigned int *flags)
-+{
-+ prb_lock(&printk_cpulock, flags);
-+}
-+EXPORT_SYMBOL(console_atomic_lock);
-+
-+void console_atomic_unlock(unsigned int flags)
-+{
-+ prb_unlock(&printk_cpulock, flags);
-+}
-+EXPORT_SYMBOL(console_atomic_unlock);
diff --git a/debian/patches-rt/0019-locking-rtmutex-add-rwsem-implementation-based-on-rt.patch b/debian/patches-rt/0018-locking-rtmutex-add-rwsem-implementation-based-on-rt.patch
index 84a614a39..5c5cf39ee 100644
--- a/debian/patches-rt/0019-locking-rtmutex-add-rwsem-implementation-based-on-rt.patch
+++ b/debian/patches-rt/0018-locking-rtmutex-add-rwsem-implementation-based-on-rt.patch
@@ -1,8 +1,8 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 12 Oct 2017 17:28:34 +0200
-Subject: [PATCH 19/23] locking/rtmutex: add rwsem implementation based on
+Subject: [PATCH 18/22] locking/rtmutex: add rwsem implementation based on
rtmutex
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
The RT specific R/W semaphore implementation restricts the number of readers
to one because a writer cannot block on multiple readers and inherit its
@@ -44,8 +44,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/rwsem-rt.h | 69 ++++++++++
- kernel/locking/rwsem-rt.c | 292 ++++++++++++++++++++++++++++++++++++++++++++++
- 2 files changed, 361 insertions(+)
+ kernel/locking/rwsem-rt.c | 307 ++++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 376 insertions(+)
create mode 100644 include/linux/rwsem-rt.h
create mode 100644 kernel/locking/rwsem-rt.c
@@ -123,12 +123,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#endif
--- /dev/null
+++ b/kernel/locking/rwsem-rt.c
-@@ -0,0 +1,292 @@
+@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/rwsem.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/signal.h>
+#include <linux/export.h>
++#include <linux/blkdev.h>
+
+#include "rtmutex_common.h"
+
@@ -213,6 +214,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ if (__down_read_trylock(sem))
+ return 0;
+
++ /*
++ * Flush blk before ->pi_blocked_on is set. At schedule() time it is too
++ * late if one of the callbacks needs to acquire a sleeping lock.
++ */
++ if (blk_needs_flush_plug(current))
++ blk_schedule_flush_plug(current);
++
+ might_sleep();
+ raw_spin_lock_irq(&m->wait_lock);
+ /*
@@ -335,6 +343,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ struct rt_mutex *m = &sem->rtmutex;
+ unsigned long flags;
+
++ /*
++ * Flush blk before ->pi_blocked_on is set. At schedule() time it is too
++ * late if one of the callbacks needs to acquire a sleeping lock.
++ */
++ if (blk_needs_flush_plug(current))
++ blk_schedule_flush_plug(current);
++
+ /* Take the rtmutex as a first step */
+ if (__rt_mutex_lock_state(m, state))
+ return -EINTR;
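
Both new hunks flush the task's block plug before the slow path can set
->pi_blocked_on: once the task is queued on the rtmutex it is too late,
because a flush callback may itself need a sleeping lock. A userspace
analogue of that ordering constraint -- a toy model, not kernel code,
with a pthread mutex standing in for the rtmutex and a callback array
standing in for the plug:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* The "plug": work deferred by this task, flushed before blocking. */
    typedef void (*plug_cb)(void);
    static plug_cb plug[8];
    static int nplug;

    static void flush_plug(void)   /* blk_schedule_flush_plug() stand-in */
    {
            while (nplug)
                    plug[--nplug]();
    }

    static void queued_io(void)    /* a callback that takes the lock */
    {
            pthread_mutex_lock(&lock);
            puts("deferred I/O submitted");
            pthread_mutex_unlock(&lock);
    }

    static void down_write_slowpath(void)
    {
            /* Flushing after the lock below would self-deadlock. */
            flush_plug();
            pthread_mutex_lock(&lock);
            puts("writer in critical section");
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
            plug[nplug++] = queued_io;
            down_write_slowpath();
            return 0;
    }
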
diff --git a/debian/patches-rt/0018-printk-reimplement-log_cont-using-record-extension.patch b/debian/patches-rt/0018-printk-reimplement-log_cont-using-record-extension.patch
new file mode 100644
index 000000000..cae8f0757
--- /dev/null
+++ b/debian/patches-rt/0018-printk-reimplement-log_cont-using-record-extension.patch
@@ -0,0 +1,144 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 14 Sep 2020 14:39:54 +0206
+Subject: [PATCH 18/25] printk: reimplement log_cont using record extension
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+Use the record extending feature of the ringbuffer to implement
+continuous messages. This preserves the existing continuous message
+behavior.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20200914123354.832-7-john.ogness@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 98 ++++++++++---------------------------------------
+ 1 file changed, 20 insertions(+), 78 deletions(-)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -535,7 +535,10 @@ static int log_store(u32 caller_id, int
+ r.info->caller_id = caller_id;
+
+ /* insert message */
+- prb_commit(&e);
++ if ((flags & LOG_CONT) || !(flags & LOG_NEWLINE))
++ prb_commit(&e);
++ else
++ prb_final_commit(&e);
+
+ return (text_len + trunc_msg_len);
+ }
+@@ -1092,7 +1095,7 @@ static unsigned int __init add_to_rb(str
+ dest_r.info->ts_nsec = r->info->ts_nsec;
+ dest_r.info->caller_id = r->info->caller_id;
+
+- prb_commit(&e);
++ prb_final_commit(&e);
+
+ return prb_record_text_space(&e);
+ }
+@@ -1892,87 +1895,26 @@ static inline u32 printk_caller_id(void)
+ 0x80000000 + raw_smp_processor_id();
+ }
+
+-/*
+- * Continuation lines are buffered, and not committed to the record buffer
+- * until the line is complete, or a race forces it. The line fragments
+- * though, are printed immediately to the consoles to ensure everything has
+- * reached the console in case of a kernel crash.
+- */
+-static struct cont {
+- char buf[LOG_LINE_MAX];
+- size_t len; /* length == 0 means unused buffer */
+- u32 caller_id; /* printk_caller_id() of first print */
+- u64 ts_nsec; /* time of first print */
+- u8 level; /* log level of first message */
+- u8 facility; /* log facility of first message */
+- enum log_flags flags; /* prefix, newline flags */
+-} cont;
+-
+-static void cont_flush(void)
+-{
+- if (cont.len == 0)
+- return;
+-
+- log_store(cont.caller_id, cont.facility, cont.level, cont.flags,
+- cont.ts_nsec, NULL, 0, cont.buf, cont.len);
+- cont.len = 0;
+-}
+-
+-static bool cont_add(u32 caller_id, int facility, int level,
+- enum log_flags flags, const char *text, size_t len)
+-{
+- /* If the line gets too long, split it up in separate records. */
+- if (cont.len + len > sizeof(cont.buf)) {
+- cont_flush();
+- return false;
+- }
+-
+- if (!cont.len) {
+- cont.facility = facility;
+- cont.level = level;
+- cont.caller_id = caller_id;
+- cont.ts_nsec = local_clock();
+- cont.flags = flags;
+- }
+-
+- memcpy(cont.buf + cont.len, text, len);
+- cont.len += len;
+-
+- // The original flags come from the first line,
+- // but later continuations can add a newline.
+- if (flags & LOG_NEWLINE) {
+- cont.flags |= LOG_NEWLINE;
+- cont_flush();
+- }
+-
+- return true;
+-}
+-
+ static size_t log_output(int facility, int level, enum log_flags lflags, const char *dict, size_t dictlen, char *text, size_t text_len)
+ {
+ const u32 caller_id = printk_caller_id();
+
+- /*
+- * If an earlier line was buffered, and we're a continuation
+- * write from the same context, try to add it to the buffer.
+- */
+- if (cont.len) {
+- if (cont.caller_id == caller_id && (lflags & LOG_CONT)) {
+- if (cont_add(caller_id, facility, level, lflags, text, text_len))
+- return text_len;
+- }
+- /* Otherwise, make sure it's flushed */
+- cont_flush();
+- }
+-
+- /* Skip empty continuation lines that couldn't be added - they just flush */
+- if (!text_len && (lflags & LOG_CONT))
+- return 0;
+-
+- /* If it doesn't end in a newline, try to buffer the current line */
+- if (!(lflags & LOG_NEWLINE)) {
+- if (cont_add(caller_id, facility, level, lflags, text, text_len))
++ if (lflags & LOG_CONT) {
++ struct prb_reserved_entry e;
++ struct printk_record r;
++
++ prb_rec_init_wr(&r, text_len, 0);
++ if (prb_reserve_in_last(&e, prb, &r, caller_id)) {
++ memcpy(&r.text_buf[r.info->text_len], text, text_len);
++ r.info->text_len += text_len;
++ if (lflags & LOG_NEWLINE) {
++ r.info->flags |= LOG_NEWLINE;
++ prb_final_commit(&e);
++ } else {
++ prb_commit(&e);
++ }
+ return text_len;
++ }
+ }
+
+ /* Store it in the record log */
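
The rewrite above removes the cont buffer entirely: a LOG_CONT print now
tries to extend the newest record in place via prb_reserve_in_last(),
and the record is only final-committed (made visible to readers) once a
newline arrives. A toy userspace model of that open-until-newline flow
-- this is not the prb_* API, just the idea it implements:

    #include <stdio.h>
    #include <string.h>

    struct record {
            char text[256];
            size_t len;
            int finalized;  /* readers may consume only finalized records */
    };

    /* Append a fragment; finalize once the line is complete. */
    static void append(struct record *r, const char *frag)
    {
            size_t n = strlen(frag);

            memcpy(r->text + r->len, frag, n); /* extend record in place */
            r->len += n;
            if (n && frag[n - 1] == '\n')
                    r->finalized = 1;          /* prb_final_commit() analogue */
    }

    int main(void)
    {
            struct record r = { .len = 0, .finalized = 0 };

            append(&r, "loading firmware ");
            printf("finalized=%d\n", r.finalized);  /* 0: still open */
            append(&r, "... done\n");
            printf("finalized=%d text=%.*s", r.finalized, (int)r.len, r.text);
            return 0;
    }
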
diff --git a/debian/patches-rt/0018-sched-Deny-self-issued-__set_cpus_allowed_ptr-when-m.patch b/debian/patches-rt/0018-sched-Deny-self-issued-__set_cpus_allowed_ptr-when-m.patch
new file mode 100644
index 000000000..246544170
--- /dev/null
+++ b/debian/patches-rt/0018-sched-Deny-self-issued-__set_cpus_allowed_ptr-when-m.patch
@@ -0,0 +1,40 @@
+From: Valentin Schneider <valentin.schneider@arm.com>
+Date: Fri, 23 Oct 2020 12:12:16 +0200
+Subject: [PATCH 18/19] sched: Deny self-issued __set_cpus_allowed_ptr() when
+ migrate_disable()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+ migrate_disable();
+ set_cpus_allowed_ptr(current, {something excluding task_cpu(current)});
+ affine_move_task(); <-- never returns
+
+Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20201013140116.26651-1-valentin.schneider@arm.com
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/sched/core.c | 13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -2244,8 +2244,17 @@ static int __set_cpus_allowed_ptr(struct
+ goto out;
+ }
+
+- if (!(flags & SCA_MIGRATE_ENABLE) && cpumask_equal(&p->cpus_mask, new_mask))
+- goto out;
++ if (!(flags & SCA_MIGRATE_ENABLE)) {
++ if (cpumask_equal(&p->cpus_mask, new_mask))
++ goto out;
++
++ if (WARN_ON_ONCE(p == current &&
++ is_migration_disabled(p) &&
++ !cpumask_test_cpu(task_cpu(p), new_mask))) {
++ ret = -EBUSY;
++ goto out;
++ }
++ }
+
+ /*
+ * Picking a ~random cpu helps in cases where we are changing affinity
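
The guard added above converts a guaranteed hang into an error: a task
that has disabled migration and then asks to be moved off its own CPU
would wait forever for a migration that cannot happen, so the request
now fails with -EBUSY. A toy model of just that check (the struct and
field names here are assumptions for illustration, not the kernel's
task_struct):

    #include <errno.h>
    #include <stdio.h>

    struct task {
            int cpu;                 /* task_cpu(p) */
            int migration_disabled;  /* is_migration_disabled(p) */
    };

    /* Refuse a self-issued affinity change that excludes the current CPU
     * while migration is disabled: waiting for it could never finish. */
    static int set_cpus_allowed(struct task *p, const struct task *curr,
                                unsigned long new_mask)
    {
            if (p == curr && p->migration_disabled &&
                !(new_mask & (1UL << p->cpu)))
                    return -EBUSY;
            return 0;
    }

    int main(void)
    {
            struct task t = { .cpu = 0, .migration_disabled = 1 };

            /* mask {1} excludes task_cpu(t) == 0: rejected */
            printf("ret = %d\n", set_cpus_allowed(&t, &t, 1UL << 1));
            return 0;
    }
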
diff --git a/debian/patches-rt/0020-locking-rtmutex-add-rwlock-implementation-based-on-r.patch b/debian/patches-rt/0019-locking-rtmutex-add-rwlock-implementation-based-on-r.patch
index 2185f232c..745d2b8f0 100644
--- a/debian/patches-rt/0020-locking-rtmutex-add-rwlock-implementation-based-on-r.patch
+++ b/debian/patches-rt/0019-locking-rtmutex-add-rwlock-implementation-based-on-r.patch
@@ -1,8 +1,8 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 12 Oct 2017 17:18:06 +0200
-Subject: [PATCH 20/23] locking/rtmutex: add rwlock implementation based on
+Subject: [PATCH 19/22] locking/rtmutex: add rwlock implementation based on
rtmutex
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
The implementation is bias-based, similar to the rwsem implementation.
@@ -266,7 +266,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ lock->rtmutex.save_state = 1;
+}
+
-+int __read_rt_trylock(struct rt_rw_lock *lock)
++static int __read_rt_trylock(struct rt_rw_lock *lock)
+{
+ int r, old;
+
diff --git a/debian/patches-rt/0019-printk-introduce-emergency-messages.patch b/debian/patches-rt/0019-printk-introduce-emergency-messages.patch
deleted file mode 100644
index 040c65c6d..000000000
--- a/debian/patches-rt/0019-printk-introduce-emergency-messages.patch
+++ /dev/null
@@ -1,273 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Tue, 12 Feb 2019 15:29:57 +0100
-Subject: [PATCH 19/25] printk: introduce emergency messages
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-Console messages are generally either critical or non-critical.
-Critical messages are messages such as crashes or sysrq output.
-Critical messages should never be lost because generally they provide
-important debugging information.
-
-Since all console messages are output via a fully preemptible printk
-kernel thread, it is possible that messages are not output because
-that thread cannot be scheduled (BUG in scheduler, run-away RT task,
-etc).
-
-To allow critical messages to be output independent of the
-schedulability of the printk task, introduce an emergency mechanism
-that _immediately_ outputs the message to the consoles. To avoid
-possible unbounded latency issues, the emergency mechanism only
-outputs the printk line provided by the caller and ignores any
-pending messages in the log buffer.
-
-Critical messages are identified as messages (by default) with log
-level LOGLEVEL_WARNING or more critical. This is configurable via the
-kernel option CONSOLE_LOGLEVEL_EMERGENCY.
-
-Any messages output as emergency messages are skipped by the printk
-thread on those consoles that output the emergency message.
-
-In order for a console driver to support emergency messages, the
-write_atomic function must be implemented by the driver. If not
-implemented, the emergency messages are handled like all other
-messages and are printed by the printk thread.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/printk.h | 2
- kernel/printk/printk.c | 111 ++++++++++++++++++++++++++++++++++++++++++++++---
- lib/Kconfig.debug | 17 +++++++
- 3 files changed, 124 insertions(+), 6 deletions(-)
-
---- a/include/linux/printk.h
-+++ b/include/linux/printk.h
-@@ -59,6 +59,7 @@ static inline const char *printk_skip_he
- */
- #define CONSOLE_LOGLEVEL_DEFAULT CONFIG_CONSOLE_LOGLEVEL_DEFAULT
- #define CONSOLE_LOGLEVEL_QUIET CONFIG_CONSOLE_LOGLEVEL_QUIET
-+#define CONSOLE_LOGLEVEL_EMERGENCY CONFIG_CONSOLE_LOGLEVEL_EMERGENCY
-
- extern int console_printk[];
-
-@@ -66,6 +67,7 @@ extern int console_printk[];
- #define default_message_loglevel (console_printk[1])
- #define minimum_console_loglevel (console_printk[2])
- #define default_console_loglevel (console_printk[3])
-+#define emergency_console_loglevel (console_printk[4])
-
- static inline void console_silent(void)
- {
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -45,6 +45,7 @@
- #include <linux/ctype.h>
- #include <linux/uio.h>
- #include <linux/kthread.h>
-+#include <linux/clocksource.h>
- #include <linux/printk_ringbuffer.h>
- #include <linux/sched/clock.h>
- #include <linux/sched/debug.h>
-@@ -61,11 +62,12 @@
- #include "braille.h"
- #include "internal.h"
-
--int console_printk[4] = {
-+int console_printk[5] = {
- CONSOLE_LOGLEVEL_DEFAULT, /* console_loglevel */
- MESSAGE_LOGLEVEL_DEFAULT, /* default_message_loglevel */
- CONSOLE_LOGLEVEL_MIN, /* minimum_console_loglevel */
- CONSOLE_LOGLEVEL_DEFAULT, /* default_console_loglevel */
-+ CONSOLE_LOGLEVEL_EMERGENCY, /* emergency_console_loglevel */
- };
- EXPORT_SYMBOL_GPL(console_printk);
-
-@@ -498,6 +500,9 @@ static u32 log_next(u32 idx)
- return idx + msg->len;
- }
-
-+static void printk_emergency(char *buffer, int level, u64 ts_nsec, u16 cpu,
-+ char *text, u16 text_len);
-+
- /* insert record into the buffer, discard old ones, update heads */
- static int log_store(u32 caller_id, int facility, int level,
- enum log_flags flags, u64 ts_nsec, u16 cpu,
-@@ -1649,7 +1654,7 @@ static void printk_write_history(struct
- * The console_lock must be held.
- */
- static void call_console_drivers(u64 seq, const char *ext_text, size_t ext_len,
-- const char *text, size_t len)
-+ const char *text, size_t len, int level)
- {
- struct console *con;
-
-@@ -1666,6 +1671,18 @@ static void call_console_drivers(u64 seq
- con->wrote_history = 1;
- con->printk_seq = seq - 1;
- }
-+ if (con->write_atomic && level < emergency_console_loglevel) {
-+ /* skip emergency messages, already printed */
-+ if (con->printk_seq < seq)
-+ con->printk_seq = seq;
-+ continue;
-+ }
-+ if (con->flags & CON_BOOT) {
-+ /* skip emergency messages, already printed */
-+ if (con->printk_seq < seq)
-+ con->printk_seq = seq;
-+ continue;
-+ }
- if (!con->write)
- continue;
- if (!cpu_online(raw_smp_processor_id()) &&
-@@ -1785,8 +1802,12 @@ asmlinkage int vprintk_emit(int facility
-
- cpu = raw_smp_processor_id();
-
-- text = rbuf;
-- text_len = vscnprintf(text, PRINTK_SPRINT_MAX, fmt, args);
-+ /*
-+ * If this turns out to be an emergency message, there
-+ * may need to be a prefix added. Leave room for it.
-+ */
-+ text = rbuf + PREFIX_MAX;
-+ text_len = vscnprintf(text, PRINTK_SPRINT_MAX - PREFIX_MAX, fmt, args);
-
- /* strip and flag a trailing newline */
- if (text_len && text[text_len-1] == '\n') {
-@@ -1819,6 +1840,14 @@ asmlinkage int vprintk_emit(int facility
- if (dict)
- lflags |= LOG_NEWLINE;
-
-+ /*
-+ * NOTE:
-+ * - rbuf points to beginning of allocated buffer
-+ * - text points to beginning of text
-+ * - there is room before text for prefix
-+ */
-+ printk_emergency(rbuf, level, ts_nsec, cpu, text, text_len);
-+
- printed_len = log_store(caller_id, facility, level, lflags, ts_nsec, cpu,
- dict, dictlen, text, text_len);
-
-@@ -1900,7 +1929,7 @@ static ssize_t msg_print_ext_body(char *
- char *dict, size_t dict_len,
- char *text, size_t text_len) { return 0; }
- static void call_console_drivers(u64 seq, const char *ext_text, size_t ext_len,
-- const char *text, size_t len) {}
-+ const char *text, size_t len, int level) {}
- static size_t msg_print_text(const struct printk_log *msg, bool syslog,
- bool time, char *buf, size_t size) { return 0; }
- static bool suppress_message_printing(int level) { return false; }
-@@ -2683,7 +2712,7 @@ static int printk_kthread_func(void *dat
- console_lock();
- console_may_schedule = 0;
- call_console_drivers(master_seq, ext_text,
-- ext_len, text, len);
-+ ext_len, text, len, msg->level);
- if (len > 0 || ext_len > 0)
- printk_delay(msg->level);
- console_unlock();
-@@ -3111,6 +3140,76 @@ void kmsg_dump_rewind(struct kmsg_dumper
- logbuf_unlock_irqrestore(flags);
- }
- EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
-+
-+static bool console_can_emergency(int level)
-+{
-+ struct console *con;
-+
-+ for_each_console(con) {
-+ if (!(con->flags & CON_ENABLED))
-+ continue;
-+ if (con->write_atomic && level < emergency_console_loglevel)
-+ return true;
-+ if (con->write && (con->flags & CON_BOOT))
-+ return true;
-+ }
-+ return false;
-+}
-+
-+static void call_emergency_console_drivers(int level, const char *text,
-+ size_t text_len)
-+{
-+ struct console *con;
-+
-+ for_each_console(con) {
-+ if (!(con->flags & CON_ENABLED))
-+ continue;
-+ if (con->write_atomic && level < emergency_console_loglevel) {
-+ con->write_atomic(con, text, text_len);
-+ continue;
-+ }
-+ if (con->write && (con->flags & CON_BOOT)) {
-+ con->write(con, text, text_len);
-+ continue;
-+ }
-+ }
-+}
-+
-+static void printk_emergency(char *buffer, int level, u64 ts_nsec, u16 cpu,
-+ char *text, u16 text_len)
-+{
-+ struct printk_log msg;
-+ size_t prefix_len;
-+
-+ if (!console_can_emergency(level))
-+ return;
-+
-+ msg.level = level;
-+ msg.ts_nsec = ts_nsec;
-+ msg.cpu = cpu;
-+ msg.facility = 0;
-+
-+ /* "text" must have PREFIX_MAX preceding bytes available */
-+
-+ prefix_len = print_prefix(&msg,
-+ console_msg_format & MSG_FORMAT_SYSLOG,
-+ printk_time, buffer);
-+ /* move the prefix forward to the beginning of the message text */
-+ text -= prefix_len;
-+ memmove(text, buffer, prefix_len);
-+ text_len += prefix_len;
-+
-+ text[text_len++] = '\n';
-+
-+ call_emergency_console_drivers(level, text, text_len);
-+
-+ touch_softlockup_watchdog_sync();
-+ clocksource_touch_watchdog();
-+ rcu_cpu_stall_reset();
-+ touch_nmi_watchdog();
-+
-+ printk_delay(level);
-+}
- #endif
-
- void console_atomic_lock(unsigned int *flags)
---- a/lib/Kconfig.debug
-+++ b/lib/Kconfig.debug
-@@ -61,6 +61,23 @@ config CONSOLE_LOGLEVEL_QUIET
- will be used as the loglevel. IOW passing "quiet" will be the
- equivalent of passing "loglevel=<CONSOLE_LOGLEVEL_QUIET>"
-
-+config CONSOLE_LOGLEVEL_EMERGENCY
-+ int "Emergency console loglevel (1-15)"
-+ range 1 15
-+ default "5"
-+ help
-+ The loglevel to determine if a console message is an emergency
-+ message.
-+
-+ If supported by the console driver, emergency messages will be
-+ flushed to the console immediately. This can cause significant system
-+ latencies so the value should be set such that only significant
-+ messages are classified as emergency messages.
-+
-+ Setting a default here is equivalent to passing in
-+ emergency_loglevel=<x> in the kernel bootargs. emergency_loglevel=<x>
-+ continues to override whatever value is specified here as well.
-+
- config MESSAGE_LOGLEVEL_DEFAULT
- int "Default message log level (1-7)"
- range 1 7
diff --git a/debian/patches-rt/0019-printk-move-printk_info-into-separate-array.patch b/debian/patches-rt/0019-printk-move-printk_info-into-separate-array.patch
new file mode 100644
index 000000000..4f56dcf22
--- /dev/null
+++ b/debian/patches-rt/0019-printk-move-printk_info-into-separate-array.patch
@@ -0,0 +1,606 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Sat, 19 Sep 2020 00:40:19 +0206
+Subject: [PATCH 19/25] printk: move printk_info into separate array
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+The majority of the size of a descriptor is taken up by meta data,
+which is often not of interest to the ringbuffer (for example,
+when performing state checks). Since descriptors are often
+temporarily stored on the stack, keeping their size minimal will
+help reduce stack pressure.
+
+Rather than embedding the printk_info into the descriptor, create
+a separate printk_info array. The index of a descriptor in the
+descriptor array corresponds to the printk_info with the same
+index in the printk_info array. The rules for validity of a
+printk_info match the existing rules for the data blocks: the
+descriptor must be in a consistent state.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20200918223421.21621-2-john.ogness@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 30 ++++++-
+ kernel/printk/printk_ringbuffer.c | 145 ++++++++++++++++++++++++--------------
+ kernel/printk/printk_ringbuffer.h | 29 ++++---
+ 3 files changed, 133 insertions(+), 71 deletions(-)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -967,11 +967,11 @@ void log_buf_vmcoreinfo_setup(void)
+ VMCOREINFO_STRUCT_SIZE(prb_desc_ring);
+ VMCOREINFO_OFFSET(prb_desc_ring, count_bits);
+ VMCOREINFO_OFFSET(prb_desc_ring, descs);
++ VMCOREINFO_OFFSET(prb_desc_ring, infos);
+ VMCOREINFO_OFFSET(prb_desc_ring, head_id);
+ VMCOREINFO_OFFSET(prb_desc_ring, tail_id);
+
+ VMCOREINFO_STRUCT_SIZE(prb_desc);
+- VMCOREINFO_OFFSET(prb_desc, info);
+ VMCOREINFO_OFFSET(prb_desc, state_var);
+ VMCOREINFO_OFFSET(prb_desc, text_blk_lpos);
+ VMCOREINFO_OFFSET(prb_desc, dict_blk_lpos);
+@@ -1105,11 +1105,13 @@ static char setup_dict_buf[CONSOLE_EXT_L
+
+ void __init setup_log_buf(int early)
+ {
++ struct printk_info *new_infos;
+ unsigned int new_descs_count;
+ struct prb_desc *new_descs;
+ struct printk_info info;
+ struct printk_record r;
+ size_t new_descs_size;
++ size_t new_infos_size;
+ unsigned long flags;
+ char *new_dict_buf;
+ char *new_log_buf;
+@@ -1150,8 +1152,7 @@ void __init setup_log_buf(int early)
+ if (unlikely(!new_dict_buf)) {
+ pr_err("log_buf_len: %lu dict bytes not available\n",
+ new_log_buf_len);
+- memblock_free(__pa(new_log_buf), new_log_buf_len);
+- return;
++ goto err_free_log_buf;
+ }
+
+ new_descs_size = new_descs_count * sizeof(struct prb_desc);
+@@ -1159,9 +1160,15 @@ void __init setup_log_buf(int early)
+ if (unlikely(!new_descs)) {
+ pr_err("log_buf_len: %zu desc bytes not available\n",
+ new_descs_size);
+- memblock_free(__pa(new_dict_buf), new_log_buf_len);
+- memblock_free(__pa(new_log_buf), new_log_buf_len);
+- return;
++ goto err_free_dict_buf;
++ }
++
++ new_infos_size = new_descs_count * sizeof(struct printk_info);
++ new_infos = memblock_alloc(new_infos_size, LOG_ALIGN);
++ if (unlikely(!new_infos)) {
++ pr_err("log_buf_len: %zu info bytes not available\n",
++ new_infos_size);
++ goto err_free_descs;
+ }
+
+ prb_rec_init_rd(&r, &info,
+@@ -1171,7 +1178,8 @@ void __init setup_log_buf(int early)
+ prb_init(&printk_rb_dynamic,
+ new_log_buf, ilog2(new_log_buf_len),
+ new_dict_buf, ilog2(new_log_buf_len),
+- new_descs, ilog2(new_descs_count));
++ new_descs, ilog2(new_descs_count),
++ new_infos);
+
+ logbuf_lock_irqsave(flags);
+
+@@ -1200,6 +1208,14 @@ void __init setup_log_buf(int early)
+ pr_info("log_buf_len: %u bytes\n", log_buf_len);
+ pr_info("early log buf free: %u(%u%%)\n",
+ free, (free * 100) / __LOG_BUF_LEN);
++ return;
++
++err_free_descs:
++ memblock_free(__pa(new_descs), new_descs_size);
++err_free_dict_buf:
++ memblock_free(__pa(new_dict_buf), new_log_buf_len);
++err_free_log_buf:
++ memblock_free(__pa(new_log_buf), new_log_buf_len);
+ }
+
+ static bool __read_mostly ignore_loglevel;
+--- a/kernel/printk/printk_ringbuffer.c
++++ b/kernel/printk/printk_ringbuffer.c
+@@ -15,10 +15,10 @@
+ * The printk_ringbuffer is made up of 3 internal ringbuffers:
+ *
+ * desc_ring
+- * A ring of descriptors. A descriptor contains all record meta data
+- * (sequence number, timestamp, loglevel, etc.) as well as internal state
+- * information about the record and logical positions specifying where in
+- * the other ringbuffers the text and dictionary strings are located.
++ * A ring of descriptors and their meta data (such as sequence number,
++ * timestamp, loglevel, etc.) as well as internal state information about
++ * the record and logical positions specifying where in the other
++ * ringbuffers the text and dictionary strings are located.
+ *
+ * text_data_ring
+ * A ring of data blocks. A data block consists of an unsigned long
+@@ -38,13 +38,14 @@
+ *
+ * Descriptor Ring
+ * ~~~~~~~~~~~~~~~
+- * The descriptor ring is an array of descriptors. A descriptor contains all
+- * the meta data of a printk record as well as blk_lpos structs pointing to
+- * associated text and dictionary data blocks (see "Data Rings" below). Each
+- * descriptor is assigned an ID that maps directly to index values of the
+- * descriptor array and has a state. The ID and the state are bitwise combined
+- * into a single descriptor field named @state_var, allowing ID and state to
+- * be synchronously and atomically updated.
++ * The descriptor ring is an array of descriptors. A descriptor contains
++ * essential meta data to track the data of a printk record using
++ * blk_lpos structs pointing to associated text and dictionary data blocks
++ * (see "Data Rings" below). Each descriptor is assigned an ID that maps
++ * directly to index values of the descriptor array and has a state. The ID
++ * and the state are bitwise combined into a single descriptor field named
++ * @state_var, allowing ID and state to be synchronously and atomically
++ * updated.
+ *
+ * Descriptors have four states:
+ *
+@@ -150,6 +151,14 @@
+ * descriptor. If a data block is not valid, the @tail_lpos cannot be
+ * advanced beyond it.
+ *
++ * Info Array
++ * ~~~~~~~~~~
++ * The general meta data of printk records are stored in printk_info structs,
++ * stored in an array with the same number of elements as the descriptor ring.
++ * Each info corresponds to the descriptor of the same index in the
++ * descriptor ring. Info validity is confirmed by evaluating the corresponding
++ * descriptor before and after loading the info.
++ *
+ * Usage
+ * -----
+ * Here are some simple examples demonstrating writers and readers. For the
+@@ -367,6 +376,15 @@ static struct prb_desc *to_desc(struct p
+ return &desc_ring->descs[DESC_INDEX(desc_ring, n)];
+ }
+
++/*
++ * Return the printk_info associated with @n. @n can be either a
++ * descriptor ID or a sequence number.
++ */
++static struct printk_info *to_info(struct prb_desc_ring *desc_ring, u64 n)
++{
++ return &desc_ring->infos[DESC_INDEX(desc_ring, n)];
++}
++
+ static struct prb_data_block *to_block(struct prb_data_ring *data_ring,
+ unsigned long begin_lpos)
+ {
+@@ -425,10 +443,16 @@ static enum desc_state get_desc_state(un
+ * Get a copy of a specified descriptor and return its queried state. If the
+ * descriptor is in an inconsistent state (miss or reserved), the caller can
+ * only expect the descriptor's @state_var field to be valid.
++ *
++ * The sequence number and caller_id can be optionally retrieved. Like all
++ * non-state_var data, they are only valid if the descriptor is in a
++ * consistent state.
+ */
+ static enum desc_state desc_read(struct prb_desc_ring *desc_ring,
+- unsigned long id, struct prb_desc *desc_out)
++ unsigned long id, struct prb_desc *desc_out,
++ u64 *seq_out, u32 *caller_id_out)
+ {
++ struct printk_info *info = to_info(desc_ring, id);
+ struct prb_desc *desc = to_desc(desc_ring, id);
+ atomic_long_t *state_var = &desc->state_var;
+ enum desc_state d_state;
+@@ -469,11 +493,14 @@ static enum desc_state desc_read(struct
+ * state has been re-checked. A memcpy() for all of @desc
+ * cannot be used because of the atomic_t @state_var field.
+ */
+- memcpy(&desc_out->info, &desc->info, sizeof(desc_out->info)); /* LMM(desc_read:C) */
+ memcpy(&desc_out->text_blk_lpos, &desc->text_blk_lpos,
+- sizeof(desc_out->text_blk_lpos)); /* also part of desc_read:C */
++ sizeof(desc_out->text_blk_lpos)); /* LMM(desc_read:C) */
+ memcpy(&desc_out->dict_blk_lpos, &desc->dict_blk_lpos,
+ sizeof(desc_out->dict_blk_lpos)); /* also part of desc_read:C */
++ if (seq_out)
++ *seq_out = info->seq; /* also part of desc_read:C */
++ if (caller_id_out)
++ *caller_id_out = info->caller_id; /* also part of desc_read:C */
+
+ /*
+ * 1. Guarantee the descriptor content is loaded before re-checking
+@@ -588,7 +615,8 @@ static bool data_make_reusable(struct pr
+ */
+ id = blk->id; /* LMM(data_make_reusable:A) */
+
+- d_state = desc_read(desc_ring, id, &desc); /* LMM(data_make_reusable:B) */
++ d_state = desc_read(desc_ring, id, &desc,
++ NULL, NULL); /* LMM(data_make_reusable:B) */
+
+ switch (d_state) {
+ case desc_miss:
+@@ -771,7 +799,7 @@ static bool desc_push_tail(struct printk
+ enum desc_state d_state;
+ struct prb_desc desc;
+
+- d_state = desc_read(desc_ring, tail_id, &desc);
++ d_state = desc_read(desc_ring, tail_id, &desc, NULL, NULL);
+
+ switch (d_state) {
+ case desc_miss:
+@@ -823,7 +851,8 @@ static bool desc_push_tail(struct printk
+ * equal to @head_id so there is no risk of pushing the tail past the
+ * head.
+ */
+- d_state = desc_read(desc_ring, DESC_ID(tail_id + 1), &desc); /* LMM(desc_push_tail:A) */
++ d_state = desc_read(desc_ring, DESC_ID(tail_id + 1), &desc,
++ NULL, NULL); /* LMM(desc_push_tail:A) */
+
+ if (d_state == desc_finalized || d_state == desc_reusable) {
+ /*
+@@ -1264,6 +1293,7 @@ static struct prb_desc *desc_reopen_last
+ struct prb_desc desc;
+ struct prb_desc *d;
+ unsigned long id;
++ u32 cid;
+
+ id = atomic_long_read(&desc_ring->head_id);
+
+@@ -1271,8 +1301,8 @@ static struct prb_desc *desc_reopen_last
+ * To reduce unnecessarily reopening, first check if the descriptor
+ * state and caller ID are correct.
+ */
+- d_state = desc_read(desc_ring, id, &desc);
+- if (d_state != desc_committed || desc.info.caller_id != caller_id)
++ d_state = desc_read(desc_ring, id, &desc, NULL, &cid);
++ if (d_state != desc_committed || cid != caller_id)
+ return NULL;
+
+ d = to_desc(desc_ring, id);
+@@ -1353,6 +1383,8 @@ static struct prb_desc *desc_reopen_last
+ bool prb_reserve_in_last(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
+ struct printk_record *r, u32 caller_id)
+ {
++ struct prb_desc_ring *desc_ring = &rb->desc_ring;
++ struct printk_info *info;
+ unsigned int data_size;
+ struct prb_desc *d;
+ unsigned long id;
+@@ -1360,7 +1392,7 @@ bool prb_reserve_in_last(struct prb_rese
+ local_irq_save(e->irqflags);
+
+ /* Transition the newest descriptor back to the reserved state. */
+- d = desc_reopen_last(&rb->desc_ring, caller_id, &id);
++ d = desc_reopen_last(desc_ring, caller_id, &id);
+ if (!d) {
+ local_irq_restore(e->irqflags);
+ goto fail_reopen;
+@@ -1368,6 +1400,8 @@ bool prb_reserve_in_last(struct prb_rese
+
+ /* Now the writer has exclusive access: LMM(prb_reserve_in_last:A) */
+
++ info = to_info(desc_ring, id);
++
+ /*
+ * Set the @e fields here so that prb_commit() can be used if
+ * anything fails from now on.
+@@ -1380,14 +1414,14 @@ bool prb_reserve_in_last(struct prb_rese
+ * exclusive access at that point. The descriptor may have
+ * changed since then.
+ */
+- if (caller_id != d->info.caller_id)
++ if (caller_id != info->caller_id)
+ goto fail;
+
+ if (BLK_DATALESS(&d->text_blk_lpos)) {
+- if (WARN_ON_ONCE(d->info.text_len != 0)) {
++ if (WARN_ON_ONCE(info->text_len != 0)) {
+ pr_warn_once("wrong text_len value (%hu, expecting 0)\n",
+- d->info.text_len);
+- d->info.text_len = 0;
++ info->text_len);
++ info->text_len = 0;
+ }
+
+ if (!data_check_size(&rb->text_data_ring, r->text_buf_size))
+@@ -1404,12 +1438,12 @@ bool prb_reserve_in_last(struct prb_rese
+ * the meta data (@text_len) is not sane, use the full data
+ * block size.
+ */
+- if (WARN_ON_ONCE(d->info.text_len > data_size)) {
++ if (WARN_ON_ONCE(info->text_len > data_size)) {
+ pr_warn_once("wrong text_len value (%hu, expecting <=%u)\n",
+- d->info.text_len, data_size);
+- d->info.text_len = data_size;
++ info->text_len, data_size);
++ info->text_len = data_size;
+ }
+- r->text_buf_size += d->info.text_len;
++ r->text_buf_size += info->text_len;
+
+ if (!data_check_size(&rb->text_data_ring, r->text_buf_size))
+ goto fail;
+@@ -1424,7 +1458,7 @@ bool prb_reserve_in_last(struct prb_rese
+ r->dict_buf = NULL;
+ r->dict_buf_size = 0;
+
+- r->info = &d->info;
++ r->info = info;
+
+ e->text_space = space_used(&rb->text_data_ring, &d->text_blk_lpos);
+
+@@ -1486,6 +1520,7 @@ bool prb_reserve(struct prb_reserved_ent
+ struct printk_record *r)
+ {
+ struct prb_desc_ring *desc_ring = &rb->desc_ring;
++ struct printk_info *info;
+ struct prb_desc *d;
+ unsigned long id;
+ u64 seq;
+@@ -1512,14 +1547,15 @@ bool prb_reserve(struct prb_reserved_ent
+ }
+
+ d = to_desc(desc_ring, id);
++ info = to_info(desc_ring, id);
+
+ /*
+ * All @info fields (except @seq) are cleared and must be filled in
+ * by the writer. Save @seq before clearing because it is used to
+ * determine the new sequence number.
+ */
+- seq = d->info.seq;
+- memset(&d->info, 0, sizeof(d->info));
++ seq = info->seq;
++ memset(info, 0, sizeof(*info));
+
+ /*
+ * Set the @e fields here so that prb_commit() can be used if
+@@ -1533,16 +1569,16 @@ bool prb_reserve(struct prb_reserved_ent
+ * Otherwise just increment it by a full wrap.
+ *
+ * @seq is considered "never been set" if it has a value of 0,
+- * _except_ for @descs[0], which was specially setup by the ringbuffer
++ * _except_ for @infos[0], which was specially setup by the ringbuffer
+ * initializer and therefore is always considered as set.
+ *
+ * See the "Bootstrap" comment block in printk_ringbuffer.h for
+ * details about how the initializer bootstraps the descriptors.
+ */
+ if (seq == 0 && DESC_INDEX(desc_ring, id) != 0)
+- d->info.seq = DESC_INDEX(desc_ring, id);
++ info->seq = DESC_INDEX(desc_ring, id);
+ else
+- d->info.seq = seq + DESCS_COUNT(desc_ring);
++ info->seq = seq + DESCS_COUNT(desc_ring);
+
+ /*
+ * New data is about to be reserved. Once that happens, previous
+@@ -1550,7 +1586,7 @@ bool prb_reserve(struct prb_reserved_ent
+ * previous descriptor now so that it can be made available to
+ * readers. (For seq==0 there is no previous descriptor.)
+ */
+- if (d->info.seq > 0)
++ if (info->seq > 0)
+ desc_make_final(desc_ring, DESC_ID(id - 1));
+
+ r->text_buf = data_alloc(rb, &rb->text_data_ring, r->text_buf_size,
+@@ -1571,7 +1607,7 @@ bool prb_reserve(struct prb_reserved_ent
+ if (r->dict_buf_size && !r->dict_buf)
+ r->dict_buf_size = 0;
+
+- r->info = &d->info;
++ r->info = info;
+
+ /* Record full text space used by record. */
+ e->text_space = space_used(&rb->text_data_ring, &d->text_blk_lpos);
+@@ -1726,12 +1762,12 @@ static bool copy_data(struct prb_data_ri
+ /*
+ * Actual cannot be less than expected. It can be more than expected
+ * because of the trailing alignment padding.
++ *
++ * Note that invalid @len values can occur because the caller loads
++ * the value during an allowed data race.
+ */
+- if (WARN_ON_ONCE(data_size < (unsigned int)len)) {
+- pr_warn_once("wrong data size (%u, expecting >=%hu) for data: %.*s\n",
+- data_size, len, data_size, data);
++ if (data_size < (unsigned int)len)
+ return false;
+- }
+
+ /* Caller interested in the line count? */
+ if (line_count)
+@@ -1764,8 +1800,9 @@ static int desc_read_finalized_seq(struc
+ {
+ struct prb_data_blk_lpos *blk_lpos = &desc_out->text_blk_lpos;
+ enum desc_state d_state;
++ u64 s;
+
+- d_state = desc_read(desc_ring, id, desc_out);
++ d_state = desc_read(desc_ring, id, desc_out, &s, NULL);
+
+ /*
+ * An unexpected @id (desc_miss) or @seq mismatch means the record
+@@ -1775,7 +1812,7 @@ static int desc_read_finalized_seq(struc
+ if (d_state == desc_miss ||
+ d_state == desc_reserved ||
+ d_state == desc_committed ||
+- desc_out->info.seq != seq) {
++ s != seq) {
+ return -EINVAL;
+ }
+
+@@ -1802,6 +1839,7 @@ static int prb_read(struct printk_ringbu
+ struct printk_record *r, unsigned int *line_count)
+ {
+ struct prb_desc_ring *desc_ring = &rb->desc_ring;
++ struct printk_info *info = to_info(desc_ring, seq);
+ struct prb_desc *rdesc = to_desc(desc_ring, seq);
+ atomic_long_t *state_var = &rdesc->state_var;
+ struct prb_desc desc;
+@@ -1823,10 +1861,10 @@ static int prb_read(struct printk_ringbu
+
+ /* If requested, copy meta data. */
+ if (r->info)
+- memcpy(r->info, &desc.info, sizeof(*(r->info)));
++ memcpy(r->info, info, sizeof(*(r->info)));
+
+ /* Copy text data. If it fails, this is a data-less record. */
+- if (!copy_data(&rb->text_data_ring, &desc.text_blk_lpos, desc.info.text_len,
++ if (!copy_data(&rb->text_data_ring, &desc.text_blk_lpos, info->text_len,
+ r->text_buf, r->text_buf_size, line_count)) {
+ return -ENOENT;
+ }
+@@ -1836,7 +1874,7 @@ static int prb_read(struct printk_ringbu
+ * important. So if it fails, modify the copied meta data to report
+ * that there is no dict data, thus silently dropping the dict data.
+ */
+- if (!copy_data(&rb->dict_data_ring, &desc.dict_blk_lpos, desc.info.dict_len,
++ if (!copy_data(&rb->dict_data_ring, &desc.dict_blk_lpos, info->dict_len,
+ r->dict_buf, r->dict_buf_size, NULL)) {
+ if (r->info)
+ r->info->dict_len = 0;
+@@ -1853,11 +1891,12 @@ static u64 prb_first_seq(struct printk_r
+ enum desc_state d_state;
+ struct prb_desc desc;
+ unsigned long id;
++ u64 seq;
+
+ for (;;) {
+ id = atomic_long_read(&rb->desc_ring.tail_id); /* LMM(prb_first_seq:A) */
+
+- d_state = desc_read(desc_ring, id, &desc); /* LMM(prb_first_seq:B) */
++ d_state = desc_read(desc_ring, id, &desc, &seq, NULL); /* LMM(prb_first_seq:B) */
+
+ /*
+ * This loop will not be infinite because the tail is
+@@ -1886,7 +1925,7 @@ static u64 prb_first_seq(struct printk_r
+ smp_rmb(); /* LMM(prb_first_seq:C) */
+ }
+
+- return desc.info.seq;
++ return seq;
+ }
+
+ /*
+@@ -2049,6 +2088,7 @@ u64 prb_next_seq(struct printk_ringbuffe
+ * @dictbits: The size of @dict_buf as a power-of-2 value.
+ * @descs: The descriptor buffer for ringbuffer records.
+ * @descbits: The count of @descs items as a power-of-2 value.
++ * @infos: The printk_info buffer for ringbuffer records.
+ *
+ * This is the public function available to writers to setup a ringbuffer
+ * during runtime using provided buffers.
+@@ -2060,12 +2100,15 @@ u64 prb_next_seq(struct printk_ringbuffe
+ void prb_init(struct printk_ringbuffer *rb,
+ char *text_buf, unsigned int textbits,
+ char *dict_buf, unsigned int dictbits,
+- struct prb_desc *descs, unsigned int descbits)
++ struct prb_desc *descs, unsigned int descbits,
++ struct printk_info *infos)
+ {
+ memset(descs, 0, _DESCS_COUNT(descbits) * sizeof(descs[0]));
++ memset(infos, 0, _DESCS_COUNT(descbits) * sizeof(infos[0]));
+
+ rb->desc_ring.count_bits = descbits;
+ rb->desc_ring.descs = descs;
++ rb->desc_ring.infos = infos;
+ atomic_long_set(&rb->desc_ring.head_id, DESC0_ID(descbits));
+ atomic_long_set(&rb->desc_ring.tail_id, DESC0_ID(descbits));
+
+@@ -2081,14 +2124,14 @@ void prb_init(struct printk_ringbuffer *
+
+ atomic_long_set(&rb->fail, 0);
+
+- descs[0].info.seq = -(u64)_DESCS_COUNT(descbits);
+-
+- descs[_DESCS_COUNT(descbits) - 1].info.seq = 0;
+ atomic_long_set(&(descs[_DESCS_COUNT(descbits) - 1].state_var), DESC0_SV(descbits));
+ descs[_DESCS_COUNT(descbits) - 1].text_blk_lpos.begin = FAILED_LPOS;
+ descs[_DESCS_COUNT(descbits) - 1].text_blk_lpos.next = FAILED_LPOS;
+ descs[_DESCS_COUNT(descbits) - 1].dict_blk_lpos.begin = FAILED_LPOS;
+ descs[_DESCS_COUNT(descbits) - 1].dict_blk_lpos.next = FAILED_LPOS;
++
++ infos[0].seq = -(u64)_DESCS_COUNT(descbits);
++ infos[_DESCS_COUNT(descbits) - 1].seq = 0;
+ }
+
+ /**
+--- a/kernel/printk/printk_ringbuffer.h
++++ b/kernel/printk/printk_ringbuffer.h
+@@ -58,7 +58,6 @@ struct prb_data_blk_lpos {
+ * @state_var: A bitwise combination of descriptor ID and descriptor state.
+ */
+ struct prb_desc {
+- struct printk_info info;
+ atomic_long_t state_var;
+ struct prb_data_blk_lpos text_blk_lpos;
+ struct prb_data_blk_lpos dict_blk_lpos;
+@@ -76,6 +75,7 @@ struct prb_data_ring {
+ struct prb_desc_ring {
+ unsigned int count_bits;
+ struct prb_desc *descs;
++ struct printk_info *infos;
+ atomic_long_t head_id;
+ atomic_long_t tail_id;
+ };
+@@ -237,19 +237,8 @@ enum desc_state {
+ static char _##name##_dict[1U << ((avgdictbits) + (descbits))] \
+ __aligned(__alignof__(unsigned long)); \
+ static struct prb_desc _##name##_descs[_DESCS_COUNT(descbits)] = { \
+- /* this will be the first record reserved by a writer */ \
+- [0] = { \
+- .info = { \
+- /* will be incremented to 0 on the first reservation */ \
+- .seq = -(u64)_DESCS_COUNT(descbits), \
+- }, \
+- }, \
+ /* the initial head and tail */ \
+ [_DESCS_COUNT(descbits) - 1] = { \
+- .info = { \
+- /* reports the first seq value during the bootstrap phase */ \
+- .seq = 0, \
+- }, \
+ /* reusable */ \
+ .state_var = ATOMIC_INIT(DESC0_SV(descbits)), \
+ /* no associated data block */ \
+@@ -257,10 +246,23 @@ static struct prb_desc _##name##_descs[_
+ .dict_blk_lpos = FAILED_BLK_LPOS, \
+ }, \
+ }; \
++static struct printk_info _##name##_infos[_DESCS_COUNT(descbits)] = { \
++ /* this will be the first record reserved by a writer */ \
++ [0] = { \
++ /* will be incremented to 0 on the first reservation */ \
++ .seq = -(u64)_DESCS_COUNT(descbits), \
++ }, \
++ /* the initial head and tail */ \
++ [_DESCS_COUNT(descbits) - 1] = { \
++ /* reports the first seq value during the bootstrap phase */ \
++ .seq = 0, \
++ }, \
++}; \
+ static struct printk_ringbuffer name = { \
+ .desc_ring = { \
+ .count_bits = descbits, \
+ .descs = &_##name##_descs[0], \
++ .infos = &_##name##_infos[0], \
+ .head_id = ATOMIC_INIT(DESC0_ID(descbits)), \
+ .tail_id = ATOMIC_INIT(DESC0_ID(descbits)), \
+ }, \
+@@ -336,7 +338,8 @@ void prb_final_commit(struct prb_reserve
+ void prb_init(struct printk_ringbuffer *rb,
+ char *text_buf, unsigned int text_buf_size,
+ char *dict_buf, unsigned int dict_buf_size,
+- struct prb_desc *descs, unsigned int descs_count_bits);
++ struct prb_desc *descs, unsigned int descs_count_bits,
++ struct printk_info *infos);
+ unsigned int prb_record_text_space(struct prb_reserved_entry *e);
+
+ /* Reader Interface */
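
With the meta data moved out of the descriptor, a printk_info is only
trustworthy if its descriptor stayed in a consistent state across the
copy, which is why desc_read() above now samples the state before and
after loading the info. A stripped-down userspace sketch of that
check-copy-recheck idiom -- a toy model that omits the kernel's memory
barriers and state decoding, with invented names:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <string.h>

    struct info {
            unsigned long long seq;
            char text[32];
    };

    static _Atomic unsigned long state_var;  /* descriptor id + state */
    static struct info infos[1];             /* parallel info array */

    /* Copy the info; the copy is valid only if the descriptor state was
     * stable around it (writers bump state_var before reusing a slot). */
    static int read_info(struct info *out)
    {
            unsigned long before, after;

            before = atomic_load(&state_var);
            memcpy(out, &infos[0], sizeof(*out)); /* may race with writer */
            after = atomic_load(&state_var);

            return before == after;
    }

    int main(void)
    {
            struct info snap;

            infos[0].seq = 42;
            strcpy(infos[0].text, "hello");
            atomic_store(&state_var, 1UL);

            if (read_info(&snap))
                    printf("seq=%llu text=%s\n", snap.seq, snap.text);
            return 0;
    }
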
diff --git a/debian/patches-rt/0019-sched-Comment-affine_move_task.patch b/debian/patches-rt/0019-sched-Comment-affine_move_task.patch
new file mode 100644
index 000000000..dfa85622b
--- /dev/null
+++ b/debian/patches-rt/0019-sched-Comment-affine_move_task.patch
@@ -0,0 +1,124 @@
+From: Valentin Schneider <valentin.schneider@arm.com>
+Date: Fri, 23 Oct 2020 12:12:17 +0200
+Subject: [PATCH 19/19] sched: Comment affine_move_task()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20201013140116.26651-2-valentin.schneider@arm.com
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/sched/core.c | 81 ++++++++++++++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 79 insertions(+), 2 deletions(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -2084,7 +2084,75 @@ void do_set_cpus_allowed(struct task_str
+ }
+
+ /*
+- * This function is wildly self concurrent, consider at least 3 times.
++ * This function is wildly self concurrent; here be dragons.
++ *
++ *
++ * When given a valid mask, __set_cpus_allowed_ptr() must block until the
++ * designated task is enqueued on an allowed CPU. If that task is currently
++ * running, we have to kick it out using the CPU stopper.
++ *
++ * Migrate-Disable comes along and tramples all over our nice sandcastle.
++ * Consider:
++ *
++ * Initial conditions: P0->cpus_mask = [0, 1]
++ *
++ * P0@CPU0 P1
++ *
++ * migrate_disable();
++ * <preempted>
++ * set_cpus_allowed_ptr(P0, [1]);
++ *
++ * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes
++ * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region).
++ * This means we need the following scheme:
++ *
++ * P0@CPU0 P1
++ *
++ * migrate_disable();
++ * <preempted>
++ * set_cpus_allowed_ptr(P0, [1]);
++ * <blocks>
++ * <resumes>
++ * migrate_enable();
++ * __set_cpus_allowed_ptr();
++ * <wakes local stopper>
++ * `--> <woken on migration completion>
++ *
++ * Now the fun stuff: there may be several P1-like tasks, i.e. multiple
++ * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any
++ * task p are serialized by p->pi_lock, which we can leverage: the one that
++ * should come into effect at the end of the Migrate-Disable region is the last
++ * one. This means we only need to track a single cpumask (i.e. p->cpus_mask),
++ * but we still need to properly signal those waiting tasks at the appropriate
++ * moment.
++ *
++ * This is implemented using struct set_affinity_pending. The first
++ * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will
++ * setup an instance of that struct and install it on the targeted task_struct.
++ * Any and all further callers will reuse that instance. Those then wait for
++ * a completion signaled at the tail of the CPU stopper callback (1), triggered
++ * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()).
++ *
++ *
++ * (1) In the cases covered above. There is one more where the completion is
++ * signaled within affine_move_task() itself: when a subsequent affinity request
++ * cancels the need for an active migration. Consider:
++ *
++ * Initial conditions: P0->cpus_mask = [0, 1]
++ *
++ * P0@CPU0 P1 P2
++ *
++ * migrate_disable();
++ * <preempted>
++ * set_cpus_allowed_ptr(P0, [1]);
++ * <blocks>
++ * set_cpus_allowed_ptr(P0, [0, 1]);
++ * <signal completion>
++ * <awakes>
++ *
++ * Note that the above is safe vs a concurrent migrate_enable(), as any
++ * pending affinity completion is preceded by an uninstallation of
++ * p->migration_pending done with p->pi_lock held.
+ */
+ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf,
+ int dest_cpu, unsigned int flags)
+@@ -2128,6 +2196,7 @@ static int affine_move_task(struct rq *r
+ if (!(flags & SCA_MIGRATE_ENABLE)) {
+ /* serialized by p->pi_lock */
+ if (!p->migration_pending) {
++ /* Install the request */
+ refcount_set(&my_pending.refs, 1);
+ init_completion(&my_pending.done);
+ p->migration_pending = &my_pending;
+@@ -2171,7 +2240,11 @@ static int affine_move_task(struct rq *r
+ }
+
+ if (task_running(rq, p) || p->state == TASK_WAKING) {
+-
++ /*
++ * Lessen races (and headaches) by delegating
++ * is_migration_disabled(p) checks to the stopper, which will
++ * run on the same CPU as said p.
++ */
+ task_rq_unlock(rq, p, rf);
+ stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
+
+@@ -2196,6 +2269,10 @@ static int affine_move_task(struct rq *r
+ if (refcount_dec_and_test(&pending->refs))
+ wake_up_var(&pending->refs);
+
++ /*
++ * Block the original owner of &pending until all subsequent callers
++ * have seen the completion and decremented the refcount
++ */
+ wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs));
+
+ return 0;
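
The comment block added above documents the serialization scheme: the
first __set_cpus_allowed_ptr() caller within a Migrate-Disable region
installs a struct set_affinity_pending, later callers reuse it, and all
of them wait for a completion signaled from the stopper, with a refcount
deciding who tears the instance down. A toy single-threaded model of
that install/reuse/teardown bookkeeping (field names inferred from the
hunks; no real waiting, locking, or refcount_t):

    #include <stdio.h>

    struct set_affinity_pending {
            int refs;   /* refcount_t in the kernel */
            int done;   /* struct completion stand-in */
    };

    static struct set_affinity_pending *migration_pending;

    /* First caller in the Migrate-Disable window installs the request;
     * every later caller reuses the same instance. */
    static void install_or_reuse(struct set_affinity_pending *my)
    {
            if (!migration_pending) {
                    my->refs = 1;
                    migration_pending = my;
            } else {
                    migration_pending->refs++;
            }
    }

    /* Signaled once, at the tail of the stopper callback. */
    static void complete_pending(struct set_affinity_pending *p)
    {
            p->done = 1;
    }

    /* Each waiter drops its reference; the last one uninstalls. */
    static void pending_put(struct set_affinity_pending *p)
    {
            if (--p->refs == 0)
                    migration_pending = NULL;
    }

    int main(void)
    {
            struct set_affinity_pending my = { 0, 0 };

            install_or_reuse(&my);   /* P1 installs */
            install_or_reuse(&my);   /* P2 reuses */
            complete_pending(&my);
            pending_put(&my);
            pending_put(&my);
            printf("done=%d refs=%d installed=%d\n",
                   my.done, my.refs, migration_pending != NULL);
            return 0;
    }
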
diff --git a/debian/patches-rt/0021-locking-rtmutex-wire-up-RT-s-locking.patch b/debian/patches-rt/0020-locking-rtmutex-wire-up-RT-s-locking.patch
index 74907d759..eba4bbf77 100644
--- a/debian/patches-rt/0021-locking-rtmutex-wire-up-RT-s-locking.patch
+++ b/debian/patches-rt/0020-locking-rtmutex-wire-up-RT-s-locking.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 12 Oct 2017 17:31:14 +0200
-Subject: [PATCH 21/23] locking/rtmutex: wire up RT's locking
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Subject: [PATCH 20/22] locking/rtmutex: wire up RT's locking
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
diff --git a/debian/patches-rt/0020-printk-move-dictionary-keys-to-dev_printk_info.patch b/debian/patches-rt/0020-printk-move-dictionary-keys-to-dev_printk_info.patch
new file mode 100644
index 000000000..563380df1
--- /dev/null
+++ b/debian/patches-rt/0020-printk-move-dictionary-keys-to-dev_printk_info.patch
@@ -0,0 +1,764 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 21 Sep 2020 13:24:45 +0206
+Subject: [PATCH 20/25] printk: move dictionary keys to dev_printk_info
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+Dictionaries are only used for SUBSYSTEM and DEVICE properties. The
+current implementation stores the property names each time they are
+used. This requires more space than otherwise necessary. Also,
+because the dictionary entries are currently considered optional,
+it cannot be relied upon that they are always available, even if the
+writer wanted to store them. These issues will increase should new
+dictionary properties be introduced.
+
+Rather than storing the subsystem and device properties in the
+dict ring, introduce a struct dev_printk_info with separate fields
+to store only the property values. Embed this struct within the
+struct printk_info to provide guaranteed availability.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/87mu1jl6ne.fsf@jogness.linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ Documentation/admin-guide/kdump/gdbmacros.txt | 73 +++++------
+ drivers/base/core.c | 46 ++-----
+ include/linux/dev_printk.h | 8 +
+ include/linux/printk.h | 6
+ kernel/printk/internal.h | 4
+ kernel/printk/printk.c | 166 +++++++++++++-------------
+ kernel/printk/printk_ringbuffer.h | 3
+ kernel/printk/printk_safe.c | 2
+ scripts/gdb/linux/dmesg.py | 16 +-
+ 9 files changed, 164 insertions(+), 160 deletions(-)
+
+--- a/Documentation/admin-guide/kdump/gdbmacros.txt
++++ b/Documentation/admin-guide/kdump/gdbmacros.txt
+@@ -172,13 +172,13 @@ end
+
+ define dump_record
+ set var $desc = $arg0
+- if ($argc > 1)
+- set var $prev_flags = $arg1
++ set var $info = $arg1
++ if ($argc > 2)
++ set var $prev_flags = $arg2
+ else
+ set var $prev_flags = 0
+ end
+
+- set var $info = &$desc->info
+ set var $prefix = 1
+ set var $newline = 1
+
+@@ -237,44 +237,36 @@ define dump_record
+
+ # handle dictionary data
+
+- set var $begin = $desc->dict_blk_lpos.begin % (1U << prb->dict_data_ring.size_bits)
+- set var $next = $desc->dict_blk_lpos.next % (1U << prb->dict_data_ring.size_bits)
+-
+- # handle data-less record
+- if ($begin & 1)
+- set var $dict_len = 0
+- set var $dict = ""
+- else
+- # handle wrapping data block
+- if ($begin > $next)
+- set var $begin = 0
+- end
+-
+- # skip over descriptor id
+- set var $begin = $begin + sizeof(long)
+-
+- # handle truncated message
+- if ($next - $begin < $info->dict_len)
+- set var $dict_len = $next - $begin
+- else
+- set var $dict_len = $info->dict_len
++ set var $dict = &$info->dev_info.subsystem[0]
++ set var $dict_len = sizeof($info->dev_info.subsystem)
++ if ($dict[0] != '\0')
++ printf " SUBSYSTEM="
++ set var $idx = 0
++ while ($idx < $dict_len)
++ set var $c = $dict[$idx]
++ if ($c == '\0')
++ loop_break
++ else
++ if ($c < ' ' || $c >= 127 || $c == '\\')
++ printf "\\x%02x", $c
++ else
++ printf "%c", $c
++ end
++ end
++ set var $idx = $idx + 1
+ end
+-
+- set var $dict = &prb->dict_data_ring.data[$begin]
++ printf "\n"
+ end
+
+- if ($dict_len > 0)
++ set var $dict = &$info->dev_info.device[0]
++ set var $dict_len = sizeof($info->dev_info.device)
++ if ($dict[0] != '\0')
++ printf " DEVICE="
+ set var $idx = 0
+- set var $line = 1
+ while ($idx < $dict_len)
+- if ($line)
+- printf " "
+- set var $line = 0
+- end
+ set var $c = $dict[$idx]
+ if ($c == '\0')
+- printf "\n"
+- set var $line = 1
++ loop_break
+ else
+ if ($c < ' ' || $c >= 127 || $c == '\\')
+ printf "\\x%02x", $c
+@@ -288,10 +280,10 @@ define dump_record
+ end
+ end
+ document dump_record
+- Dump a single record. The first parameter is the descriptor
+- sequence number, the second is optional and specifies the
+- previous record's flags, used for properly formatting
+- continued lines.
++ Dump a single record. The first parameter is the descriptor,
++ the second parameter is the info, the third parameter is
++ optional and specifies the previous record's flags, used for
++ properly formatting continued lines.
+ end
+
+ define dmesg
+@@ -311,12 +303,13 @@ define dmesg
+
+ while (1)
+ set var $desc = &prb->desc_ring.descs[$id % $desc_count]
++ set var $info = &prb->desc_ring.infos[$id % $desc_count]
+
+ # skip non-committed record
+ set var $state = 3 & ($desc->state_var.counter >> $desc_flags_shift)
+ if ($state == $desc_committed || $state == $desc_finalized)
+- dump_record $desc $prev_flags
+- set var $prev_flags = $desc->info.flags
++ dump_record $desc $info $prev_flags
++ set var $prev_flags = $info->flags
+ end
+
+ set var $id = ($id + 1) & $id_mask
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -4061,22 +4061,21 @@ void device_shutdown(void)
+ */
+
+ #ifdef CONFIG_PRINTK
+-static int
+-create_syslog_header(const struct device *dev, char *hdr, size_t hdrlen)
++static void
++set_dev_info(const struct device *dev, struct dev_printk_info *dev_info)
+ {
+ const char *subsys;
+- size_t pos = 0;
++
++ memset(dev_info, 0, sizeof(*dev_info));
+
+ if (dev->class)
+ subsys = dev->class->name;
+ else if (dev->bus)
+ subsys = dev->bus->name;
+ else
+- return 0;
++ return;
+
+- pos += snprintf(hdr + pos, hdrlen - pos, "SUBSYSTEM=%s", subsys);
+- if (pos >= hdrlen)
+- goto overflow;
++ strscpy(dev_info->subsystem, subsys, sizeof(dev_info->subsystem));
+
+ /*
+ * Add device identifier DEVICE=:
+@@ -4092,41 +4091,28 @@ create_syslog_header(const struct device
+ c = 'b';
+ else
+ c = 'c';
+- pos++;
+- pos += snprintf(hdr + pos, hdrlen - pos,
+- "DEVICE=%c%u:%u",
+- c, MAJOR(dev->devt), MINOR(dev->devt));
++
++ snprintf(dev_info->device, sizeof(dev_info->device),
++ "%c%u:%u", c, MAJOR(dev->devt), MINOR(dev->devt));
+ } else if (strcmp(subsys, "net") == 0) {
+ struct net_device *net = to_net_dev(dev);
+
+- pos++;
+- pos += snprintf(hdr + pos, hdrlen - pos,
+- "DEVICE=n%u", net->ifindex);
++ snprintf(dev_info->device, sizeof(dev_info->device),
++ "n%u", net->ifindex);
+ } else {
+- pos++;
+- pos += snprintf(hdr + pos, hdrlen - pos,
+- "DEVICE=+%s:%s", subsys, dev_name(dev));
++ snprintf(dev_info->device, sizeof(dev_info->device),
++ "+%s:%s", subsys, dev_name(dev));
+ }
+-
+- if (pos >= hdrlen)
+- goto overflow;
+-
+- return pos;
+-
+-overflow:
+- dev_WARN(dev, "device/subsystem name too long");
+- return 0;
+ }
+
+ int dev_vprintk_emit(int level, const struct device *dev,
+ const char *fmt, va_list args)
+ {
+- char hdr[128];
+- size_t hdrlen;
++ struct dev_printk_info dev_info;
+
+- hdrlen = create_syslog_header(dev, hdr, sizeof(hdr));
++ set_dev_info(dev, &dev_info);
+
+- return vprintk_emit(0, level, hdrlen ? hdr : NULL, hdrlen, fmt, args);
++ return vprintk_emit(0, level, &dev_info, fmt, args);
+ }
+ EXPORT_SYMBOL(dev_vprintk_emit);
+
+--- a/include/linux/dev_printk.h
++++ b/include/linux/dev_printk.h
+@@ -21,6 +21,14 @@
+
+ struct device;
+
++#define PRINTK_INFO_SUBSYSTEM_LEN 16
++#define PRINTK_INFO_DEVICE_LEN 48
++
++struct dev_printk_info {
++ char subsystem[PRINTK_INFO_SUBSYSTEM_LEN];
++ char device[PRINTK_INFO_DEVICE_LEN];
++};
++
+ #ifdef CONFIG_PRINTK
+
+ __printf(3, 0) __cold
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -159,10 +159,12 @@ static inline void printk_nmi_direct_ent
+ static inline void printk_nmi_direct_exit(void) { }
+ #endif /* PRINTK_NMI */
+
++struct dev_printk_info;
++
+ #ifdef CONFIG_PRINTK
+-asmlinkage __printf(5, 0)
++asmlinkage __printf(4, 0)
+ int vprintk_emit(int facility, int level,
+- const char *dict, size_t dictlen,
++ const struct dev_printk_info *dev_info,
+ const char *fmt, va_list args);
+
+ asmlinkage __printf(1, 0)
+--- a/kernel/printk/internal.h
++++ b/kernel/printk/internal.h
+@@ -14,9 +14,9 @@
+
+ extern raw_spinlock_t logbuf_lock;
+
+-__printf(5, 0)
++__printf(4, 0)
+ int vprintk_store(int facility, int level,
+- const char *dict, size_t dictlen,
++ const struct dev_printk_info *dev_info,
+ const char *fmt, va_list args);
+
+ __printf(1, 0) int vprintk_default(const char *fmt, va_list args);
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -296,8 +296,8 @@ static int console_msg_format = MSG_FORM
+
+ /*
+ * The printk log buffer consists of a sequenced collection of records, each
+- * containing variable length message and dictionary text. Every record
+- * also contains its own meta-data (@info).
++ * containing variable length message text. Every record also contains its
++ * own meta-data (@info).
+ *
+ * Every record meta-data carries the timestamp in microseconds, as well as
+ * the standard userspace syslog level and syslog facility. The usual kernel
+@@ -310,9 +310,7 @@ static int console_msg_format = MSG_FORM
+ * terminated.
+ *
+ * Optionally, a record can carry a dictionary of properties (key/value
+- * pairs), to provide userspace with a machine-readable message context. The
+- * length of the dictionary is available in @dict_len. The dictionary is not
+- * terminated.
++ * pairs), to provide userspace with a machine-readable message context.
+ *
+ * Examples for well-defined, commonly used property names are:
+ * DEVICE=b12:8 device identifier
+@@ -322,21 +320,20 @@ static int console_msg_format = MSG_FORM
+ * +sound:card0 subsystem:devname
+ * SUBSYSTEM=pci driver-core subsystem name
+ *
+- * Valid characters in property names are [a-zA-Z0-9.-_]. The plain text value
+- * follows directly after a '=' character. Every property is terminated by
+- * a '\0' character. The last property is not terminated.
++ * Valid characters in property names are [a-zA-Z0-9.-_]. Property names
++ * and values are terminated by a '\0' character.
+ *
+ * Example of record values:
+- * record.text_buf = "it's a line" (unterminated)
+- * record.dict_buf = "DEVICE=b8:2\0DRIVER=bug" (unterminated)
+- * record.info.seq = 56
+- * record.info.ts_nsec = 36863
+- * record.info.text_len = 11
+- * record.info.dict_len = 22
+- * record.info.facility = 0 (LOG_KERN)
+- * record.info.flags = 0
+- * record.info.level = 3 (LOG_ERR)
+- * record.info.caller_id = 299 (task 299)
++ * record.text_buf = "it's a line" (unterminated)
++ * record.info.seq = 56
++ * record.info.ts_nsec = 36863
++ * record.info.text_len = 11
++ * record.info.facility = 0 (LOG_KERN)
++ * record.info.flags = 0
++ * record.info.level = 3 (LOG_ERR)
++ * record.info.caller_id = 299 (task 299)
++ * record.info.dev_info.subsystem = "pci" (terminated)
++ * record.info.dev_info.device = "+pci:0000:00:01.0" (terminated)
+ *
+ * The 'struct printk_info' buffer must never be directly exported to
+ * userspace, it is a kernel-private implementation detail that might
+@@ -498,19 +495,19 @@ static void truncate_msg(u16 *text_len,
+ /* insert record into the buffer, discard old ones, update heads */
+ static int log_store(u32 caller_id, int facility, int level,
+ enum log_flags flags, u64 ts_nsec,
+- const char *dict, u16 dict_len,
++ const struct dev_printk_info *dev_info,
+ const char *text, u16 text_len)
+ {
+ struct prb_reserved_entry e;
+ struct printk_record r;
+ u16 trunc_msg_len = 0;
+
+- prb_rec_init_wr(&r, text_len, dict_len);
++ prb_rec_init_wr(&r, text_len, 0);
+
+ if (!prb_reserve(&e, prb, &r)) {
+ /* truncate the message if it is too long for empty buffer */
+ truncate_msg(&text_len, &trunc_msg_len);
+- prb_rec_init_wr(&r, text_len + trunc_msg_len, dict_len);
++ prb_rec_init_wr(&r, text_len + trunc_msg_len, 0);
+ /* survive when the log buffer is too small for trunc_msg */
+ if (!prb_reserve(&e, prb, &r))
+ return 0;
+@@ -521,10 +518,6 @@ static int log_store(u32 caller_id, int
+ if (trunc_msg_len)
+ memcpy(&r.text_buf[text_len], trunc_msg, trunc_msg_len);
+ r.info->text_len = text_len + trunc_msg_len;
+- if (r.dict_buf) {
+- memcpy(&r.dict_buf[0], dict, dict_len);
+- r.info->dict_len = dict_len;
+- }
+ r.info->facility = facility;
+ r.info->level = level & 7;
+ r.info->flags = flags & 0x1f;
+@@ -533,6 +526,8 @@ static int log_store(u32 caller_id, int
+ else
+ r.info->ts_nsec = local_clock();
+ r.info->caller_id = caller_id;
++ if (dev_info)
++ memcpy(&r.info->dev_info, dev_info, sizeof(r.info->dev_info));
+
+ /* insert message */
+ if ((flags & LOG_CONT) || !(flags & LOG_NEWLINE))
+@@ -613,9 +608,9 @@ static ssize_t info_print_ext_header(cha
+ ts_usec, info->flags & LOG_CONT ? 'c' : '-', caller);
+ }
+
+-static ssize_t msg_print_ext_body(char *buf, size_t size,
+- char *dict, size_t dict_len,
+- char *text, size_t text_len)
++static ssize_t msg_add_ext_text(char *buf, size_t size,
++ const char *text, size_t text_len,
++ unsigned char endc)
+ {
+ char *p = buf, *e = buf + size;
+ size_t i;
+@@ -629,36 +624,44 @@ static ssize_t msg_print_ext_body(char *
+ else
+ append_char(&p, e, c);
+ }
+- append_char(&p, e, '\n');
++ append_char(&p, e, endc);
+
+- if (dict_len) {
+- bool line = true;
++ return p - buf;
++}
+
+- for (i = 0; i < dict_len; i++) {
+- unsigned char c = dict[i];
++static ssize_t msg_add_dict_text(char *buf, size_t size,
++ const char *key, const char *val)
++{
++ size_t val_len = strlen(val);
++ ssize_t len;
+
+- if (line) {
+- append_char(&p, e, ' ');
+- line = false;
+- }
++ if (!val_len)
++ return 0;
+
+- if (c == '\0') {
+- append_char(&p, e, '\n');
+- line = true;
+- continue;
+- }
++ len = msg_add_ext_text(buf, size, "", 0, ' '); /* dict prefix */
++ len += msg_add_ext_text(buf + len, size - len, key, strlen(key), '=');
++ len += msg_add_ext_text(buf + len, size - len, val, val_len, '\n');
+
+- if (c < ' ' || c >= 127 || c == '\\') {
+- p += scnprintf(p, e - p, "\\x%02x", c);
+- continue;
+- }
++ return len;
++}
+
+- append_char(&p, e, c);
+- }
+- append_char(&p, e, '\n');
+- }
++static ssize_t msg_print_ext_body(char *buf, size_t size,
++ char *text, size_t text_len,
++ struct dev_printk_info *dev_info)
++{
++ ssize_t len;
+
+- return p - buf;
++ len = msg_add_ext_text(buf, size, text, text_len, '\n');
++
++ if (!dev_info)
++ goto out;
++
++ len += msg_add_dict_text(buf + len, size - len, "SUBSYSTEM",
++ dev_info->subsystem);
++ len += msg_add_dict_text(buf + len, size - len, "DEVICE",
++ dev_info->device);
++out:
++ return len;
+ }
+
+ /* /dev/kmsg - userspace message inject/listen interface */
+@@ -670,7 +673,6 @@ struct devkmsg_user {
+
+ struct printk_info info;
+ char text_buf[CONSOLE_EXT_LOG_MAX];
+- char dict_buf[CONSOLE_EXT_LOG_MAX];
+ struct printk_record record;
+ };
+
+@@ -681,7 +683,7 @@ int devkmsg_emit(int facility, int level
+ int r;
+
+ va_start(args, fmt);
+- r = vprintk_emit(facility, level, NULL, 0, fmt, args);
++ r = vprintk_emit(facility, level, NULL, fmt, args);
+ va_end(args);
+
+ return r;
+@@ -791,8 +793,8 @@ static ssize_t devkmsg_read(struct file
+
+ len = info_print_ext_header(user->buf, sizeof(user->buf), r->info);
+ len += msg_print_ext_body(user->buf + len, sizeof(user->buf) - len,
+- &r->dict_buf[0], r->info->dict_len,
+- &r->text_buf[0], r->info->text_len);
++ &r->text_buf[0], r->info->text_len,
++ &r->info->dev_info);
+
+ user->seq = r->info->seq + 1;
+ logbuf_unlock_irq();
+@@ -905,7 +907,7 @@ static int devkmsg_open(struct inode *in
+
+ prb_rec_init_rd(&user->record, &user->info,
+ &user->text_buf[0], sizeof(user->text_buf),
+- &user->dict_buf[0], sizeof(user->dict_buf));
++ NULL, 0);
+
+ logbuf_lock_irq();
+ user->seq = prb_first_valid_seq(prb);
+@@ -949,6 +951,8 @@ const struct file_operations kmsg_fops =
+ */
+ void log_buf_vmcoreinfo_setup(void)
+ {
++ struct dev_printk_info *dev_info = NULL;
++
+ VMCOREINFO_SYMBOL(prb);
+ VMCOREINFO_SYMBOL(printk_rb_static);
+ VMCOREINFO_SYMBOL(clear_seq);
+@@ -986,6 +990,13 @@ void log_buf_vmcoreinfo_setup(void)
+ VMCOREINFO_OFFSET(printk_info, text_len);
+ VMCOREINFO_OFFSET(printk_info, dict_len);
+ VMCOREINFO_OFFSET(printk_info, caller_id);
++ VMCOREINFO_OFFSET(printk_info, dev_info);
++
++ VMCOREINFO_STRUCT_SIZE(dev_printk_info);
++ VMCOREINFO_OFFSET(dev_printk_info, subsystem);
++ VMCOREINFO_LENGTH(printk_info_subsystem, sizeof(dev_info->subsystem));
++ VMCOREINFO_OFFSET(dev_printk_info, device);
++ VMCOREINFO_LENGTH(printk_info_device, sizeof(dev_info->device));
+
+ VMCOREINFO_STRUCT_SIZE(prb_data_ring);
+ VMCOREINFO_OFFSET(prb_data_ring, size_bits);
+@@ -1078,22 +1089,19 @@ static unsigned int __init add_to_rb(str
+ struct prb_reserved_entry e;
+ struct printk_record dest_r;
+
+- prb_rec_init_wr(&dest_r, r->info->text_len, r->info->dict_len);
++ prb_rec_init_wr(&dest_r, r->info->text_len, 0);
+
+ if (!prb_reserve(&e, rb, &dest_r))
+ return 0;
+
+ memcpy(&dest_r.text_buf[0], &r->text_buf[0], r->info->text_len);
+ dest_r.info->text_len = r->info->text_len;
+- if (dest_r.dict_buf) {
+- memcpy(&dest_r.dict_buf[0], &r->dict_buf[0], r->info->dict_len);
+- dest_r.info->dict_len = r->info->dict_len;
+- }
+ dest_r.info->facility = r->info->facility;
+ dest_r.info->level = r->info->level;
+ dest_r.info->flags = r->info->flags;
+ dest_r.info->ts_nsec = r->info->ts_nsec;
+ dest_r.info->caller_id = r->info->caller_id;
++ memcpy(&dest_r.info->dev_info, &r->info->dev_info, sizeof(dest_r.info->dev_info));
+
+ prb_final_commit(&e);
+
+@@ -1101,7 +1109,6 @@ static unsigned int __init add_to_rb(str
+ }
+
+ static char setup_text_buf[CONSOLE_EXT_LOG_MAX] __initdata;
+-static char setup_dict_buf[CONSOLE_EXT_LOG_MAX] __initdata;
+
+ void __init setup_log_buf(int early)
+ {
+@@ -1173,7 +1180,7 @@ void __init setup_log_buf(int early)
+
+ prb_rec_init_rd(&r, &info,
+ &setup_text_buf[0], sizeof(setup_text_buf),
+- &setup_dict_buf[0], sizeof(setup_dict_buf));
++ NULL, 0);
+
+ prb_init(&printk_rb_dynamic,
+ new_log_buf, ilog2(new_log_buf_len),
+@@ -1911,7 +1918,9 @@ static inline u32 printk_caller_id(void)
+ 0x80000000 + raw_smp_processor_id();
+ }
+
+-static size_t log_output(int facility, int level, enum log_flags lflags, const char *dict, size_t dictlen, char *text, size_t text_len)
++static size_t log_output(int facility, int level, enum log_flags lflags,
++ const struct dev_printk_info *dev_info,
++ char *text, size_t text_len)
+ {
+ const u32 caller_id = printk_caller_id();
+
+@@ -1935,12 +1944,12 @@ static size_t log_output(int facility, i
+
+ /* Store it in the record log */
+ return log_store(caller_id, facility, level, lflags, 0,
+- dict, dictlen, text, text_len);
++ dev_info, text, text_len);
+ }
+
+ /* Must be called under logbuf_lock. */
+ int vprintk_store(int facility, int level,
+- const char *dict, size_t dictlen,
++ const struct dev_printk_info *dev_info,
+ const char *fmt, va_list args)
+ {
+ static char textbuf[LOG_LINE_MAX];
+@@ -1982,15 +1991,14 @@ int vprintk_store(int facility, int leve
+ if (level == LOGLEVEL_DEFAULT)
+ level = default_message_loglevel;
+
+- if (dict)
++ if (dev_info)
+ lflags |= LOG_NEWLINE;
+
+- return log_output(facility, level, lflags,
+- dict, dictlen, text, text_len);
++ return log_output(facility, level, lflags, dev_info, text, text_len);
+ }
+
+ asmlinkage int vprintk_emit(int facility, int level,
+- const char *dict, size_t dictlen,
++ const struct dev_printk_info *dev_info,
+ const char *fmt, va_list args)
+ {
+ int printed_len;
+@@ -2011,7 +2019,7 @@ asmlinkage int vprintk_emit(int facility
+
+ /* This stops the holder of console_sem just where we want him */
+ logbuf_lock_irqsave(flags);
+- printed_len = vprintk_store(facility, level, dict, dictlen, fmt, args);
++ printed_len = vprintk_store(facility, level, dev_info, fmt, args);
+ logbuf_unlock_irqrestore(flags);
+
+ /* If called from the scheduler, we can not call up(). */
+@@ -2045,7 +2053,7 @@ EXPORT_SYMBOL(vprintk);
+
+ int vprintk_default(const char *fmt, va_list args)
+ {
+- return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
++ return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
+ }
+ EXPORT_SYMBOL_GPL(vprintk_default);
+
+@@ -2108,8 +2116,8 @@ static ssize_t info_print_ext_header(cha
+ return 0;
+ }
+ static ssize_t msg_print_ext_body(char *buf, size_t size,
+- char *dict, size_t dict_len,
+- char *text, size_t text_len) { return 0; }
++ char *text, size_t text_len,
++ struct dev_printk_info *dev_info) { return 0; }
+ static void console_lock_spinning_enable(void) { }
+ static int console_lock_spinning_disable_and_check(void) { return 0; }
+ static void call_console_drivers(const char *ext_text, size_t ext_len,
+@@ -2398,7 +2406,6 @@ void console_unlock(void)
+ {
+ static char ext_text[CONSOLE_EXT_LOG_MAX];
+ static char text[LOG_LINE_MAX + PREFIX_MAX];
+- static char dict[LOG_LINE_MAX];
+ unsigned long flags;
+ bool do_cond_resched, retry;
+ struct printk_info info;
+@@ -2409,7 +2416,7 @@ void console_unlock(void)
+ return;
+ }
+
+- prb_rec_init_rd(&r, &info, text, sizeof(text), dict, sizeof(dict));
++ prb_rec_init_rd(&r, &info, text, sizeof(text), NULL, 0);
+
+ /*
+ * Console drivers are called with interrupts disabled, so
+@@ -2481,10 +2488,9 @@ void console_unlock(void)
+ r.info);
+ ext_len += msg_print_ext_body(ext_text + ext_len,
+ sizeof(ext_text) - ext_len,
+- &r.dict_buf[0],
+- r.info->dict_len,
+ &r.text_buf[0],
+- r.info->text_len);
++ r.info->text_len,
++ &r.info->dev_info);
+ }
+ len = record_print_text(&r,
+ console_msg_format & MSG_FORMAT_SYSLOG,
+@@ -3063,7 +3069,7 @@ int vprintk_deferred(const char *fmt, va
+ {
+ int r;
+
+- r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, 0, fmt, args);
++ r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, fmt, args);
+ defer_console_output();
+
+ return r;
+--- a/kernel/printk/printk_ringbuffer.h
++++ b/kernel/printk/printk_ringbuffer.h
+@@ -4,6 +4,7 @@
+ #define _KERNEL_PRINTK_RINGBUFFER_H
+
+ #include <linux/atomic.h>
++#include <linux/dev_printk.h>
+
+ /*
+ * Meta information about each stored message.
+@@ -21,6 +22,8 @@ struct printk_info {
+ u8 flags:5; /* internal record flags */
+ u8 level:3; /* syslog level */
+ u32 caller_id; /* thread id or processor id */
++
++ struct dev_printk_info dev_info;
+ };
+
+ /*
+--- a/kernel/printk/printk_safe.c
++++ b/kernel/printk/printk_safe.c
+@@ -375,7 +375,7 @@ void __printk_safe_exit(void)
+ raw_spin_trylock(&logbuf_lock)) {
+ int len;
+
+- len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
++ len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
+ raw_spin_unlock(&logbuf_lock);
+ defer_console_output();
+ return len;
+--- a/scripts/gdb/linux/dmesg.py
++++ b/scripts/gdb/linux/dmesg.py
+@@ -52,6 +52,12 @@ atomic_long_type = utils.CachedType("ato
+ addr = utils.read_ulong(desc_ring, off)
+ descs = utils.read_memoryview(inf, addr, desc_sz * desc_ring_count).tobytes()
+
++ # read in info array
++ info_sz = printk_info_type.get_type().sizeof
++ off = prb_desc_ring_type.get_type()['infos'].bitpos // 8
++ addr = utils.read_ulong(desc_ring, off)
++ infos = utils.read_memoryview(inf, addr, info_sz * desc_ring_count).tobytes()
++
+ # read in text data ring structure
+ off = printk_ringbuffer_type.get_type()['text_data_ring'].bitpos // 8
+ addr = prb_addr + off
+@@ -73,9 +79,8 @@ atomic_long_type = utils.CachedType("ato
+ begin_off = off + (prb_data_blk_lpos_type.get_type()['begin'].bitpos // 8)
+ next_off = off + (prb_data_blk_lpos_type.get_type()['next'].bitpos // 8)
+
+- off = prb_desc_type.get_type()['info'].bitpos // 8
+- ts_off = off + printk_info_type.get_type()['ts_nsec'].bitpos // 8
+- len_off = off + printk_info_type.get_type()['text_len'].bitpos // 8
++ ts_off = printk_info_type.get_type()['ts_nsec'].bitpos // 8
++ len_off = printk_info_type.get_type()['text_len'].bitpos // 8
+
+ # definitions from kernel/printk/printk_ringbuffer.h
+ desc_committed = 1
+@@ -95,6 +100,7 @@ atomic_long_type = utils.CachedType("ato
+ while True:
+ ind = did % desc_ring_count
+ desc_off = desc_sz * ind
++ info_off = info_sz * ind
+
+ # skip non-committed record
+ state = 3 & (utils.read_u64(descs, desc_off + sv_off +
+@@ -119,7 +125,7 @@ atomic_long_type = utils.CachedType("ato
+ # skip over descriptor id
+ text_start = begin + utils.get_long_type().sizeof
+
+- text_len = utils.read_u16(descs, desc_off + len_off)
++ text_len = utils.read_u16(infos, info_off + len_off)
+
+ # handle truncated message
+ if end - text_start < text_len:
+@@ -128,7 +134,7 @@ atomic_long_type = utils.CachedType("ato
+ text = text_data[text_start:text_start + text_len].decode(
+ encoding='utf8', errors='replace')
+
+- time_stamp = utils.read_u64(descs, desc_off + ts_off)
++ time_stamp = utils.read_u64(infos, info_off + ts_off)
+
+ for line in text.splitlines():
+ msg = u"[{time:12.6f}] {line}\n".format(
diff --git a/debian/patches-rt/0022-locking-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch b/debian/patches-rt/0021-locking-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch
index c8c0cb82e..4880548a8 100644
--- a/debian/patches-rt/0022-locking-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch
+++ b/debian/patches-rt/0021-locking-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 12 Oct 2017 17:34:38 +0200
-Subject: [PATCH 22/23] locking/rtmutex: add ww_mutex addon for mutex-rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Subject: [PATCH 21/22] locking/rtmutex: add ww_mutex addon for mutex-rt
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
@@ -304,7 +304,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/**
-@@ -2261,7 +2407,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
+@@ -2246,7 +2392,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
raw_spin_lock_irq(&lock->wait_lock);
/* sleep on the mutex */
set_current_state(TASK_INTERRUPTIBLE);
@@ -313,7 +313,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
* have to fix that up.
-@@ -2331,3 +2477,97 @@ bool rt_mutex_cleanup_proxy_lock(struct
+@@ -2316,3 +2462,97 @@ bool rt_mutex_cleanup_proxy_lock(struct
return cleanup;
}
@@ -431,7 +431,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct rt_mutex_waiter *waiter,
--- a/kernel/locking/rwsem-rt.c
+++ b/kernel/locking/rwsem-rt.c
-@@ -130,7 +130,7 @@ static int __sched __down_read_common(st
+@@ -138,7 +138,7 @@ static int __sched __down_read_common(st
*/
rt_mutex_init_waiter(&waiter, false);
ret = rt_mutex_slowlock_locked(m, state, NULL, RT_MUTEX_MIN_CHAINWALK,
diff --git a/debian/patches-rt/0021-printk-implement-KERN_CONT.patch b/debian/patches-rt/0021-printk-implement-KERN_CONT.patch
deleted file mode 100644
index 153c99e7f..000000000
--- a/debian/patches-rt/0021-printk-implement-KERN_CONT.patch
+++ /dev/null
@@ -1,133 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Tue, 12 Feb 2019 15:29:59 +0100
-Subject: [PATCH 21/25] printk: implement KERN_CONT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-Implement KERN_CONT based on the printing CPU rather than on the
-printing task. As long as the KERN_CONT messages come from the same
-CPU and no non-KERN_CONT message arrives in between, the fragments
-are assumed to belong together.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/printk/printk.c | 65 +++++++++++++++++++++++++++----------------------
- 1 file changed, 37 insertions(+), 28 deletions(-)
-
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -1714,8 +1714,6 @@ static inline u32 printk_caller_id(void)
- 0x80000000 + raw_smp_processor_id();
- }
-
--/* FIXME: no support for LOG_CONT */
--#if 0
- /*
- * Continuation lines are buffered, and not committed to the record buffer
- * until the line is complete, or a race forces it. The line fragments
-@@ -1726,52 +1724,55 @@ static struct cont {
- char buf[LOG_LINE_MAX];
- size_t len; /* length == 0 means unused buffer */
- u32 caller_id; /* printk_caller_id() of first print */
-+ int cpu_owner; /* cpu of first print */
- u64 ts_nsec; /* time of first print */
- u8 level; /* log level of first message */
- u8 facility; /* log facility of first message */
- enum log_flags flags; /* prefix, newline flags */
--} cont;
-+} cont[2];
-
--static void cont_flush(void)
-+static void cont_flush(int ctx)
- {
-- if (cont.len == 0)
-+ struct cont *c = &cont[ctx];
-+
-+ if (c->len == 0)
- return;
-
-- log_store(cont.caller_id, cont.facility, cont.level, cont.flags,
-- cont.ts_nsec, NULL, 0, cont.buf, cont.len);
-- cont.len = 0;
-+ log_store(c->caller_id, c->facility, c->level, c->flags,
-+ c->ts_nsec, c->cpu_owner, NULL, 0, c->buf, c->len);
-+ c->len = 0;
- }
-
--static bool cont_add(u32 caller_id, int facility, int level,
-+static void cont_add(int ctx, int cpu, u32 caller_id, int facility, int level,
- enum log_flags flags, const char *text, size_t len)
- {
-+ struct cont *c = &cont[ctx];
-+
-+ if (cpu != c->cpu_owner || !(flags & LOG_CONT))
-+ cont_flush(ctx);
-+
- /* If the line gets too long, split it up in separate records. */
-- if (cont.len + len > sizeof(cont.buf)) {
-- cont_flush();
-- return false;
-- }
-+ while (c->len + len > sizeof(c->buf))
-+ cont_flush(ctx);
-
-- if (!cont.len) {
-- cont.facility = facility;
-- cont.level = level;
-- cont.caller_id = caller_id;
-- cont.ts_nsec = local_clock();
-- cont.flags = flags;
-+ if (!c->len) {
-+ c->facility = facility;
-+ c->level = level;
-+ c->caller_id = caller_id;
-+ c->ts_nsec = local_clock();
-+ c->flags = flags;
-+ c->cpu_owner = cpu;
- }
-
-- memcpy(cont.buf + cont.len, text, len);
-- cont.len += len;
-+ memcpy(c->buf + c->len, text, len);
-+ c->len += len;
-
- // The original flags come from the first line,
- // but later continuations can add a newline.
- if (flags & LOG_NEWLINE) {
-- cont.flags |= LOG_NEWLINE;
-- cont_flush();
-+ c->flags |= LOG_NEWLINE;
- }
--
-- return true;
- }
--#endif /* 0 */
-
- /* ring buffer used as memory allocator for temporary sprint buffers */
- DECLARE_STATIC_PRINTKRB(sprint_rb,
-@@ -1783,6 +1784,7 @@ asmlinkage int vprintk_emit(int facility
- const char *fmt, va_list args)
- {
- const u32 caller_id = printk_caller_id();
-+ int ctx = !!in_nmi();
- enum log_flags lflags = 0;
- int printed_len = 0;
- struct prb_handle h;
-@@ -1848,8 +1850,15 @@ asmlinkage int vprintk_emit(int facility
- */
- printk_emergency(rbuf, level, ts_nsec, cpu, text, text_len);
-
-- printed_len = log_store(caller_id, facility, level, lflags, ts_nsec, cpu,
-- dict, dictlen, text, text_len);
-+ if ((lflags & LOG_CONT) || !(lflags & LOG_NEWLINE)) {
-+ cont_add(ctx, cpu, caller_id, facility, level, lflags, text, text_len);
-+ printed_len = text_len;
-+ } else {
-+ if (cpu == cont[ctx].cpu_owner)
-+ cont_flush(ctx);
-+ printed_len = log_store(caller_id, facility, level, lflags, ts_nsec, cpu,
-+ dict, dictlen, text, text_len);
-+ }
-
- prb_commit(&h);
- return printed_len;
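
For orientation: the per-CPU continuation buffers deleted above are
superseded by in-place record extension in the lockless ringbuffer. A
sketch of that path, following the log_output() hunk in the next patch
(simplified, not verbatim kernel code):

    prb_rec_init_wr(&r, text_len);
    if (prb_reserve_in_last(&e, prb, &r, caller_id)) {
            /* reopen the last record of the same caller and append */
            memcpy(&r.text_buf[r.info->text_len], text, text_len);
            r.info->text_len += text_len;
            prb_commit(&e);
    }
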
diff --git a/debian/patches-rt/0021-printk-remove-dict-ring.patch b/debian/patches-rt/0021-printk-remove-dict-ring.patch
new file mode 100644
index 000000000..6007f977f
--- /dev/null
+++ b/debian/patches-rt/0021-printk-remove-dict-ring.patch
@@ -0,0 +1,787 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Sat, 19 Sep 2020 00:40:21 +0206
+Subject: [PATCH 21/25] printk: remove dict ring
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+Since there is no code that will ever store anything into the dict
+ring, remove it. If any future dictionary properties are to be
+added, these should be added to the struct printk_info.
+
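+An illustrative sketch of the writer/reader API change (these are the
+calls updated by the hunks below, shown here only for orientation):
+
+    /* before: text and dict sizes, dict buffer optional */
+    prb_rec_init_wr(&r, text_len, 0);
+    prb_rec_init_rd(&r, &info, text_buf, sizeof(text_buf), NULL, 0);
+
+    /* after: text only */
+    prb_rec_init_wr(&r, text_len);
+    prb_rec_init_rd(&r, &info, text_buf, sizeof(text_buf));
+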
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20200918223421.21621-4-john.ogness@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 46 +++--------
+ kernel/printk/printk_ringbuffer.c | 155 +++++++++-----------------------------
+ kernel/printk/printk_ringbuffer.h | 63 +++------------
+ 3 files changed, 64 insertions(+), 200 deletions(-)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -427,7 +427,6 @@ static u32 log_buf_len = __LOG_BUF_LEN;
+ * Define the average message size. This only affects the number of
+ * descriptors that will be available. Underestimating is better than
+ * overestimating (too many available descriptors is better than not enough).
+- * The dictionary buffer will be the same size as the text buffer.
+ */
+ #define PRB_AVGBITS 5 /* 32 character average length */
+
+@@ -435,7 +434,7 @@ static u32 log_buf_len = __LOG_BUF_LEN;
+ #error CONFIG_LOG_BUF_SHIFT value too small.
+ #endif
+ _DEFINE_PRINTKRB(printk_rb_static, CONFIG_LOG_BUF_SHIFT - PRB_AVGBITS,
+- PRB_AVGBITS, PRB_AVGBITS, &__log_buf[0]);
++ PRB_AVGBITS, &__log_buf[0]);
+
+ static struct printk_ringbuffer printk_rb_dynamic;
+
+@@ -502,12 +501,12 @@ static int log_store(u32 caller_id, int
+ struct printk_record r;
+ u16 trunc_msg_len = 0;
+
+- prb_rec_init_wr(&r, text_len, 0);
++ prb_rec_init_wr(&r, text_len);
+
+ if (!prb_reserve(&e, prb, &r)) {
+ /* truncate the message if it is too long for empty buffer */
+ truncate_msg(&text_len, &trunc_msg_len);
+- prb_rec_init_wr(&r, text_len + trunc_msg_len, 0);
++ prb_rec_init_wr(&r, text_len + trunc_msg_len);
+ /* survive when the log buffer is too small for trunc_msg */
+ if (!prb_reserve(&e, prb, &r))
+ return 0;
+@@ -906,8 +905,7 @@ static int devkmsg_open(struct inode *in
+ mutex_init(&user->lock);
+
+ prb_rec_init_rd(&user->record, &user->info,
+- &user->text_buf[0], sizeof(user->text_buf),
+- NULL, 0);
++ &user->text_buf[0], sizeof(user->text_buf));
+
+ logbuf_lock_irq();
+ user->seq = prb_first_valid_seq(prb);
+@@ -965,7 +963,6 @@ void log_buf_vmcoreinfo_setup(void)
+ VMCOREINFO_STRUCT_SIZE(printk_ringbuffer);
+ VMCOREINFO_OFFSET(printk_ringbuffer, desc_ring);
+ VMCOREINFO_OFFSET(printk_ringbuffer, text_data_ring);
+- VMCOREINFO_OFFSET(printk_ringbuffer, dict_data_ring);
+ VMCOREINFO_OFFSET(printk_ringbuffer, fail);
+
+ VMCOREINFO_STRUCT_SIZE(prb_desc_ring);
+@@ -978,7 +975,6 @@ void log_buf_vmcoreinfo_setup(void)
+ VMCOREINFO_STRUCT_SIZE(prb_desc);
+ VMCOREINFO_OFFSET(prb_desc, state_var);
+ VMCOREINFO_OFFSET(prb_desc, text_blk_lpos);
+- VMCOREINFO_OFFSET(prb_desc, dict_blk_lpos);
+
+ VMCOREINFO_STRUCT_SIZE(prb_data_blk_lpos);
+ VMCOREINFO_OFFSET(prb_data_blk_lpos, begin);
+@@ -988,7 +984,6 @@ void log_buf_vmcoreinfo_setup(void)
+ VMCOREINFO_OFFSET(printk_info, seq);
+ VMCOREINFO_OFFSET(printk_info, ts_nsec);
+ VMCOREINFO_OFFSET(printk_info, text_len);
+- VMCOREINFO_OFFSET(printk_info, dict_len);
+ VMCOREINFO_OFFSET(printk_info, caller_id);
+ VMCOREINFO_OFFSET(printk_info, dev_info);
+
+@@ -1089,7 +1084,7 @@ static unsigned int __init add_to_rb(str
+ struct prb_reserved_entry e;
+ struct printk_record dest_r;
+
+- prb_rec_init_wr(&dest_r, r->info->text_len, 0);
++ prb_rec_init_wr(&dest_r, r->info->text_len);
+
+ if (!prb_reserve(&e, rb, &dest_r))
+ return 0;
+@@ -1120,7 +1115,6 @@ void __init setup_log_buf(int early)
+ size_t new_descs_size;
+ size_t new_infos_size;
+ unsigned long flags;
+- char *new_dict_buf;
+ char *new_log_buf;
+ unsigned int free;
+ u64 seq;
+@@ -1155,19 +1149,12 @@ void __init setup_log_buf(int early)
+ return;
+ }
+
+- new_dict_buf = memblock_alloc(new_log_buf_len, LOG_ALIGN);
+- if (unlikely(!new_dict_buf)) {
+- pr_err("log_buf_len: %lu dict bytes not available\n",
+- new_log_buf_len);
+- goto err_free_log_buf;
+- }
+-
+ new_descs_size = new_descs_count * sizeof(struct prb_desc);
+ new_descs = memblock_alloc(new_descs_size, LOG_ALIGN);
+ if (unlikely(!new_descs)) {
+ pr_err("log_buf_len: %zu desc bytes not available\n",
+ new_descs_size);
+- goto err_free_dict_buf;
++ goto err_free_log_buf;
+ }
+
+ new_infos_size = new_descs_count * sizeof(struct printk_info);
+@@ -1178,13 +1165,10 @@ void __init setup_log_buf(int early)
+ goto err_free_descs;
+ }
+
+- prb_rec_init_rd(&r, &info,
+- &setup_text_buf[0], sizeof(setup_text_buf),
+- NULL, 0);
++ prb_rec_init_rd(&r, &info, &setup_text_buf[0], sizeof(setup_text_buf));
+
+ prb_init(&printk_rb_dynamic,
+ new_log_buf, ilog2(new_log_buf_len),
+- new_dict_buf, ilog2(new_log_buf_len),
+ new_descs, ilog2(new_descs_count),
+ new_infos);
+
+@@ -1219,8 +1203,6 @@ void __init setup_log_buf(int early)
+
+ err_free_descs:
+ memblock_free(__pa(new_descs), new_descs_size);
+-err_free_dict_buf:
+- memblock_free(__pa(new_dict_buf), new_log_buf_len);
+ err_free_log_buf:
+ memblock_free(__pa(new_log_buf), new_log_buf_len);
+ }
+@@ -1471,7 +1453,7 @@ static int syslog_print(char __user *buf
+ if (!text)
+ return -ENOMEM;
+
+- prb_rec_init_rd(&r, &info, text, LOG_LINE_MAX + PREFIX_MAX, NULL, 0);
++ prb_rec_init_rd(&r, &info, text, LOG_LINE_MAX + PREFIX_MAX);
+
+ while (size > 0) {
+ size_t n;
+@@ -1558,7 +1540,7 @@ static int syslog_print_all(char __user
+ len -= get_record_print_text_size(&info, line_count, true, time);
+ }
+
+- prb_rec_init_rd(&r, &info, text, LOG_LINE_MAX + PREFIX_MAX, NULL, 0);
++ prb_rec_init_rd(&r, &info, text, LOG_LINE_MAX + PREFIX_MAX);
+
+ len = 0;
+ prb_for_each_record(seq, prb, seq, &r) {
+@@ -1928,7 +1910,7 @@ static size_t log_output(int facility, i
+ struct prb_reserved_entry e;
+ struct printk_record r;
+
+- prb_rec_init_wr(&r, text_len, 0);
++ prb_rec_init_wr(&r, text_len);
+ if (prb_reserve_in_last(&e, prb, &r, caller_id)) {
+ memcpy(&r.text_buf[r.info->text_len], text, text_len);
+ r.info->text_len += text_len;
+@@ -2416,7 +2398,7 @@ void console_unlock(void)
+ return;
+ }
+
+- prb_rec_init_rd(&r, &info, text, sizeof(text), NULL, 0);
++ prb_rec_init_rd(&r, &info, text, sizeof(text));
+
+ /*
+ * Console drivers are called with interrupts disabled, so
+@@ -3274,7 +3256,7 @@ bool kmsg_dump_get_line_nolock(struct km
+ size_t l = 0;
+ bool ret = false;
+
+- prb_rec_init_rd(&r, &info, line, size, NULL, 0);
++ prb_rec_init_rd(&r, &info, line, size);
+
+ if (!dumper->active)
+ goto out;
+@@ -3365,7 +3347,7 @@ bool kmsg_dump_get_buffer(struct kmsg_du
+ bool ret = false;
+ bool time = printk_time;
+
+- prb_rec_init_rd(&r, &info, buf, size, NULL, 0);
++ prb_rec_init_rd(&r, &info, buf, size);
+
+ if (!dumper->active || !buf || !size)
+ goto out;
+@@ -3413,7 +3395,7 @@ bool kmsg_dump_get_buffer(struct kmsg_du
+ l += record_print_text(&r, syslog, time);
+
+ /* adjust record to store to remaining buffer space */
+- prb_rec_init_rd(&r, &info, buf + l, size - l, NULL, 0);
++ prb_rec_init_rd(&r, &info, buf + l, size - l);
+
+ seq = r.info->seq + 1;
+ }
+--- a/kernel/printk/printk_ringbuffer.c
++++ b/kernel/printk/printk_ringbuffer.c
+@@ -18,18 +18,13 @@
+ * A ring of descriptors and their meta data (such as sequence number,
+ * timestamp, loglevel, etc.) as well as internal state information about
+ * the record and logical positions specifying where in the other
+- * ringbuffers the text and dictionary strings are located.
++ * ringbuffer the text strings are located.
+ *
+ * text_data_ring
+ * A ring of data blocks. A data block consists of an unsigned long
+ * integer (ID) that maps to a desc_ring index followed by the text
+ * string of the record.
+ *
+- * dict_data_ring
+- * A ring of data blocks. A data block consists of an unsigned long
+- * integer (ID) that maps to a desc_ring index followed by the dictionary
+- * string of the record.
+- *
+ * The internal state information of a descriptor is the key element to allow
+ * readers and writers to locklessly synchronize access to the data.
+ *
+@@ -40,8 +35,8 @@
+ * ~~~~~~~~~~~~~~~
+ * The descriptor ring is an array of descriptors. A descriptor contains
+ * essential meta data to track the data of a printk record using
+- * blk_lpos structs pointing to associated text and dictionary data blocks
+- * (see "Data Rings" below). Each descriptor is assigned an ID that maps
++ * blk_lpos structs pointing to associated text data blocks (see
++ * "Data Rings" below). Each descriptor is assigned an ID that maps
+ * directly to index values of the descriptor array and has a state. The ID
+ * and the state are bitwise combined into a single descriptor field named
+ * @state_var, allowing ID and state to be synchronously and atomically
+@@ -62,8 +57,8 @@
+ * writer cannot reopen the descriptor.
+ *
+ * reusable
+- * The record exists, but its text and/or dictionary data may no longer
+- * be available.
++ * The record exists, but its text and/or meta data may no longer be
++ * available.
+ *
+ * Querying the @state_var of a record requires providing the ID of the
+ * descriptor to query. This can yield a possible fifth (pseudo) state:
+@@ -77,7 +72,7 @@
+ * When a new descriptor should be created (and the ring is full), the tail
+ * descriptor is invalidated by first transitioning to the reusable state and
+ * then invalidating all tail data blocks up to and including the data blocks
+- * associated with the tail descriptor (for text and dictionary rings). Then
++ * associated with the tail descriptor (for the text ring). Then
+ * @tail_id is advanced, followed by advancing @head_id. And finally the
+ * @state_var of the new descriptor is initialized to the new ID and reserved
+ * state.
+@@ -108,13 +103,9 @@
+ * 3) When a record is committed via prb_commit() and a newer record
+ * already exists, the record being committed is automatically finalized.
+ *
+- * Data Rings
+- * ~~~~~~~~~~
+- * The two data rings (text and dictionary) function identically. They exist
+- * separately so that their buffer sizes can be individually set and they do
+- * not affect one another.
+- *
+- * Data rings are byte arrays composed of data blocks. Data blocks are
++ * Data Ring
++ * ~~~~~~~~~
++ * The text data ring is a byte array composed of data blocks. Data blocks are
+ * referenced by blk_lpos structs that point to the logical position of the
+ * beginning of a data block and the beginning of the next adjacent data
+ * block. Logical positions are mapped directly to index values of the byte
+@@ -165,34 +156,28 @@
+ * examples a global ringbuffer (test_rb) is available (which is not the
+ * actual ringbuffer used by printk)::
+ *
+- * DEFINE_PRINTKRB(test_rb, 15, 5, 3);
++ * DEFINE_PRINTKRB(test_rb, 15, 5);
+ *
+ * This ringbuffer allows up to 32768 records (2 ^ 15) and has a size of
+- * 1 MiB (2 ^ (15 + 5)) for text data and 256 KiB (2 ^ (15 + 3)) for
+- * dictionary data.
++ * 1 MiB (2 ^ (15 + 5)) for text data.
+ *
+ * Sample writer code::
+ *
+- * const char *dictstr = "dictionary text";
+ * const char *textstr = "message text";
+ * struct prb_reserved_entry e;
+ * struct printk_record r;
+ *
+ * // specify how much to allocate
+- * prb_rec_init_wr(&r, strlen(textstr) + 1, strlen(dictstr) + 1);
++ * prb_rec_init_wr(&r, strlen(textstr) + 1);
+ *
+ * if (prb_reserve(&e, &test_rb, &r)) {
+ * snprintf(r.text_buf, r.text_buf_size, "%s", textstr);
+- * r.info->text_len = strlen(textstr);
+- *
+- * // dictionary allocation may have failed
+- * if (r.dict_buf) {
+- * snprintf(r.dict_buf, r.dict_buf_size, "%s", dictstr);
+- * r.info->dict_len = strlen(dictstr);
+- * }
+ *
++ * r.info->text_len = strlen(textstr);
+ * r.info->ts_nsec = local_clock();
++ * r.info->caller_id = printk_caller_id();
+ *
++ * // commit and finalize the record
+ * prb_final_commit(&e);
+ * }
+ *
+@@ -203,8 +188,9 @@
+ * Sample writer code (record extending)::
+ *
+ * // alternate rest of previous example
+- * r.info->ts_nsec = local_clock();
++ *
+ * r.info->text_len = strlen(textstr);
++ * r.info->ts_nsec = local_clock();
+ * r.info->caller_id = printk_caller_id();
+ *
+ * // commit the record (but do not finalize yet)
+@@ -214,7 +200,7 @@
+ * ...
+ *
+ * // specify additional 5 bytes text space to extend
+- * prb_rec_init_wr(&r, 5, 0);
++ * prb_rec_init_wr(&r, 5);
+ *
+ * if (prb_reserve_in_last(&e, &test_rb, &r, printk_caller_id())) {
+ * snprintf(&r.text_buf[r.info->text_len],
+@@ -222,6 +208,7 @@
+ *
+ * r.info->text_len += 5;
+ *
++ * // commit and finalize the record
+ * prb_final_commit(&e);
+ * }
+ *
+@@ -230,11 +217,9 @@
+ * struct printk_info info;
+ * struct printk_record r;
+ * char text_buf[32];
+- * char dict_buf[32];
+ * u64 seq;
+ *
+- * prb_rec_init_rd(&r, &info, &text_buf[0], sizeof(text_buf),
+- * &dict_buf[0], sizeof(dict_buf));
++ * prb_rec_init_rd(&r, &info, &text_buf[0], sizeof(text_buf));
+ *
+ * prb_for_each_record(0, &test_rb, &seq, &r) {
+ * if (info.seq != seq)
+@@ -245,13 +230,8 @@
+ * text_buf[r.text_buf_size - 1] = 0;
+ * }
+ *
+- * if (info.dict_len > r.dict_buf_size) {
+- * pr_warn("record %llu dict truncated\n", info.seq);
+- * dict_buf[r.dict_buf_size - 1] = 0;
+- * }
+- *
+- * pr_info("%llu: %llu: %s;%s\n", info.seq, info.ts_nsec,
+- * &text_buf[0], info.dict_len ? &dict_buf[0] : "");
++ * pr_info("%llu: %llu: %s\n", info.seq, info.ts_nsec,
++ * &text_buf[0]);
+ * }
+ *
+ * Note that additional less convenient reader functions are available to
+@@ -495,8 +475,6 @@ static enum desc_state desc_read(struct
+ */
+ memcpy(&desc_out->text_blk_lpos, &desc->text_blk_lpos,
+ sizeof(desc_out->text_blk_lpos)); /* LMM(desc_read:C) */
+- memcpy(&desc_out->dict_blk_lpos, &desc->dict_blk_lpos,
+- sizeof(desc_out->dict_blk_lpos)); /* also part of desc_read:C */
+ if (seq_out)
+ *seq_out = info->seq; /* also part of desc_read:C */
+ if (caller_id_out)
+@@ -571,7 +549,7 @@ static void desc_make_reusable(struct pr
+ }
+
+ /*
+- * Given a data ring (text or dict), put the associated descriptor of each
++ * Given the text data ring, put the associated descriptor of each
+ * data block from @lpos_begin until @lpos_end into the reusable state.
+ *
+ * If there is any problem making the associated descriptor reusable, either
+@@ -586,21 +564,12 @@ static bool data_make_reusable(struct pr
+ unsigned long *lpos_out)
+ {
+ struct prb_desc_ring *desc_ring = &rb->desc_ring;
+- struct prb_data_blk_lpos *blk_lpos;
+ struct prb_data_block *blk;
+ enum desc_state d_state;
+ struct prb_desc desc;
++ struct prb_data_blk_lpos *blk_lpos = &desc.text_blk_lpos;
+ unsigned long id;
+
+- /*
+- * Using the provided @data_ring, point @blk_lpos to the correct
+- * blk_lpos within the local copy of the descriptor.
+- */
+- if (data_ring == &rb->text_data_ring)
+- blk_lpos = &desc.text_blk_lpos;
+- else
+- blk_lpos = &desc.dict_blk_lpos;
+-
+ /* Loop until @lpos_begin has advanced to or beyond @lpos_end. */
+ while ((lpos_end - lpos_begin) - 1 < DATA_SIZE(data_ring)) {
+ blk = to_block(data_ring, lpos_begin);
+@@ -839,8 +808,6 @@ static bool desc_push_tail(struct printk
+
+ if (!data_push_tail(rb, &rb->text_data_ring, desc.text_blk_lpos.next))
+ return false;
+- if (!data_push_tail(rb, &rb->dict_data_ring, desc.dict_blk_lpos.next))
+- return false;
+
+ /*
+ * Check the next descriptor after @tail_id before pushing the tail
+@@ -1347,9 +1314,8 @@ static struct prb_desc *desc_reopen_last
+ * data.
+ *
+ * The writer specifies the text size to extend (not the new total size) by
+- * setting the @text_buf_size field of @r. Extending dictionaries is not
+- * supported, so @dict_buf_size of @r should be set to 0. To ensure proper
+- * initialization of @r, prb_rec_init_wr() should be used.
++ * setting the @text_buf_size field of @r. To ensure proper initialization
++ * of @r, prb_rec_init_wr() should be used.
+ *
+ * This function will fail if @caller_id does not match the caller ID of the
+ * newest record. In that case the caller must reserve new data using
+@@ -1364,9 +1330,6 @@ static struct prb_desc *desc_reopen_last
+ *
+ * - @r->text_buf_size is set to the new total size of the buffer.
+ *
+- * - @r->dict_buf and @r->dict_buf_size are cleared because extending
+- * the dict buffer is not supported.
+- *
+ * - @r->info is not touched so that @r->info->text_len could be used
+ * to append the text.
+ *
+@@ -1375,8 +1338,7 @@ static struct prb_desc *desc_reopen_last
+ *
+ * Important: All @r->info fields will already be set with the current values
+ * for the record. I.e. @r->info->text_len will be less than
+- * @text_buf_size and @r->info->dict_len may be set, even though
+- * @dict_buf_size is 0. Writers can use @r->info->text_len to know
++ * @text_buf_size. Writers can use @r->info->text_len to know
+ * where concatenation begins and writers should update
+ * @r->info->text_len after concatenating.
+ */
+@@ -1454,10 +1416,6 @@ bool prb_reserve_in_last(struct prb_rese
+ if (r->text_buf_size && !r->text_buf)
+ goto fail;
+
+- /* Although dictionary data may be in use, it cannot be extended. */
+- r->dict_buf = NULL;
+- r->dict_buf_size = 0;
+-
+ r->info = info;
+
+ e->text_space = space_used(&rb->text_data_ring, &d->text_blk_lpos);
+@@ -1494,27 +1452,21 @@ static void desc_make_final(struct prb_d
+ *
+ * This is the public function available to writers to reserve data.
+ *
+- * The writer specifies the text and dict sizes to reserve by setting the
+- * @text_buf_size and @dict_buf_size fields of @r, respectively. Dictionaries
+- * are optional, so @dict_buf_size is allowed to be 0. To ensure proper
+- * initialization of @r, prb_rec_init_wr() should be used.
++ * The writer specifies the text size to reserve by setting the
++ * @text_buf_size field of @r. To ensure proper initialization of @r,
++ * prb_rec_init_wr() should be used.
+ *
+ * Context: Any context. Disables local interrupts on success.
+ * Return: true if at least text data could be allocated, otherwise false.
+ *
+- * On success, the fields @info, @text_buf, @dict_buf of @r will be set by
+- * this function and should be filled in by the writer before committing. Also
++ * On success, the fields @info and @text_buf of @r will be set by this
++ * function and should be filled in by the writer before committing. Also
+ * on success, prb_record_text_space() can be used on @e to query the actual
+ * space used for the text data block.
+ *
+- * If the function fails to reserve dictionary space (but all else succeeded),
+- * it will still report success. In that case @dict_buf is set to NULL and
+- * @dict_buf_size is set to 0. Writers must check this before writing to
+- * dictionary space.
+- *
+- * Important: @info->text_len and @info->dict_len need to be set correctly by
+- * the writer in order for data to be readable and/or extended.
+- * Their values are initialized to 0.
++ * Important: @info->text_len needs to be set correctly by the writer in
++ * order for data to be readable and/or extended. Its value
++ * is initialized to 0.
+ */
+ bool prb_reserve(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
+ struct printk_record *r)
+@@ -1528,9 +1480,6 @@ bool prb_reserve(struct prb_reserved_ent
+ if (!data_check_size(&rb->text_data_ring, r->text_buf_size))
+ goto fail;
+
+- if (!data_check_size(&rb->dict_data_ring, r->dict_buf_size))
+- goto fail;
+-
+ /*
+ * Descriptors in the reserved state act as blockers to all further
+ * reservations once the desc_ring has fully wrapped. Disable
+@@ -1598,15 +1547,6 @@ bool prb_reserve(struct prb_reserved_ent
+ goto fail;
+ }
+
+- r->dict_buf = data_alloc(rb, &rb->dict_data_ring, r->dict_buf_size,
+- &d->dict_blk_lpos, id);
+- /*
+- * If dict data allocation fails, the caller can still commit
+- * text. But dictionary information will not be available.
+- */
+- if (r->dict_buf_size && !r->dict_buf)
+- r->dict_buf_size = 0;
+-
+ r->info = info;
+
+ /* Record full text space used by record. */
+@@ -1869,17 +1809,6 @@ static int prb_read(struct printk_ringbu
+ return -ENOENT;
+ }
+
+- /*
+- * Copy dict data. Although this should not fail, dict data is not
+- * important. So if it fails, modify the copied meta data to report
+- * that there is no dict data, thus silently dropping the dict data.
+- */
+- if (!copy_data(&rb->dict_data_ring, &desc.dict_blk_lpos, info->dict_len,
+- r->dict_buf, r->dict_buf_size, NULL)) {
+- if (r->info)
+- r->info->dict_len = 0;
+- }
+-
+ /* Ensure the record is still finalized and has the same @seq. */
+ return desc_read_finalized_seq(desc_ring, id, seq, &desc);
+ }
+@@ -1974,7 +1903,7 @@ static bool _prb_read_valid(struct print
+ *
+ * This is the public function available to readers to read a record.
+ *
+- * The reader provides the @info, @text_buf, @dict_buf buffers of @r to be
++ * The reader provides the @info and @text_buf buffers of @r to be
+ * filled in. Any of the buffer pointers can be set to NULL if the reader
+ * is not interested in that data. To ensure proper initialization of @r,
+ * prb_rec_init_rd() should be used.
+@@ -2022,7 +1951,7 @@ bool prb_read_valid_info(struct printk_r
+ {
+ struct printk_record r;
+
+- prb_rec_init_rd(&r, info, NULL, 0, NULL, 0);
++ prb_rec_init_rd(&r, info, NULL, 0);
+
+ return _prb_read_valid(rb, &seq, &r, line_count);
+ }
+@@ -2084,8 +2013,6 @@ u64 prb_next_seq(struct printk_ringbuffe
+ * @rb: The ringbuffer to initialize.
+ * @text_buf: The data buffer for text data.
+ * @textbits: The size of @text_buf as a power-of-2 value.
+- * @dict_buf: The data buffer for dictionary data.
+- * @dictbits: The size of @dict_buf as a power-of-2 value.
+ * @descs: The descriptor buffer for ringbuffer records.
+ * @descbits: The count of @descs items as a power-of-2 value.
+ * @infos: The printk_info buffer for ringbuffer records.
+@@ -2099,7 +2026,6 @@ u64 prb_next_seq(struct printk_ringbuffe
+ */
+ void prb_init(struct printk_ringbuffer *rb,
+ char *text_buf, unsigned int textbits,
+- char *dict_buf, unsigned int dictbits,
+ struct prb_desc *descs, unsigned int descbits,
+ struct printk_info *infos)
+ {
+@@ -2117,18 +2043,11 @@ void prb_init(struct printk_ringbuffer *
+ atomic_long_set(&rb->text_data_ring.head_lpos, BLK0_LPOS(textbits));
+ atomic_long_set(&rb->text_data_ring.tail_lpos, BLK0_LPOS(textbits));
+
+- rb->dict_data_ring.size_bits = dictbits;
+- rb->dict_data_ring.data = dict_buf;
+- atomic_long_set(&rb->dict_data_ring.head_lpos, BLK0_LPOS(dictbits));
+- atomic_long_set(&rb->dict_data_ring.tail_lpos, BLK0_LPOS(dictbits));
+-
+ atomic_long_set(&rb->fail, 0);
+
+ atomic_long_set(&(descs[_DESCS_COUNT(descbits) - 1].state_var), DESC0_SV(descbits));
+ descs[_DESCS_COUNT(descbits) - 1].text_blk_lpos.begin = FAILED_LPOS;
+ descs[_DESCS_COUNT(descbits) - 1].text_blk_lpos.next = FAILED_LPOS;
+- descs[_DESCS_COUNT(descbits) - 1].dict_blk_lpos.begin = FAILED_LPOS;
+- descs[_DESCS_COUNT(descbits) - 1].dict_blk_lpos.next = FAILED_LPOS;
+
+ infos[0].seq = -(u64)_DESCS_COUNT(descbits);
+ infos[_DESCS_COUNT(descbits) - 1].seq = 0;
+--- a/kernel/printk/printk_ringbuffer.h
++++ b/kernel/printk/printk_ringbuffer.h
+@@ -9,15 +9,13 @@
+ /*
+ * Meta information about each stored message.
+ *
+- * All fields are set and used by the printk code except for
+- * @seq, @text_len, @dict_len, which are set and/or modified
+- * by the ringbuffer code.
++ * All fields are set by the printk code except for @seq, which is
++ * set by the ringbuffer code.
+ */
+ struct printk_info {
+ u64 seq; /* sequence number */
+ u64 ts_nsec; /* timestamp in nanoseconds */
+ u16 text_len; /* length of text message */
+- u16 dict_len; /* length of dictionary message */
+ u8 facility; /* syslog facility */
+ u8 flags:5; /* internal record flags */
+ u8 level:3; /* syslog level */
+@@ -30,23 +28,20 @@ struct printk_info {
+ * A structure providing the buffers, used by writers and readers.
+ *
+ * Writers:
+- * Using prb_rec_init_wr(), a writer sets @text_buf_size and @dict_buf_size
+- * before calling prb_reserve(). On success, prb_reserve() sets @info,
+- * @text_buf, @dict_buf to buffers reserved for that writer.
++ * Using prb_rec_init_wr(), a writer sets @text_buf_size before calling
++ * prb_reserve(). On success, prb_reserve() sets @info and @text_buf to
++ * buffers reserved for that writer.
+ *
+ * Readers:
+ * Using prb_rec_init_rd(), a reader sets all fields before calling
+- * prb_read_valid(). Note that the reader provides the @info, @text_buf,
+- * @dict_buf buffers. On success, the struct pointed to by @info will be
+- * filled and the char arrays pointed to by @text_buf and @dict_buf will
+- * be filled with text and dict data.
++ * prb_read_valid(). Note that the reader provides the @info and @text_buf
++ * buffers. On success, the struct pointed to by @info will be filled and
++ * the char array pointed to by @text_buf will be filled with text data.
+ */
+ struct printk_record {
+ struct printk_info *info;
+ char *text_buf;
+- char *dict_buf;
+ unsigned int text_buf_size;
+- unsigned int dict_buf_size;
+ };
+
+ /* Specifies the logical position and span of a data block. */
+@@ -63,7 +58,6 @@ struct prb_data_blk_lpos {
+ struct prb_desc {
+ atomic_long_t state_var;
+ struct prb_data_blk_lpos text_blk_lpos;
+- struct prb_data_blk_lpos dict_blk_lpos;
+ };
+
+ /* A ringbuffer of "ID + data" elements. */
+@@ -92,7 +86,6 @@ struct prb_desc_ring {
+ struct printk_ringbuffer {
+ struct prb_desc_ring desc_ring;
+ struct prb_data_ring text_data_ring;
+- struct prb_data_ring dict_data_ring;
+ atomic_long_t fail;
+ };
+
+@@ -236,9 +229,7 @@ enum desc_state {
+ * Note: The specified external buffer must be of the size:
+ * 2 ^ (descbits + avgtextbits)
+ */
+-#define _DEFINE_PRINTKRB(name, descbits, avgtextbits, avgdictbits, text_buf) \
+-static char _##name##_dict[1U << ((avgdictbits) + (descbits))] \
+- __aligned(__alignof__(unsigned long)); \
++#define _DEFINE_PRINTKRB(name, descbits, avgtextbits, text_buf) \
+ static struct prb_desc _##name##_descs[_DESCS_COUNT(descbits)] = { \
+ /* the initial head and tail */ \
+ [_DESCS_COUNT(descbits) - 1] = { \
+@@ -246,7 +237,6 @@ static struct prb_desc _##name##_descs[_
+ .state_var = ATOMIC_INIT(DESC0_SV(descbits)), \
+ /* no associated data block */ \
+ .text_blk_lpos = FAILED_BLK_LPOS, \
+- .dict_blk_lpos = FAILED_BLK_LPOS, \
+ }, \
+ }; \
+ static struct printk_info _##name##_infos[_DESCS_COUNT(descbits)] = { \
+@@ -275,12 +265,6 @@ static struct printk_ringbuffer name = {
+ .head_lpos = ATOMIC_LONG_INIT(BLK0_LPOS((avgtextbits) + (descbits))), \
+ .tail_lpos = ATOMIC_LONG_INIT(BLK0_LPOS((avgtextbits) + (descbits))), \
+ }, \
+- .dict_data_ring = { \
+- .size_bits = (avgtextbits) + (descbits), \
+- .data = &_##name##_dict[0], \
+- .head_lpos = ATOMIC_LONG_INIT(BLK0_LPOS((avgtextbits) + (descbits))), \
+- .tail_lpos = ATOMIC_LONG_INIT(BLK0_LPOS((avgtextbits) + (descbits))), \
+- }, \
+ .fail = ATOMIC_LONG_INIT(0), \
+ }
+
+@@ -290,17 +274,15 @@ static struct printk_ringbuffer name = {
+ * @name: The name of the ringbuffer variable.
+ * @descbits: The number of descriptors as a power-of-2 value.
+ * @avgtextbits: The average text data size per record as a power-of-2 value.
+- * @avgdictbits: The average dictionary data size per record as a
+- * power-of-2 value.
+ *
+ * This is a macro for defining a ringbuffer and all internal structures
+ * such that it is ready for immediate use. See _DEFINE_PRINTKRB() for a
+ * variant where the text data buffer can be specified externally.
+ */
+-#define DEFINE_PRINTKRB(name, descbits, avgtextbits, avgdictbits) \
++#define DEFINE_PRINTKRB(name, descbits, avgtextbits) \
+ static char _##name##_text[1U << ((avgtextbits) + (descbits))] \
+ __aligned(__alignof__(unsigned long)); \
+-_DEFINE_PRINTKRB(name, descbits, avgtextbits, avgdictbits, &_##name##_text[0])
++_DEFINE_PRINTKRB(name, descbits, avgtextbits, &_##name##_text[0])
+
+ /* Writer Interface */
+
+@@ -309,26 +291,13 @@ static char _##name##_text[1U << ((avgte
+ *
+ * @r: The record to initialize.
+ * @text_buf_size: The needed text buffer size.
+- * @dict_buf_size: The needed dictionary buffer size.
+- *
+- * Initialize all the fields that a writer is interested in. If
+- * @dict_buf_size is 0, a dictionary buffer will not be reserved.
+- * @text_buf_size must be greater than 0.
+- *
+- * Note that although @dict_buf_size may be initialized to non-zero,
+- * its value must be rechecked after a successful call to prb_reserve()
+- * to verify a dictionary buffer was actually reserved. Dictionary buffer
+- * reservation is allowed to fail.
+ */
+ static inline void prb_rec_init_wr(struct printk_record *r,
+- unsigned int text_buf_size,
+- unsigned int dict_buf_size)
++ unsigned int text_buf_size)
+ {
+ r->info = NULL;
+ r->text_buf = NULL;
+- r->dict_buf = NULL;
+ r->text_buf_size = text_buf_size;
+- r->dict_buf_size = dict_buf_size;
+ }
+
+ bool prb_reserve(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
+@@ -340,7 +309,6 @@ void prb_final_commit(struct prb_reserve
+
+ void prb_init(struct printk_ringbuffer *rb,
+ char *text_buf, unsigned int text_buf_size,
+- char *dict_buf, unsigned int dict_buf_size,
+ struct prb_desc *descs, unsigned int descs_count_bits,
+ struct printk_info *infos);
+ unsigned int prb_record_text_space(struct prb_reserved_entry *e);
+@@ -354,8 +322,6 @@ unsigned int prb_record_text_space(struc
+ * @info: A buffer to store record meta-data.
+ * @text_buf: A buffer to store text data.
+ * @text_buf_size: The size of @text_buf.
+- * @dict_buf: A buffer to store dictionary data.
+- * @dict_buf_size: The size of @dict_buf.
+ *
+ * Initialize all the fields that a reader is interested in. All arguments
+ * (except @r) are optional. Only record data for arguments that are
+@@ -363,14 +329,11 @@ unsigned int prb_record_text_space(struc
+ */
+ static inline void prb_rec_init_rd(struct printk_record *r,
+ struct printk_info *info,
+- char *text_buf, unsigned int text_buf_size,
+- char *dict_buf, unsigned int dict_buf_size)
++ char *text_buf, unsigned int text_buf_size)
+ {
+ r->info = info;
+ r->text_buf = text_buf;
+- r->dict_buf = dict_buf;
+ r->text_buf_size = text_buf_size;
+- r->dict_buf_size = dict_buf_size;
+ }
+
+ /**
diff --git a/debian/patches-rt/0023-locking-rtmutex-Use-custom-scheduling-function-for-s.patch b/debian/patches-rt/0022-locking-rtmutex-Use-custom-scheduling-function-for-s.patch
index 4ef80e09a..b96f3b258 100644
--- a/debian/patches-rt/0023-locking-rtmutex-Use-custom-scheduling-function-for-s.patch
+++ b/debian/patches-rt/0022-locking-rtmutex-Use-custom-scheduling-function-for-s.patch
@@ -1,8 +1,8 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 6 Oct 2020 13:07:17 +0200
-Subject: [PATCH 23/23] locking/rtmutex: Use custom scheduling function for
+Subject: [PATCH 22/22] locking/rtmutex: Use custom scheduling function for
spin-schedule()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
PREEMPT_RT builds the rwsem, mutex, spinlock and rwlock typed locks on
top of a rtmutex lock. While blocked task->pi_blocked_on is set
@@ -121,7 +121,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4813,7 +4813,7 @@ pick_next_task(struct rq *rq, struct tas
+@@ -4957,7 +4957,7 @@ pick_next_task(struct rq *rq, struct tas
*
* WARNING: must be called with preemption disabled!
*/
@@ -130,7 +130,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct task_struct *prev, *next;
unsigned long *switch_count;
-@@ -4866,7 +4866,7 @@ static void __sched notrace __schedule(b
+@@ -5010,7 +5010,7 @@ static void __sched notrace __schedule(b
* - ptrace_{,un}freeze_traced() can change ->state underneath us.
*/
prev_state = prev->state;
@@ -139,7 +139,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (signal_pending_state(prev_state, prev)) {
prev->state = TASK_RUNNING;
} else {
-@@ -4950,7 +4950,7 @@ void __noreturn do_task_dead(void)
+@@ -5094,7 +5094,7 @@ void __noreturn do_task_dead(void)
/* Tell freezer to ignore us: */
current->flags |= PF_NOFREEZE;
@@ -148,7 +148,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
BUG();
/* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
-@@ -4980,9 +4980,6 @@ static inline void sched_submit_work(str
+@@ -5124,9 +5124,6 @@ static inline void sched_submit_work(str
preempt_enable_no_resched();
}
@@ -158,7 +158,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* If we are going to sleep and we have plugged IO queued,
* make sure to submit it to avoid deadlocks.
-@@ -5008,7 +5005,7 @@ asmlinkage __visible void __sched schedu
+@@ -5152,7 +5149,7 @@ asmlinkage __visible void __sched schedu
sched_submit_work(tsk);
do {
preempt_disable();
@@ -167,7 +167,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
sched_preempt_enable_no_resched();
} while (need_resched());
sched_update_worker(tsk);
-@@ -5036,7 +5033,7 @@ void __sched schedule_idle(void)
+@@ -5180,7 +5177,7 @@ void __sched schedule_idle(void)
*/
WARN_ON_ONCE(current->state);
do {
@@ -176,7 +176,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
} while (need_resched());
}
-@@ -5089,7 +5086,7 @@ static void __sched notrace preempt_sche
+@@ -5233,7 +5230,7 @@ static void __sched notrace preempt_sche
*/
preempt_disable_notrace();
preempt_latency_start(1);
@@ -185,7 +185,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
preempt_latency_stop(1);
preempt_enable_no_resched_notrace();
-@@ -5119,6 +5116,19 @@ asmlinkage __visible void __sched notrac
+@@ -5263,6 +5260,19 @@ asmlinkage __visible void __sched notrac
NOKPROBE_SYMBOL(preempt_schedule);
EXPORT_SYMBOL(preempt_schedule);
@@ -205,7 +205,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* preempt_schedule_notrace - preempt_schedule called by tracing
*
-@@ -5162,7 +5172,7 @@ asmlinkage __visible void __sched notrac
+@@ -5306,7 +5316,7 @@ asmlinkage __visible void __sched notrac
* an infinite recursion.
*/
prev_ctx = exception_enter();
@@ -214,7 +214,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
exception_exit(prev_ctx);
preempt_latency_stop(1);
-@@ -5191,7 +5201,7 @@ asmlinkage __visible void __sched preemp
+@@ -5335,7 +5345,7 @@ asmlinkage __visible void __sched preemp
do {
preempt_disable();
local_irq_enable();
diff --git a/debian/patches-rt/0022-printk-avoid-and-or-handle-record-truncation.patch b/debian/patches-rt/0022-printk-avoid-and-or-handle-record-truncation.patch
new file mode 100644
index 000000000..60afdaba6
--- /dev/null
+++ b/debian/patches-rt/0022-printk-avoid-and-or-handle-record-truncation.patch
@@ -0,0 +1,119 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Wed, 30 Sep 2020 11:07:33 +0206
+Subject: [PATCH 22/25] printk: avoid and/or handle record truncation
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+If a reader provides a buffer that is smaller than the message text,
+the @text_len field of @info will have a value larger than the buffer
+size. If readers blindly read @text_len bytes of data without
+checking the size, they will read beyond their buffer.
+
+Add a check to record_print_text() to properly recognize when such
+truncation has occurred.
+
+Add a maximum size argument to the ringbuffer function that extends
+records so that no record can be created that is larger than the
+buffer size of readers.
+
+When extending records (LOG_CONT), do not extend records beyond
+LOG_LINE_MAX since that is the maximum size available in the buffers
+used by consoles and syslog.
+
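+A hedged reader-side sketch of the clamp (@prb and @seq stand in for a
+real ringbuffer and sequence number; they are assumptions, not part of
+this patch):
+
+	struct printk_info info;
+	struct printk_record r;
+	char text[128];
+
+	prb_rec_init_rd(&r, &info, text, sizeof(text));
+	if (prb_read_valid(prb, seq, &r)) {
+		/* info.text_len may exceed sizeof(text): clamp before use */
+		size_t len = min_t(size_t, info.text_len, sizeof(text));
+
+		/* ... process len bytes of text ... */
+	}
+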
+Fixes: f5f022e53b87 ("printk: reimplement log_cont using record extension")
+Reported-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20200930090134.8723-2-john.ogness@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 9 ++++++++-
+ kernel/printk/printk_ringbuffer.c | 12 ++++++++++--
+ kernel/printk/printk_ringbuffer.h | 2 +-
+ 3 files changed, 19 insertions(+), 4 deletions(-)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1357,6 +1357,13 @@ static size_t record_print_text(struct p
+ size_t len = 0;
+ char *next;
+
++ /*
++ * If the message was truncated because the buffer was not large
++ * enough, treat the available text as if it were the full text.
++ */
++ if (text_len > buf_size)
++ text_len = buf_size;
++
+ prefix_len = info_print_prefix(r->info, syslog, time, prefix);
+
+ /*
+@@ -1911,7 +1918,7 @@ static size_t log_output(int facility, i
+ struct printk_record r;
+
+ prb_rec_init_wr(&r, text_len);
+- if (prb_reserve_in_last(&e, prb, &r, caller_id)) {
++ if (prb_reserve_in_last(&e, prb, &r, caller_id, LOG_LINE_MAX)) {
+ memcpy(&r.text_buf[r.info->text_len], text, text_len);
+ r.info->text_len += text_len;
+ if (lflags & LOG_NEWLINE) {
+--- a/kernel/printk/printk_ringbuffer.c
++++ b/kernel/printk/printk_ringbuffer.c
+@@ -202,7 +202,8 @@
+ * // specify additional 5 bytes text space to extend
+ * prb_rec_init_wr(&r, 5);
+ *
+- * if (prb_reserve_in_last(&e, &test_rb, &r, printk_caller_id())) {
++ * // try to extend, but only if it does not exceed 32 bytes
+ * if (prb_reserve_in_last(&e, &test_rb, &r, printk_caller_id(), 32)) {
+ * snprintf(&r.text_buf[r.info->text_len],
+ * r.text_buf_size - r.info->text_len, "hello");
+ *
+@@ -1309,6 +1310,7 @@ static struct prb_desc *desc_reopen_last
+ * @rb: The ringbuffer to re-reserve and extend data in.
+ * @r: The record structure to allocate buffers for.
+ * @caller_id: The caller ID of the caller (reserving writer).
++ * @max_size: Fail if the extended size would be greater than this.
+ *
+ * This is the public function available to writers to re-reserve and extend
+ * data.
+@@ -1343,7 +1345,7 @@ static struct prb_desc *desc_reopen_last
+ * @r->info->text_len after concatenating.
+ */
+ bool prb_reserve_in_last(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
+- struct printk_record *r, u32 caller_id)
++ struct printk_record *r, u32 caller_id, unsigned int max_size)
+ {
+ struct prb_desc_ring *desc_ring = &rb->desc_ring;
+ struct printk_info *info;
+@@ -1389,6 +1391,9 @@ bool prb_reserve_in_last(struct prb_rese
+ if (!data_check_size(&rb->text_data_ring, r->text_buf_size))
+ goto fail;
+
++ if (r->text_buf_size > max_size)
++ goto fail;
++
+ r->text_buf = data_alloc(rb, &rb->text_data_ring, r->text_buf_size,
+ &d->text_blk_lpos, id);
+ } else {
+@@ -1410,6 +1415,9 @@ bool prb_reserve_in_last(struct prb_rese
+ if (!data_check_size(&rb->text_data_ring, r->text_buf_size))
+ goto fail;
+
++ if (r->text_buf_size > max_size)
++ goto fail;
++
+ r->text_buf = data_realloc(rb, &rb->text_data_ring, r->text_buf_size,
+ &d->text_blk_lpos, id);
+ }
+--- a/kernel/printk/printk_ringbuffer.h
++++ b/kernel/printk/printk_ringbuffer.h
+@@ -303,7 +303,7 @@ static inline void prb_rec_init_wr(struc
+ bool prb_reserve(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
+ struct printk_record *r);
+ bool prb_reserve_in_last(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
+- struct printk_record *r, u32 caller_id);
++ struct printk_record *r, u32 caller_id, unsigned int max_size);
+ void prb_commit(struct prb_reserved_entry *e);
+ void prb_final_commit(struct prb_reserved_entry *e);
+
diff --git a/debian/patches-rt/0022-printk-implement-dev-kmsg.patch b/debian/patches-rt/0022-printk-implement-dev-kmsg.patch
deleted file mode 100644
index 4e0c1ad91..000000000
--- a/debian/patches-rt/0022-printk-implement-dev-kmsg.patch
+++ /dev/null
@@ -1,305 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Tue, 12 Feb 2019 15:30:00 +0100
-Subject: [PATCH 22/25] printk: implement /dev/kmsg
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-Since printk messages are now logged to a new ring buffer, update
-the /dev/kmsg functions to pull the messages from there.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- fs/proc/kmsg.c | 4 -
- include/linux/printk.h | 1
- kernel/printk/printk.c | 162 +++++++++++++++++++++++++++++++++----------------
- 3 files changed, 113 insertions(+), 54 deletions(-)
-
---- a/fs/proc/kmsg.c
-+++ b/fs/proc/kmsg.c
-@@ -18,8 +18,6 @@
- #include <linux/uaccess.h>
- #include <asm/io.h>
-
--extern wait_queue_head_t log_wait;
--
- static int kmsg_open(struct inode * inode, struct file * file)
- {
- return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_PROC);
-@@ -42,7 +40,7 @@ static ssize_t kmsg_read(struct file *fi
-
- static __poll_t kmsg_poll(struct file *file, poll_table *wait)
- {
-- poll_wait(file, &log_wait, wait);
-+ poll_wait(file, printk_wait_queue(), wait);
- if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_PROC))
- return EPOLLIN | EPOLLRDNORM;
- return 0;
---- a/include/linux/printk.h
-+++ b/include/linux/printk.h
-@@ -193,6 +193,7 @@ void __init setup_log_buf(int early);
- void dump_stack_print_info(const char *log_lvl);
- void show_regs_print_info(const char *log_lvl);
- extern asmlinkage void dump_stack(void) __cold;
-+struct wait_queue_head *printk_wait_queue(void);
- #else
- static inline __printf(1, 0)
- int vprintk(const char *s, va_list args)
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -673,10 +673,11 @@ static ssize_t msg_print_ext_body(char *
- /* /dev/kmsg - userspace message inject/listen interface */
- struct devkmsg_user {
- u64 seq;
-- u32 idx;
-+ struct prb_iterator iter;
- struct ratelimit_state rs;
- struct mutex lock;
- char buf[CONSOLE_EXT_LOG_MAX];
-+ char msgbuf[PRINTK_RECORD_MAX];
- };
-
- static __printf(3, 4) __cold
-@@ -759,9 +760,11 @@ static ssize_t devkmsg_read(struct file
- size_t count, loff_t *ppos)
- {
- struct devkmsg_user *user = file->private_data;
-+ struct prb_iterator backup_iter;
- struct printk_log *msg;
-- size_t len;
- ssize_t ret;
-+ size_t len;
-+ u64 seq;
-
- if (!user)
- return -EBADF;
-@@ -770,52 +773,67 @@ static ssize_t devkmsg_read(struct file
- if (ret)
- return ret;
-
-- logbuf_lock_irq();
-- while (user->seq == log_next_seq) {
-- if (file->f_flags & O_NONBLOCK) {
-- ret = -EAGAIN;
-- logbuf_unlock_irq();
-- goto out;
-- }
-+ /* make a backup copy in case there is a problem */
-+ prb_iter_copy(&backup_iter, &user->iter);
-
-- logbuf_unlock_irq();
-- ret = wait_event_interruptible(log_wait,
-- user->seq != log_next_seq);
-- if (ret)
-- goto out;
-- logbuf_lock_irq();
-+ if (file->f_flags & O_NONBLOCK) {
-+ ret = prb_iter_next(&user->iter, &user->msgbuf[0],
-+ sizeof(user->msgbuf), &seq);
-+ } else {
-+ ret = prb_iter_wait_next(&user->iter, &user->msgbuf[0],
-+ sizeof(user->msgbuf), &seq);
- }
--
-- if (user->seq < log_first_seq) {
-- /* our last seen message is gone, return error and reset */
-- user->idx = log_first_idx;
-- user->seq = log_first_seq;
-+ if (ret == 0) {
-+ /* end of list */
-+ ret = -EAGAIN;
-+ goto out;
-+ } else if (ret == -EINVAL) {
-+ /* iterator invalid, return error and reset */
- ret = -EPIPE;
-- logbuf_unlock_irq();
-+ prb_iter_init(&user->iter, &printk_rb, &user->seq);
-+ goto out;
-+ } else if (ret < 0) {
-+ /* interrupted by signal */
- goto out;
- }
-
-- msg = log_from_idx(user->idx);
-+ if (user->seq == 0) {
-+ user->seq = seq;
-+ } else {
-+ user->seq++;
-+ if (user->seq < seq) {
-+ ret = -EPIPE;
-+ goto restore_out;
-+ }
-+ }
-+
-+ msg = (struct printk_log *)&user->msgbuf[0];
- len = msg_print_ext_header(user->buf, sizeof(user->buf),
- msg, user->seq);
- len += msg_print_ext_body(user->buf + len, sizeof(user->buf) - len,
- log_dict(msg), msg->dict_len,
- log_text(msg), msg->text_len);
-
-- user->idx = log_next(user->idx);
-- user->seq++;
-- logbuf_unlock_irq();
--
- if (len > count) {
- ret = -EINVAL;
-- goto out;
-+ goto restore_out;
- }
-
- if (copy_to_user(buf, user->buf, len)) {
- ret = -EFAULT;
-- goto out;
-+ goto restore_out;
- }
-+
- ret = len;
-+ goto out;
-+restore_out:
-+ /*
-+ * There was an error, but this message should not be
-+ * lost because of it. Restore the backup and setup
-+ * seq so that it will work with the next read.
-+ */
-+ prb_iter_copy(&user->iter, &backup_iter);
-+ user->seq = seq - 1;
- out:
- mutex_unlock(&user->lock);
- return ret;
-@@ -832,19 +850,21 @@ static ssize_t devkmsg_read(struct file
- static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
- {
- struct devkmsg_user *user = file->private_data;
-- loff_t ret = 0;
-+ loff_t ret;
-
- if (!user)
- return -EBADF;
- if (offset)
- return -ESPIPE;
-
-- logbuf_lock_irq();
-+ ret = mutex_lock_interruptible(&user->lock);
-+ if (ret)
-+ return ret;
-+
- switch (whence) {
- case SEEK_SET:
- /* the first record */
-- user->idx = log_first_idx;
-- user->seq = log_first_seq;
-+ prb_iter_init(&user->iter, &printk_rb, &user->seq);
- break;
- case SEEK_DATA:
- /*
-@@ -852,40 +872,83 @@ static loff_t devkmsg_llseek(struct file
- * like issued by 'dmesg -c'. Reading /dev/kmsg itself
- * changes no global state, and does not clear anything.
- */
-- user->idx = clear_idx;
-- user->seq = clear_seq;
-+ for (;;) {
-+ prb_iter_init(&user->iter, &printk_rb, NULL);
-+ ret = prb_iter_seek(&user->iter, clear_seq);
-+ if (ret > 0) {
-+ /* seeked to clear seq */
-+ user->seq = clear_seq;
-+ break;
-+ } else if (ret == 0) {
-+ /*
-+ * The end of the list was hit without
-+ * ever seeing the clear seq. Just
-+ * seek to the beginning of the list.
-+ */
-+ prb_iter_init(&user->iter, &printk_rb,
-+ &user->seq);
-+ break;
-+ }
-+ /* iterator invalid, start over */
-+ }
-+ ret = 0;
- break;
- case SEEK_END:
- /* after the last record */
-- user->idx = log_next_idx;
-- user->seq = log_next_seq;
-+ for (;;) {
-+ ret = prb_iter_next(&user->iter, NULL, 0, &user->seq);
-+ if (ret == 0)
-+ break;
-+ else if (ret > 0)
-+ continue;
-+ /* iterator invalid, start over */
-+ prb_iter_init(&user->iter, &printk_rb, &user->seq);
-+ }
-+ ret = 0;
- break;
- default:
- ret = -EINVAL;
- }
-- logbuf_unlock_irq();
-+
-+ mutex_unlock(&user->lock);
- return ret;
- }
-
-+struct wait_queue_head *printk_wait_queue(void)
-+{
-+ /* FIXME: using prb internals! */
-+ return printk_rb.wq;
-+}
-+
- static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
- {
- struct devkmsg_user *user = file->private_data;
-+ struct prb_iterator iter;
- __poll_t ret = 0;
-+ int rbret;
-+ u64 seq;
-
- if (!user)
- return EPOLLERR|EPOLLNVAL;
-
-- poll_wait(file, &log_wait, wait);
-+ poll_wait(file, printk_wait_queue(), wait);
-
-- logbuf_lock_irq();
-- if (user->seq < log_next_seq) {
-- /* return error when data has vanished underneath us */
-- if (user->seq < log_first_seq)
-- ret = EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
-- else
-- ret = EPOLLIN|EPOLLRDNORM;
-- }
-- logbuf_unlock_irq();
-+ mutex_lock(&user->lock);
-+
-+ /* use copy so no actual iteration takes place */
-+ prb_iter_copy(&iter, &user->iter);
-+
-+ rbret = prb_iter_next(&iter, &user->msgbuf[0],
-+ sizeof(user->msgbuf), &seq);
-+ if (rbret == 0)
-+ goto out;
-+
-+ ret = EPOLLIN|EPOLLRDNORM;
-+
-+ if (rbret < 0 || (seq - user->seq) != 1)
-+ ret |= EPOLLERR|EPOLLPRI;
-+out:
-+ mutex_unlock(&user->lock);
-
- return ret;
- }
-@@ -915,10 +978,7 @@ static int devkmsg_open(struct inode *in
-
- mutex_init(&user->lock);
-
-- logbuf_lock_irq();
-- user->idx = log_first_idx;
-- user->seq = log_first_seq;
-- logbuf_unlock_irq();
-+ prb_iter_init(&user->iter, &printk_rb, &user->seq);
-
- file->private_data = user;
- return 0;
diff --git a/debian/patches-rt/0023-printk-implement-syslog.patch b/debian/patches-rt/0023-printk-implement-syslog.patch
deleted file mode 100644
index 5eee6133d..000000000
--- a/debian/patches-rt/0023-printk-implement-syslog.patch
+++ /dev/null
@@ -1,494 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Tue, 12 Feb 2019 15:30:01 +0100
-Subject: [PATCH 23/25] printk: implement syslog
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-Since printk messages are now logged to a new ring buffer, update
-the syslog functions to pull the messages from there.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/printk/printk.c | 342 +++++++++++++++++++++++++++++++++----------------
- 1 file changed, 236 insertions(+), 106 deletions(-)
-
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -407,10 +407,12 @@ DECLARE_STATIC_PRINTKRB_CPULOCK(printk_c
- /* record buffer */
- DECLARE_STATIC_PRINTKRB(printk_rb, CONFIG_LOG_BUF_SHIFT, &printk_cpulock);
-
-+static DEFINE_MUTEX(syslog_lock);
-+DECLARE_STATIC_PRINTKRB_ITER(syslog_iter, &printk_rb);
-+
- DECLARE_WAIT_QUEUE_HEAD(log_wait);
- /* the next printk record to read by syslog(READ) or /proc/kmsg */
- static u64 syslog_seq;
--static u32 syslog_idx;
- static size_t syslog_partial;
- static bool syslog_time;
-
-@@ -1311,30 +1313,42 @@ static size_t msg_print_text(const struc
- return len;
- }
-
--static int syslog_print(char __user *buf, int size)
-+static int syslog_print(char __user *buf, int size, char *text,
-+ char *msgbuf, int *locked)
- {
-- char *text;
-+ struct prb_iterator iter;
- struct printk_log *msg;
- int len = 0;
--
-- text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
-- if (!text)
-- return -ENOMEM;
-+ u64 seq;
-+ int ret;
-
- while (size > 0) {
- size_t n;
- size_t skip;
-
-- logbuf_lock_irq();
-- if (syslog_seq < log_first_seq) {
-- /* messages are gone, move to first one */
-- syslog_seq = log_first_seq;
-- syslog_idx = log_first_idx;
-- syslog_partial = 0;
-+ for (;;) {
-+ prb_iter_copy(&iter, &syslog_iter);
-+ ret = prb_iter_next(&iter, msgbuf,
-+ PRINTK_RECORD_MAX, &seq);
-+ if (ret < 0) {
-+ /* messages are gone, move to first one */
-+ prb_iter_init(&syslog_iter, &printk_rb,
-+ &syslog_seq);
-+ syslog_partial = 0;
-+ continue;
-+ }
-+ break;
- }
-- if (syslog_seq == log_next_seq) {
-- logbuf_unlock_irq();
-+ if (ret == 0)
- break;
-+
-+ /*
-+ * If messages have been missed, the partial tracker
-+ * is no longer valid and must be reset.
-+ */
-+ if (syslog_seq > 0 && seq - 1 != syslog_seq) {
-+ syslog_seq = seq - 1;
-+ syslog_partial = 0;
- }
-
- /*
-@@ -1344,131 +1358,212 @@ static int syslog_print(char __user *buf
- if (!syslog_partial)
- syslog_time = printk_time;
-
-+ msg = (struct printk_log *)msgbuf;
-+
- skip = syslog_partial;
-- msg = log_from_idx(syslog_idx);
- n = msg_print_text(msg, true, syslog_time, text,
-- LOG_LINE_MAX + PREFIX_MAX);
-+ PRINTK_SPRINT_MAX);
- if (n - syslog_partial <= size) {
- /* message fits into buffer, move forward */
-- syslog_idx = log_next(syslog_idx);
-- syslog_seq++;
-+ prb_iter_next(&syslog_iter, NULL, 0, &syslog_seq);
- n -= syslog_partial;
- syslog_partial = 0;
-- } else if (!len){
-+ } else if (!len) {
- /* partial read(), remember position */
- n = size;
- syslog_partial += n;
- } else
- n = 0;
-- logbuf_unlock_irq();
-
- if (!n)
- break;
-
-+ mutex_unlock(&syslog_lock);
- if (copy_to_user(buf, text + skip, n)) {
- if (!len)
- len = -EFAULT;
-+ *locked = 0;
- break;
- }
-+ ret = mutex_lock_interruptible(&syslog_lock);
-
- len += n;
- size -= n;
- buf += n;
-+
-+ if (ret) {
-+ if (!len)
-+ len = ret;
-+ *locked = 0;
-+ break;
-+ }
- }
-
-- kfree(text);
- return len;
- }
-
--static int syslog_print_all(char __user *buf, int size, bool clear)
-+static int count_remaining(struct prb_iterator *iter, u64 until_seq,
-+ char *msgbuf, int size, bool records, bool time)
- {
-- char *text;
-+ struct prb_iterator local_iter;
-+ struct printk_log *msg;
- int len = 0;
-- u64 next_seq;
- u64 seq;
-- u32 idx;
-+ int ret;
-+
-+ prb_iter_copy(&local_iter, iter);
-+ for (;;) {
-+ ret = prb_iter_next(&local_iter, msgbuf, size, &seq);
-+ if (ret == 0) {
-+ break;
-+ } else if (ret < 0) {
-+ /* the iter is invalid, restart from head */
-+ prb_iter_init(&local_iter, &printk_rb, NULL);
-+ len = 0;
-+ continue;
-+ }
-+
-+ if (until_seq && seq >= until_seq)
-+ break;
-+
-+ if (records) {
-+ len++;
-+ } else {
-+ msg = (struct printk_log *)msgbuf;
-+ len += msg_print_text(msg, true, time, NULL, 0);
-+ }
-+ }
-+
-+ return len;
-+}
-+
-+static void syslog_clear(void)
-+{
-+ struct prb_iterator iter;
-+ int ret;
-+
-+ prb_iter_init(&iter, &printk_rb, &clear_seq);
-+ for (;;) {
-+ ret = prb_iter_next(&iter, NULL, 0, &clear_seq);
-+ if (ret == 0)
-+ break;
-+ else if (ret < 0)
-+ prb_iter_init(&iter, &printk_rb, &clear_seq);
-+ }
-+}
-+
-+static int syslog_print_all(char __user *buf, int size, bool clear)
-+{
-+ struct prb_iterator iter;
-+ struct printk_log *msg;
-+ char *msgbuf = NULL;
-+ char *text = NULL;
-+ int textlen;
-+ u64 seq = 0;
-+ int len = 0;
- bool time;
-+ int ret;
-
-- text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
-+ text = kmalloc(PRINTK_SPRINT_MAX, GFP_KERNEL);
- if (!text)
- return -ENOMEM;
-+ msgbuf = kmalloc(PRINTK_RECORD_MAX, GFP_KERNEL);
-+ if (!msgbuf) {
-+ kfree(text);
-+ return -ENOMEM;
-+ }
-
- time = printk_time;
-- logbuf_lock_irq();
-+
- /*
-- * Find first record that fits, including all following records,
-- * into the user-provided buffer for this dump.
-+ * Setup iter to last event before clear. Clear may
-+ * be lost, but keep going with a best effort.
- */
-- seq = clear_seq;
-- idx = clear_idx;
-- while (seq < log_next_seq) {
-- struct printk_log *msg = log_from_idx(idx);
--
-- len += msg_print_text(msg, true, time, NULL, 0);
-- idx = log_next(idx);
-- seq++;
-- }
-+ prb_iter_init(&iter, &printk_rb, NULL);
-+ prb_iter_seek(&iter, clear_seq);
-
-- /* move first record forward until length fits into the buffer */
-- seq = clear_seq;
-- idx = clear_idx;
-- while (len > size && seq < log_next_seq) {
-- struct printk_log *msg = log_from_idx(idx);
-+ /* count the total bytes after clear */
-+ len = count_remaining(&iter, 0, msgbuf, PRINTK_RECORD_MAX,
-+ false, time);
-+
-+ /* move iter forward until length fits into the buffer */
-+ while (len > size) {
-+ ret = prb_iter_next(&iter, msgbuf,
-+ PRINTK_RECORD_MAX, &seq);
-+ if (ret == 0) {
-+ break;
-+ } else if (ret < 0) {
-+ /*
-+ * The iter is now invalid so clear will
-+ * also be invalid. Restart from the head.
-+ */
-+ prb_iter_init(&iter, &printk_rb, NULL);
-+ len = count_remaining(&iter, 0, msgbuf,
-+ PRINTK_RECORD_MAX, false, time);
-+ continue;
-+ }
-
-+ msg = (struct printk_log *)msgbuf;
- len -= msg_print_text(msg, true, time, NULL, 0);
-- idx = log_next(idx);
-- seq++;
-- }
-
-- /* last message fitting into this dump */
-- next_seq = log_next_seq;
-+ if (clear)
-+ clear_seq = seq;
-+ }
-
-+ /* copy messages to buffer */
- len = 0;
-- while (len >= 0 && seq < next_seq) {
-- struct printk_log *msg = log_from_idx(idx);
-- int textlen = msg_print_text(msg, true, time, text,
-- LOG_LINE_MAX + PREFIX_MAX);
-+ while (len >= 0 && len < size) {
-+ if (clear)
-+ clear_seq = seq;
-
-- idx = log_next(idx);
-- seq++;
-+ ret = prb_iter_next(&iter, msgbuf,
-+ PRINTK_RECORD_MAX, &seq);
-+ if (ret == 0) {
-+ break;
-+ } else if (ret < 0) {
-+ /*
-+ * The iter is now invalid. Make a best
-+ * effort to grab the rest of the log
-+ * from the new head.
-+ */
-+ prb_iter_init(&iter, &printk_rb, NULL);
-+ continue;
-+ }
-+
-+ msg = (struct printk_log *)msgbuf;
-+ textlen = msg_print_text(msg, true, time, text,
-+ PRINTK_SPRINT_MAX);
-+ if (textlen < 0) {
-+ len = textlen;
-+ break;
-+ }
-
-- logbuf_unlock_irq();
- if (copy_to_user(buf + len, text, textlen))
- len = -EFAULT;
- else
- len += textlen;
-- logbuf_lock_irq();
--
-- if (seq < log_first_seq) {
-- /* messages are gone, move to next one */
-- seq = log_first_seq;
-- idx = log_first_idx;
-- }
- }
-
-- if (clear) {
-- clear_seq = log_next_seq;
-- clear_idx = log_next_idx;
-- }
-- logbuf_unlock_irq();
-+ if (clear && !seq)
-+ syslog_clear();
-
-- kfree(text);
-+ if (text)
-+ kfree(text);
-+ if (msgbuf)
-+ kfree(msgbuf);
- return len;
- }
-
--static void syslog_clear(void)
--{
-- logbuf_lock_irq();
-- clear_seq = log_next_seq;
-- clear_idx = log_next_idx;
-- logbuf_unlock_irq();
--}
--
- int do_syslog(int type, char __user *buf, int len, int source)
- {
- bool clear = false;
- static int saved_console_loglevel = LOGLEVEL_DEFAULT;
-+ struct prb_iterator iter;
-+ char *msgbuf = NULL;
-+ char *text = NULL;
-+ int locked;
- int error;
-+ int ret;
-
- error = check_syslog_permissions(type, source);
- if (error)
-@@ -1486,11 +1581,49 @@ int do_syslog(int type, char __user *buf
- return 0;
- if (!access_ok(buf, len))
- return -EFAULT;
-- error = wait_event_interruptible(log_wait,
-- syslog_seq != log_next_seq);
-+
-+ text = kmalloc(PRINTK_SPRINT_MAX, GFP_KERNEL);
-+ msgbuf = kmalloc(PRINTK_RECORD_MAX, GFP_KERNEL);
-+ if (!text || !msgbuf) {
-+ error = -ENOMEM;
-+ goto out;
-+ }
-+
-+ error = mutex_lock_interruptible(&syslog_lock);
- if (error)
-- return error;
-- error = syslog_print(buf, len);
-+ goto out;
-+
-+ /*
-+ * Wait until a first message is available. Use a copy
-+ * because no iteration should occur for syslog now.
-+ */
-+ for (;;) {
-+ prb_iter_copy(&iter, &syslog_iter);
-+
-+ mutex_unlock(&syslog_lock);
-+ ret = prb_iter_wait_next(&iter, NULL, 0, NULL);
-+ if (ret == -ERESTARTSYS) {
-+ error = ret;
-+ goto out;
-+ }
-+ error = mutex_lock_interruptible(&syslog_lock);
-+ if (error)
-+ goto out;
-+
-+ if (ret == -EINVAL) {
-+ prb_iter_init(&syslog_iter, &printk_rb,
-+ &syslog_seq);
-+ syslog_partial = 0;
-+ continue;
-+ }
-+ break;
-+ }
-+
-+ /* print as much as will fit in the user buffer */
-+ locked = 1;
-+ error = syslog_print(buf, len, text, msgbuf, &locked);
-+ if (locked)
-+ mutex_unlock(&syslog_lock);
- break;
- /* Read/clear last kernel messages */
- case SYSLOG_ACTION_READ_CLEAR:
-@@ -1535,47 +1668,45 @@ int do_syslog(int type, char __user *buf
- break;
- /* Number of chars in the log buffer */
- case SYSLOG_ACTION_SIZE_UNREAD:
-- logbuf_lock_irq();
-- if (syslog_seq < log_first_seq) {
-- /* messages are gone, move to first one */
-- syslog_seq = log_first_seq;
-- syslog_idx = log_first_idx;
-- syslog_partial = 0;
-- }
-+ msgbuf = kmalloc(PRINTK_RECORD_MAX, GFP_KERNEL);
-+ if (!msgbuf)
-+ return -ENOMEM;
-+
-+ error = mutex_lock_interruptible(&syslog_lock);
-+ if (error)
-+ goto out;
-+
- if (source == SYSLOG_FROM_PROC) {
- /*
- * Short-cut for poll(/"proc/kmsg") which simply checks
- * for pending data, not the size; return the count of
- * records, not the length.
- */
-- error = log_next_seq - syslog_seq;
-+ error = count_remaining(&syslog_iter, 0, msgbuf,
-+ PRINTK_RECORD_MAX, true,
-+ printk_time);
- } else {
-- u64 seq = syslog_seq;
-- u32 idx = syslog_idx;
-- bool time = syslog_partial ? syslog_time : printk_time;
--
-- while (seq < log_next_seq) {
-- struct printk_log *msg = log_from_idx(idx);
--
-- error += msg_print_text(msg, true, time, NULL,
-- 0);
-- time = printk_time;
-- idx = log_next(idx);
-- seq++;
-- }
-+ error = count_remaining(&syslog_iter, 0, msgbuf,
-+ PRINTK_RECORD_MAX, false,
-+ printk_time);
- error -= syslog_partial;
- }
-- logbuf_unlock_irq();
-+
-+ mutex_unlock(&syslog_lock);
- break;
- /* Size of the log buffer */
- case SYSLOG_ACTION_SIZE_BUFFER:
-- error = log_buf_len;
-+ error = prb_buffer_size(&printk_rb);
- break;
- default:
- error = -EINVAL;
- break;
- }
--
-+out:
-+ if (msgbuf)
-+ kfree(msgbuf);
-+ if (text)
-+ kfree(text);
- return error;
- }
-
-@@ -1983,7 +2114,6 @@ EXPORT_SYMBOL(printk);
- #define printk_time false
-
- static u64 syslog_seq;
--static u32 syslog_idx;
- static u64 log_first_seq;
- static u32 log_first_idx;
- static u64 log_next_seq;
diff --git a/debian/patches-rt/0023-printk-reduce-setup_text_buf-size-to-LOG_LINE_MAX.patch b/debian/patches-rt/0023-printk-reduce-setup_text_buf-size-to-LOG_LINE_MAX.patch
new file mode 100644
index 000000000..df8e81479
--- /dev/null
+++ b/debian/patches-rt/0023-printk-reduce-setup_text_buf-size-to-LOG_LINE_MAX.patch
@@ -0,0 +1,28 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Wed, 30 Sep 2020 11:07:34 +0206
+Subject: [PATCH 23/25] printk: reduce setup_text_buf size to LOG_LINE_MAX
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+@setup_text_buf only holds copies of the original message text (without
+any prefix or extended text), so it only needs to be LOG_LINE_MAX in
+size.
+
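+For reference, a hedged sketch of how setup_log_buf() fills this buffer
+when copying the early records into the dynamic ringbuffer (the loop is
+abridged; names follow the kernel code):
+
+	prb_rec_init_rd(&r, &info, setup_text_buf, sizeof(setup_text_buf));
+	prb_for_each_record(0, &printk_rb_static, seq, &r)
+		free -= add_to_rb(&printk_rb_dynamic, &r);
+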
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20200930090134.8723-3-john.ogness@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1103,7 +1103,7 @@ static unsigned int __init add_to_rb(str
+ return prb_record_text_space(&e);
+ }
+
+-static char setup_text_buf[CONSOLE_EXT_LOG_MAX] __initdata;
++static char setup_text_buf[LOG_LINE_MAX] __initdata;
+
+ void __init setup_log_buf(int early)
+ {
diff --git a/debian/patches-rt/0024-printk-Use-fallthrough-pseudo-keyword.patch b/debian/patches-rt/0024-printk-Use-fallthrough-pseudo-keyword.patch
new file mode 100644
index 000000000..3319c2e0a
--- /dev/null
+++ b/debian/patches-rt/0024-printk-Use-fallthrough-pseudo-keyword.patch
@@ -0,0 +1,29 @@
+From: "Gustavo A. R. Silva" <gustavoars@kernel.org>
+Date: Fri, 2 Oct 2020 17:46:27 -0500
+Subject: [PATCH 24/25] printk: Use fallthrough pseudo-keyword
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+Replace the /* FALL THRU */ comment with the new pseudo-keyword macro
+fallthrough [1].
+
+[1] https://www.kernel.org/doc/html/v5.7/process/deprecated.html?highlight=fallthrough#implicit-switch-case-fall-through
+
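+For reference, the pseudo-keyword expands roughly as follows (a sketch
+of include/linux/compiler_attributes.h, not part of this patch):
+
+	#if __has_attribute(__fallthrough__)
+	# define fallthrough	__attribute__((__fallthrough__))
+	#else
+	# define fallthrough	do {} while (0)	/* fallthrough */
+	#endif
+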
+Signed-off-by: Gustavo A. R. Silva <gustavoars@kernel.org>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20201002224627.GA30475@embeddedor
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1617,7 +1617,7 @@ int do_syslog(int type, char __user *buf
+ /* Read/clear last kernel messages */
+ case SYSLOG_ACTION_READ_CLEAR:
+ clear = true;
+- /* FALL THRU */
++ fallthrough;
+ /* Read last kernel messages */
+ case SYSLOG_ACTION_READ_ALL:
+ if (!buf || len < 0)
diff --git a/debian/patches-rt/0024-printk-implement-kmsg_dump.patch b/debian/patches-rt/0024-printk-implement-kmsg_dump.patch
deleted file mode 100644
index 926cbf586..000000000
--- a/debian/patches-rt/0024-printk-implement-kmsg_dump.patch
+++ /dev/null
@@ -1,398 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Tue, 12 Feb 2019 15:30:02 +0100
-Subject: [PATCH 24/25] printk: implement kmsg_dump
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-Since printk messages are now logged to a new ring buffer, update
-the kmsg_dump functions to pull the messages from there.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/kmsg_dump.h | 6 -
- kernel/printk/printk.c | 258 ++++++++++++++++++++++++----------------------
- 2 files changed, 139 insertions(+), 125 deletions(-)
-
---- a/include/linux/kmsg_dump.h
-+++ b/include/linux/kmsg_dump.h
-@@ -45,10 +45,8 @@ struct kmsg_dumper {
- bool registered;
-
- /* private state of the kmsg iterator */
-- u32 cur_idx;
-- u32 next_idx;
-- u64 cur_seq;
-- u64 next_seq;
-+ u64 line_seq;
-+ u64 buffer_end_seq;
- };
-
- #ifdef CONFIG_PRINTK
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -417,13 +417,13 @@ static size_t syslog_partial;
- static bool syslog_time;
-
- /* index and sequence number of the first record stored in the buffer */
--static u64 log_first_seq;
- static u32 log_first_idx;
-
- /* index and sequence number of the next record to store in the buffer */
--static u64 log_next_seq;
- static u32 log_next_idx;
-
-+static DEFINE_MUTEX(kmsg_dump_lock);
-+
- /* the next printk record to read after the last 'clear' command */
- static u64 clear_seq;
- static u32 clear_idx;
-@@ -470,38 +470,6 @@ static char *log_dict(const struct print
- return (char *)msg + sizeof(struct printk_log) + msg->text_len;
- }
-
--/* get record by index; idx must point to valid msg */
--static struct printk_log *log_from_idx(u32 idx)
--{
-- struct printk_log *msg = (struct printk_log *)(log_buf + idx);
--
-- /*
-- * A length == 0 record is the end of buffer marker. Wrap around and
-- * read the message at the start of the buffer.
-- */
-- if (!msg->len)
-- return (struct printk_log *)log_buf;
-- return msg;
--}
--
--/* get next record; idx must point to valid msg */
--static u32 log_next(u32 idx)
--{
-- struct printk_log *msg = (struct printk_log *)(log_buf + idx);
--
-- /* length == 0 indicates the end of the buffer; wrap */
-- /*
-- * A length == 0 record is the end of buffer marker. Wrap around and
-- * read the message at the start of the buffer as *this* one, and
-- * return the one after that.
-- */
-- if (!msg->len) {
-- msg = (struct printk_log *)log_buf;
-- return msg->len;
-- }
-- return idx + msg->len;
--}
--
- static void printk_emergency(char *buffer, int level, u64 ts_nsec, u16 cpu,
- char *text, u16 text_len);
-
-@@ -2114,9 +2082,7 @@ EXPORT_SYMBOL(printk);
- #define printk_time false
-
- static u64 syslog_seq;
--static u64 log_first_seq;
- static u32 log_first_idx;
--static u64 log_next_seq;
- static char *log_text(const struct printk_log *msg) { return NULL; }
- static char *log_dict(const struct printk_log *msg) { return NULL; }
- static struct printk_log *log_from_idx(u32 idx) { return NULL; }
-@@ -3093,7 +3059,6 @@ EXPORT_SYMBOL_GPL(kmsg_dump_reason_str);
- void kmsg_dump(enum kmsg_dump_reason reason)
- {
- struct kmsg_dumper *dumper;
-- unsigned long flags;
-
- rcu_read_lock();
- list_for_each_entry_rcu(dumper, &dump_list, list) {
-@@ -3113,12 +3078,7 @@ void kmsg_dump(enum kmsg_dump_reason rea
- /* initialize iterator with data about the stored records */
- dumper->active = true;
-
-- logbuf_lock_irqsave(flags);
-- dumper->cur_seq = clear_seq;
-- dumper->cur_idx = clear_idx;
-- dumper->next_seq = log_next_seq;
-- dumper->next_idx = log_next_idx;
-- logbuf_unlock_irqrestore(flags);
-+ kmsg_dump_rewind(dumper);
-
- /* invoke dumper which will iterate over records */
- dumper->dump(dumper, reason);
-@@ -3151,33 +3111,67 @@ void kmsg_dump(enum kmsg_dump_reason rea
- bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
- char *line, size_t size, size_t *len)
- {
-+ struct prb_iterator iter;
- struct printk_log *msg;
-- size_t l = 0;
-- bool ret = false;
-+ struct prb_handle h;
-+ bool cont = false;
-+ char *msgbuf;
-+ char *rbuf;
-+ size_t l;
-+ u64 seq;
-+ int ret;
-
- if (!dumper->active)
-- goto out;
-+ return cont;
-+
-+ rbuf = prb_reserve(&h, &sprint_rb, PRINTK_RECORD_MAX);
-+ if (!rbuf)
-+ return cont;
-+ msgbuf = rbuf;
-+retry:
-+ for (;;) {
-+ prb_iter_init(&iter, &printk_rb, &seq);
-+
-+ if (dumper->line_seq == seq) {
-+ /* already where we want to be */
-+ break;
-+ } else if (dumper->line_seq < seq) {
-+ /* messages are gone, move to first available one */
-+ dumper->line_seq = seq;
-+ break;
-+ }
-
-- if (dumper->cur_seq < log_first_seq) {
-- /* messages are gone, move to first available one */
-- dumper->cur_seq = log_first_seq;
-- dumper->cur_idx = log_first_idx;
-+ ret = prb_iter_seek(&iter, dumper->line_seq);
-+ if (ret > 0) {
-+ /* seeked to line_seq */
-+ break;
-+ } else if (ret == 0) {
-+ /*
-+ * The end of the list was hit without ever seeing
-+ * line_seq. Reset it to the beginning of the list.
-+ */
-+ prb_iter_init(&iter, &printk_rb, &dumper->line_seq);
-+ break;
-+ }
-+ /* iterator invalid, start over */
- }
-
-- /* last entry */
-- if (dumper->cur_seq >= log_next_seq)
-+ ret = prb_iter_next(&iter, msgbuf, PRINTK_RECORD_MAX,
-+ &dumper->line_seq);
-+ if (ret == 0)
- goto out;
-+ else if (ret < 0)
-+ goto retry;
-
-- msg = log_from_idx(dumper->cur_idx);
-+ msg = (struct printk_log *)msgbuf;
- l = msg_print_text(msg, syslog, printk_time, line, size);
-
-- dumper->cur_idx = log_next(dumper->cur_idx);
-- dumper->cur_seq++;
-- ret = true;
--out:
- if (len)
- *len = l;
-- return ret;
-+ cont = true;
-+out:
-+ prb_commit(&h);
-+ return cont;
- }
-
- /**
-@@ -3200,12 +3194,11 @@ bool kmsg_dump_get_line_nolock(struct km
- bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
- char *line, size_t size, size_t *len)
- {
-- unsigned long flags;
- bool ret;
-
-- logbuf_lock_irqsave(flags);
-+ mutex_lock(&kmsg_dump_lock);
- ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len);
-- logbuf_unlock_irqrestore(flags);
-+ mutex_unlock(&kmsg_dump_lock);
-
- return ret;
- }
-@@ -3233,74 +3226,101 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
- bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
- char *buf, size_t size, size_t *len)
- {
-- unsigned long flags;
-- u64 seq;
-- u32 idx;
-- u64 next_seq;
-- u32 next_idx;
-- size_t l = 0;
-- bool ret = false;
-+ struct prb_iterator iter;
- bool time = printk_time;
-+ struct printk_log *msg;
-+ u64 new_end_seq = 0;
-+ struct prb_handle h;
-+ bool cont = false;
-+ char *msgbuf;
-+ u64 end_seq;
-+ int textlen;
-+ u64 seq = 0;
-+ char *rbuf;
-+ int l = 0;
-+ int ret;
-
- if (!dumper->active)
-- goto out;
-+ return cont;
-
-- logbuf_lock_irqsave(flags);
-- if (dumper->cur_seq < log_first_seq) {
-- /* messages are gone, move to first available one */
-- dumper->cur_seq = log_first_seq;
-- dumper->cur_idx = log_first_idx;
-- }
-+ rbuf = prb_reserve(&h, &sprint_rb, PRINTK_RECORD_MAX);
-+ if (!rbuf)
-+ return cont;
-+ msgbuf = rbuf;
-
-- /* last entry */
-- if (dumper->cur_seq >= dumper->next_seq) {
-- logbuf_unlock_irqrestore(flags);
-- goto out;
-- }
--
-- /* calculate length of entire buffer */
-- seq = dumper->cur_seq;
-- idx = dumper->cur_idx;
-- while (seq < dumper->next_seq) {
-- struct printk_log *msg = log_from_idx(idx);
-+ prb_iter_init(&iter, &printk_rb, NULL);
-
-- l += msg_print_text(msg, true, time, NULL, 0);
-- idx = log_next(idx);
-- seq++;
-+ /*
-+ * seek to the start record, which is set/modified
-+ * by kmsg_dump_get_line_nolock()
-+ */
-+ ret = prb_iter_seek(&iter, dumper->line_seq);
-+ if (ret <= 0)
-+ prb_iter_init(&iter, &printk_rb, &seq);
-+
-+ /* work with a local end seq to have a constant value */
-+ end_seq = dumper->buffer_end_seq;
-+ if (!end_seq) {
-+ /* initialize end seq to "infinity" */
-+ end_seq = -1;
-+ dumper->buffer_end_seq = end_seq;
- }
-+retry:
-+ if (seq >= end_seq)
-+ goto out;
-
-- /* move first record forward until length fits into the buffer */
-- seq = dumper->cur_seq;
-- idx = dumper->cur_idx;
-- while (l >= size && seq < dumper->next_seq) {
-- struct printk_log *msg = log_from_idx(idx);
-+ /* count the total bytes after seq */
-+ textlen = count_remaining(&iter, end_seq, msgbuf,
-+ PRINTK_RECORD_MAX, 0, time);
-
-- l -= msg_print_text(msg, true, time, NULL, 0);
-- idx = log_next(idx);
-- seq++;
-+ /* move iter forward until length fits into the buffer */
-+ while (textlen > size) {
-+ ret = prb_iter_next(&iter, msgbuf, PRINTK_RECORD_MAX, &seq);
-+ if (ret == 0) {
-+ break;
-+ } else if (ret < 0) {
-+ prb_iter_init(&iter, &printk_rb, &seq);
-+ goto retry;
-+ }
-+
-+ msg = (struct printk_log *)msgbuf;
-+ textlen -= msg_print_text(msg, true, time, NULL, 0);
- }
-
-- /* last message in next interation */
-- next_seq = seq;
-- next_idx = idx;
-+ /* save end seq for the next interation */
-+ new_end_seq = seq + 1;
-
-- l = 0;
-- while (seq < dumper->next_seq) {
-- struct printk_log *msg = log_from_idx(idx);
-+ /* copy messages to buffer */
-+ while (l < size) {
-+ ret = prb_iter_next(&iter, msgbuf, PRINTK_RECORD_MAX, &seq);
-+ if (ret == 0) {
-+ break;
-+ } else if (ret < 0) {
-+ /*
-+ * iterator (and thus also the start position)
-+ * invalid, start over from beginning of list
-+ */
-+ prb_iter_init(&iter, &printk_rb, NULL);
-+ continue;
-+ }
-
-- l += msg_print_text(msg, syslog, time, buf + l, size - l);
-- idx = log_next(idx);
-- seq++;
-+ if (seq >= end_seq)
-+ break;
-+
-+ msg = (struct printk_log *)msgbuf;
-+ textlen = msg_print_text(msg, syslog, time, buf + l, size - l);
-+ if (textlen > 0)
-+ l += textlen;
-+ cont = true;
- }
-
-- dumper->next_seq = next_seq;
-- dumper->next_idx = next_idx;
-- ret = true;
-- logbuf_unlock_irqrestore(flags);
--out:
-- if (len)
-+ if (cont && len)
- *len = l;
-- return ret;
-+out:
-+ prb_commit(&h);
-+ if (new_end_seq)
-+ dumper->buffer_end_seq = new_end_seq;
-+ return cont;
- }
- EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
-
-@@ -3316,10 +3336,8 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
- */
- void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper)
- {
-- dumper->cur_seq = clear_seq;
-- dumper->cur_idx = clear_idx;
-- dumper->next_seq = log_next_seq;
-- dumper->next_idx = log_next_idx;
-+ dumper->line_seq = 0;
-+ dumper->buffer_end_seq = 0;
- }
-
- /**
-@@ -3332,11 +3350,9 @@ void kmsg_dump_rewind_nolock(struct kmsg
- */
- void kmsg_dump_rewind(struct kmsg_dumper *dumper)
- {
-- unsigned long flags;
--
-- logbuf_lock_irqsave(flags);
-+ mutex_lock(&kmsg_dump_lock);
- kmsg_dump_rewind_nolock(dumper);
-- logbuf_unlock_irqrestore(flags);
-+ mutex_unlock(&kmsg_dump_lock);
- }
- EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
-
diff --git a/debian/patches-rt/0024-xfrm-Use-sequence-counter-with-associated-spinlock.patch b/debian/patches-rt/0024-xfrm-Use-sequence-counter-with-associated-spinlock.patch
index c09ce85f1..8518dca78 100644
--- a/debian/patches-rt/0024-xfrm-Use-sequence-counter-with-associated-spinlock.patch
+++ b/debian/patches-rt/0024-xfrm-Use-sequence-counter-with-associated-spinlock.patch
@@ -1,7 +1,7 @@
From: "Ahmed S. Darwish" <a.darwish@linutronix.de>
Date: Wed, 10 Jun 2020 12:53:22 +0200
Subject: [PATCH 24/24] xfrm: Use sequence counter with associated spinlock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
A sequence counter write side critical section must be protected by some
form of locking to serialize writers. A plain seqcount_t does not
@@ -49,7 +49,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_lock_bh(&net->xfrm.xfrm_state_lock);
write_seqcount_begin(&xfrm_state_hash_generation);
-@@ -2557,6 +2562,8 @@ int __net_init xfrm_state_init(struct ne
+@@ -2589,6 +2594,8 @@ int __net_init xfrm_state_init(struct ne
net->xfrm.state_num = 0;
INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
spin_lock_init(&net->xfrm.xfrm_state_lock);
diff --git a/debian/patches-rt/0025-printk-remove-unused-code.patch b/debian/patches-rt/0025-printk-remove-unused-code.patch
deleted file mode 100644
index 98539d3ab..000000000
--- a/debian/patches-rt/0025-printk-remove-unused-code.patch
+++ /dev/null
@@ -1,362 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Tue, 12 Feb 2019 15:30:03 +0100
-Subject: [PATCH 25/25] printk: remove unused code
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-Code relating to the safe context and anything dealing with the
-previous log buffer implementation is no longer in use. Remove it.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/printk/internal.h | 43 ------------
- kernel/printk/printk.c | 167 ++++-------------------------------------------
- lib/bust_spinlocks.c | 3
- 3 files changed, 16 insertions(+), 197 deletions(-)
- delete mode 100644 kernel/printk/internal.h
-
---- a/kernel/printk/internal.h
-+++ /dev/null
-@@ -1,43 +0,0 @@
--/* SPDX-License-Identifier: GPL-2.0-or-later */
--/*
-- * internal.h - printk internal definitions
-- */
--#include <linux/percpu.h>
--
--#ifdef CONFIG_PRINTK
--
--#define PRINTK_SAFE_CONTEXT_MASK 0x007ffffff
--#define PRINTK_NMI_DIRECT_CONTEXT_MASK 0x008000000
--#define PRINTK_NMI_CONTEXT_MASK 0xff0000000
--
--#define PRINTK_NMI_CONTEXT_OFFSET 0x010000000
--
--extern raw_spinlock_t logbuf_lock;
--
--__printf(5, 0)
--int vprintk_store(int facility, int level,
-- const char *dict, size_t dictlen,
-- const char *fmt, va_list args);
--
--__printf(1, 0) int vprintk_default(const char *fmt, va_list args);
--__printf(1, 0) int vprintk_deferred(const char *fmt, va_list args);
--__printf(1, 0) int vprintk_func(const char *fmt, va_list args);
--
--void defer_console_output(void);
--
--#else
--
--__printf(1, 0) int vprintk_func(const char *fmt, va_list args) { return 0; }
--
--/*
-- * In !PRINTK builds we still export logbuf_lock spin_lock, console_sem
-- * semaphore and some of console functions (console_unlock()/etc.), so
-- * printk-safe must preserve the existing local IRQ guarantees.
-- */
--#endif /* CONFIG_PRINTK */
--
--#define printk_safe_enter_irqsave(flags) local_irq_save(flags)
--#define printk_safe_exit_irqrestore(flags) local_irq_restore(flags)
--
--#define printk_safe_enter_irq() local_irq_disable()
--#define printk_safe_exit_irq() local_irq_enable()
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -60,7 +60,6 @@
-
- #include "console_cmdline.h"
- #include "braille.h"
--#include "internal.h"
-
- int console_printk[5] = {
- CONSOLE_LOGLEVEL_DEFAULT, /* console_loglevel */
-@@ -366,41 +365,6 @@ struct printk_log {
- #endif
- ;
-
--/*
-- * The logbuf_lock protects kmsg buffer, indices, counters. This can be taken
-- * within the scheduler's rq lock. It must be released before calling
-- * console_unlock() or anything else that might wake up a process.
-- */
--DEFINE_RAW_SPINLOCK(logbuf_lock);
--
--/*
-- * Helper macros to lock/unlock logbuf_lock and switch between
-- * printk-safe/unsafe modes.
-- */
--#define logbuf_lock_irq() \
-- do { \
-- printk_safe_enter_irq(); \
-- raw_spin_lock(&logbuf_lock); \
-- } while (0)
--
--#define logbuf_unlock_irq() \
-- do { \
-- raw_spin_unlock(&logbuf_lock); \
-- printk_safe_exit_irq(); \
-- } while (0)
--
--#define logbuf_lock_irqsave(flags) \
-- do { \
-- printk_safe_enter_irqsave(flags); \
-- raw_spin_lock(&logbuf_lock); \
-- } while (0)
--
--#define logbuf_unlock_irqrestore(flags) \
-- do { \
-- raw_spin_unlock(&logbuf_lock); \
-- printk_safe_exit_irqrestore(flags); \
-- } while (0)
--
- DECLARE_STATIC_PRINTKRB_CPULOCK(printk_cpulock);
-
- #ifdef CONFIG_PRINTK
-@@ -410,23 +374,15 @@ DECLARE_STATIC_PRINTKRB(printk_rb, CONFI
- static DEFINE_MUTEX(syslog_lock);
- DECLARE_STATIC_PRINTKRB_ITER(syslog_iter, &printk_rb);
-
--DECLARE_WAIT_QUEUE_HEAD(log_wait);
--/* the next printk record to read by syslog(READ) or /proc/kmsg */
-+/* the last printk record to read by syslog(READ) or /proc/kmsg */
- static u64 syslog_seq;
- static size_t syslog_partial;
- static bool syslog_time;
-
--/* index and sequence number of the first record stored in the buffer */
--static u32 log_first_idx;
--
--/* index and sequence number of the next record to store in the buffer */
--static u32 log_next_idx;
--
- static DEFINE_MUTEX(kmsg_dump_lock);
-
- /* the next printk record to read after the last 'clear' command */
- static u64 clear_seq;
--static u32 clear_idx;
-
- #ifdef CONFIG_PRINTK_CALLER
- #define PREFIX_MAX 48
-@@ -438,24 +394,16 @@ static u32 clear_idx;
- #define LOG_LEVEL(v) ((v) & 0x07)
- #define LOG_FACILITY(v) ((v) >> 3 & 0xff)
-
--/* record buffer */
--#define LOG_ALIGN __alignof__(struct printk_log)
--#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
--#define LOG_BUF_LEN_MAX (u32)(1 << 31)
--static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
--static char *log_buf = __log_buf;
--static u32 log_buf_len = __LOG_BUF_LEN;
--
- /* Return log buffer address */
- char *log_buf_addr_get(void)
- {
-- return log_buf;
-+ return printk_rb.buffer;
- }
-
- /* Return log buffer size */
- u32 log_buf_len_get(void)
- {
-- return log_buf_len;
-+ return (1 << printk_rb.size_bits);
- }
-
- /* human readable text of the record */
-@@ -988,11 +936,6 @@ const struct file_operations kmsg_fops =
- */
- void log_buf_vmcoreinfo_setup(void)
- {
-- VMCOREINFO_SYMBOL(log_buf);
-- VMCOREINFO_SYMBOL(log_buf_len);
-- VMCOREINFO_SYMBOL(log_first_idx);
-- VMCOREINFO_SYMBOL(clear_idx);
-- VMCOREINFO_SYMBOL(log_next_idx);
- /*
- * Export struct printk_log size and field offsets. User space tools can
- * parse it and detect any changes to structure down the line.
-@@ -1008,6 +951,8 @@ void log_buf_vmcoreinfo_setup(void)
- }
- #endif
-
-+/* FIXME: no support for buffer resizing */
-+#if 0
- /* requested log_buf_len from kernel cmdline */
- static unsigned long __initdata new_log_buf_len;
-
-@@ -1073,9 +1018,12 @@ static void __init log_buf_add_cpu(void)
- #else /* !CONFIG_SMP */
- static inline void log_buf_add_cpu(void) {}
- #endif /* CONFIG_SMP */
-+#endif /* 0 */
-
- void __init setup_log_buf(int early)
- {
-+/* FIXME: no support for buffer resizing */
-+#if 0
- unsigned long flags;
- char *new_log_buf;
- unsigned int free;
-@@ -1107,6 +1055,7 @@ void __init setup_log_buf(int early)
- pr_info("log_buf_len: %u bytes\n", log_buf_len);
- pr_info("early log buf free: %u(%u%%)\n",
- free, (free * 100) / __LOG_BUF_LEN);
-+#endif
- }
-
- static bool __read_mostly ignore_loglevel;
-@@ -2024,7 +1973,7 @@ asmlinkage int vprintk_emit(int facility
- }
- EXPORT_SYMBOL(vprintk_emit);
-
--__printf(1, 0) int vprintk_func(const char *fmt, va_list args)
-+static __printf(1, 0) int vprintk_func(const char *fmt, va_list args)
- {
- return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
- }
-@@ -2035,12 +1984,6 @@ asmlinkage int vprintk(const char *fmt,
- }
- EXPORT_SYMBOL(vprintk);
-
--int vprintk_default(const char *fmt, va_list args)
--{
-- return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
--}
--EXPORT_SYMBOL_GPL(vprintk_default);
--
- /**
- * printk - print a kernel message
- * @fmt: format string
-@@ -2074,31 +2017,6 @@ asmlinkage __visible int printk(const ch
- return r;
- }
- EXPORT_SYMBOL(printk);
--
--#else /* CONFIG_PRINTK */
--
--#define LOG_LINE_MAX 0
--#define PREFIX_MAX 0
--#define printk_time false
--
--static u64 syslog_seq;
--static u32 log_first_idx;
--static char *log_text(const struct printk_log *msg) { return NULL; }
--static char *log_dict(const struct printk_log *msg) { return NULL; }
--static struct printk_log *log_from_idx(u32 idx) { return NULL; }
--static u32 log_next(u32 idx) { return 0; }
--static ssize_t msg_print_ext_header(char *buf, size_t size,
-- struct printk_log *msg,
-- u64 seq) { return 0; }
--static ssize_t msg_print_ext_body(char *buf, size_t size,
-- char *dict, size_t dict_len,
-- char *text, size_t text_len) { return 0; }
--static void call_console_drivers(u64 seq, const char *ext_text, size_t ext_len,
-- const char *text, size_t len, int level) {}
--static size_t msg_print_text(const struct printk_log *msg, bool syslog,
-- bool time, char *buf, size_t size) { return 0; }
--static bool suppress_message_printing(int level) { return false; }
--
- #endif /* CONFIG_PRINTK */
-
- #ifdef CONFIG_EARLY_PRINTK
-@@ -2402,15 +2320,10 @@ void console_unblank(void)
- void console_flush_on_panic(enum con_flush_mode mode)
- {
- /*
-- * If someone else is holding the console lock, trylock will fail
-- * and may_schedule may be set. Ignore and proceed to unlock so
-- * that messages are flushed out. As this can be called from any
-- * context and we don't want to get preempted while flushing,
-- * ensure may_schedule is cleared.
-+ * FIXME: This is currently a NOP. Emergency messages will have been
-+ * printed, but what about if write_atomic is not available on the
-+ * console? What if the printk kthread is still alive?
- */
-- console_trylock();
-- console_may_schedule = 0;
-- console_unlock();
- }
-
- /*
-@@ -2801,43 +2714,6 @@ static int __init printk_late_init(void)
- late_initcall(printk_late_init);
-
- #if defined CONFIG_PRINTK
--/*
-- * Delayed printk version, for scheduler-internal messages:
-- */
--#define PRINTK_PENDING_WAKEUP 0x01
--#define PRINTK_PENDING_OUTPUT 0x02
--
--static DEFINE_PER_CPU(int, printk_pending);
--
--static void wake_up_klogd_work_func(struct irq_work *irq_work)
--{
-- int pending = __this_cpu_xchg(printk_pending, 0);
--
-- if (pending & PRINTK_PENDING_OUTPUT) {
-- /* If trylock fails, someone else is doing the printing */
-- if (console_trylock())
-- console_unlock();
-- }
--
-- if (pending & PRINTK_PENDING_WAKEUP)
-- wake_up_interruptible(&log_wait);
--}
--
--static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
-- .func = wake_up_klogd_work_func,
-- .flags = ATOMIC_INIT(IRQ_WORK_LAZY),
--};
--
--void wake_up_klogd(void)
--{
-- preempt_disable();
-- if (waitqueue_active(&log_wait)) {
-- this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
-- irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
-- }
-- preempt_enable();
--}
--
- static int printk_kthread_func(void *data)
- {
- struct prb_iterator iter;
-@@ -2904,22 +2780,9 @@ static int __init init_printk_kthread(vo
- }
- late_initcall(init_printk_kthread);
-
--void defer_console_output(void)
-+__printf(1, 0) static int vprintk_deferred(const char *fmt, va_list args)
- {
-- preempt_disable();
-- __this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT);
-- irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
-- preempt_enable();
--}
--
--int vprintk_deferred(const char *fmt, va_list args)
--{
-- int r;
--
-- r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, 0, fmt, args);
-- defer_console_output();
--
-- return r;
-+ return vprintk_emit(0, LOGLEVEL_SCHED, NULL, 0, fmt, args);
- }
-
- int printk_deferred(const char *fmt, ...)
---- a/lib/bust_spinlocks.c
-+++ b/lib/bust_spinlocks.c
-@@ -26,7 +26,6 @@ void bust_spinlocks(int yes)
- unblank_screen();
- #endif
- console_unblank();
-- if (--oops_in_progress == 0)
-- wake_up_klogd();
-+ --oops_in_progress;
- }
- }
diff --git a/debian/patches-rt/0025-printk-ringbuffer-Wrong-data-pointer-when-appending-.patch b/debian/patches-rt/0025-printk-ringbuffer-Wrong-data-pointer-when-appending-.patch
new file mode 100644
index 000000000..076e2d98f
--- /dev/null
+++ b/debian/patches-rt/0025-printk-ringbuffer-Wrong-data-pointer-when-appending-.patch
@@ -0,0 +1,141 @@
+From: Petr Mladek <mladek.petr@gmail.com>
+Date: Wed, 14 Oct 2020 19:50:51 +0200
+Subject: [PATCH 25/25] printk: ringbuffer: Wrong data pointer when appending
+ small string
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+data_realloc() returns wrong data pointer when the block is wrapped and
+the size is not increased. It might happen when pr_cont() wants to
+add only few characters and there is already a space for them because
+of alignment.
+
+It might cause writing outsite the buffer. It has been detected by LTP
+tests with KASAN enabled:
+
+[ 221.921944] oom-kill:constraint=CONSTRAINT_MEMCG,nodemask=(null),cpuset=c,mems_allowed=0,oom_memcg=/0,task_memcg=in
+[ 221.922108] ==================================================================
+[ 221.922111] BUG: KASAN: global-out-of-bounds in vprintk_store+0x362/0x3d0
+[ 221.922112] Write of size 2 at addr ffffffffba51dbcd by task
+memcg_test_1/11282
+[ 221.922113]
+[ 221.922114] CPU: 1 PID: 11282 Comm: memcg_test_1 Not tainted
+5.9.0-next-20201013 #1
+[ 221.922116] Hardware name: Supermicro SYS-5019S-ML/X11SSH-F, BIOS
+2.0b 07/27/2017
+[ 221.922116] Call Trace:
+[ 221.922117] dump_stack+0xa4/0xd9
+[ 221.922118] print_address_description.constprop.0+0x21/0x210
+[ 221.922119] ? _raw_write_lock_bh+0xe0/0xe0
+[ 221.922120] ? vprintk_store+0x362/0x3d0
+[ 221.922121] kasan_report.cold+0x37/0x7c
+[ 221.922122] ? vprintk_store+0x362/0x3d0
+[ 221.922123] check_memory_region+0x18c/0x1f0
+[ 221.922124] memcpy+0x3c/0x60
+[ 221.922125] vprintk_store+0x362/0x3d0
+[ 221.922125] ? __ia32_sys_syslog+0x50/0x50
+[ 221.922126] ? _raw_spin_lock_irqsave+0x9b/0x100
+[ 221.922127] ? _raw_spin_lock_irq+0xf0/0xf0
+[ 221.922128] ? __kasan_check_write+0x14/0x20
+[ 221.922129] vprintk_emit+0x8d/0x1f0
+[ 221.922130] vprintk_default+0x1d/0x20
+[ 221.922131] vprintk_func+0x5a/0x100
+[ 221.922132] printk+0xb2/0xe3
+[ 221.922133] ? swsusp_write.cold+0x189/0x189
+[ 221.922134] ? kernfs_vfs_xattr_set+0x60/0x60
+[ 221.922134] ? _raw_write_lock_bh+0xe0/0xe0
+[ 221.922135] ? trace_hardirqs_on+0x38/0x100
+[ 221.922136] pr_cont_kernfs_path.cold+0x49/0x4b
+[ 221.922137] mem_cgroup_print_oom_context.cold+0x74/0xc3
+[ 221.922138] dump_header+0x340/0x3bf
+[ 221.922139] oom_kill_process.cold+0xb/0x10
+[ 221.922140] out_of_memory+0x1e9/0x860
+[ 221.922141] ? oom_killer_disable+0x210/0x210
+[ 221.922142] mem_cgroup_out_of_memory+0x198/0x1c0
+[ 221.922143] ? mem_cgroup_count_precharge_pte_range+0x250/0x250
+[ 221.922144] try_charge+0xa9b/0xc50
+[ 221.922145] ? arch_stack_walk+0x9e/0xf0
+[ 221.922146] ? memory_high_write+0x230/0x230
+[ 221.922146] ? avc_has_extended_perms+0x830/0x830
+[ 221.922147] ? stack_trace_save+0x94/0xc0
+[ 221.922148] ? stack_trace_consume_entry+0x90/0x90
+[ 221.922149] __memcg_kmem_charge+0x73/0x120
+[ 221.922150] ? cred_has_capability+0x10f/0x200
+[ 221.922151] ? mem_cgroup_can_attach+0x260/0x260
+[ 221.922152] ? selinux_sb_eat_lsm_opts+0x2f0/0x2f0
+[ 221.922153] ? obj_cgroup_charge+0x16b/0x220
+[ 221.922154] ? kmem_cache_alloc+0x78/0x4c0
+[ 221.922155] obj_cgroup_charge+0x122/0x220
+[ 221.922156] ? vm_area_alloc+0x20/0x90
+[ 221.922156] kmem_cache_alloc+0x78/0x4c0
+[ 221.922157] vm_area_alloc+0x20/0x90
+[ 221.922158] mmap_region+0x3ed/0x9a0
+[ 221.922159] ? cap_mmap_addr+0x1d/0x80
+[ 221.922160] do_mmap+0x3ee/0x720
+[ 221.922161] vm_mmap_pgoff+0x16a/0x1c0
+[ 221.922162] ? randomize_stack_top+0x90/0x90
+[ 221.922163] ? copy_page_range+0x1980/0x1980
+[ 221.922163] ksys_mmap_pgoff+0xab/0x350
+[ 221.922164] ? find_mergeable_anon_vma+0x110/0x110
+[ 221.922165] ? __audit_syscall_entry+0x1a6/0x1e0
+[ 221.922166] __x64_sys_mmap+0x8d/0xb0
+[ 221.922167] do_syscall_64+0x38/0x50
+[ 221.922168] entry_SYSCALL_64_after_hwframe+0x44/0xa9
+[ 221.922169] RIP: 0033:0x7fe8f5e75103
+[ 221.922172] Code: 54 41 89 d4 55 48 89 fd 53 4c 89 cb 48 85 ff 74
+56 49 89 d9 45 89 f8 45 89 f2 44 89 e2 4c 89 ee 48 89 ef b8 09 00 00
+00 0f 05 <48> 3d 00 f0 ff ff 77 7d 5b 5d 41 5c 41 5d 41 5e 41 5f c3 66
+2e 0f
+[ 221.922173] RSP: 002b:00007ffd38c90198 EFLAGS: 00000246 ORIG_RAX:
+0000000000000009
+[ 221.922175] RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007fe8f5e75103
+[ 221.922176] RDX: 0000000000000003 RSI: 0000000000001000 RDI: 0000000000000000
+[ 221.922178] RBP: 0000000000000000 R08: 0000000000000000 R09: 0000000000000000
+[ 221.922179] R10: 0000000000002022 R11: 0000000000000246 R12: 0000000000000003
+[ 221.922180] R13: 0000000000001000 R14: 0000000000002022 R15: 0000000000000000
+[ 221.922181]
+[  221.922182] The buggy address belongs to the variable:
+[ 221.922183] clear_seq+0x2d/0x40
+[ 221.922183]
+[ 221.922184] Memory state around the buggy address:
+[ 221.922185] ffffffffba51da80: 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00
+[ 221.922187] ffffffffba51db00: 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00
+[ 221.922188] >ffffffffba51db80: f9 f9 f9 f9 00 f9 f9 f9 f9 f9 f9 f9
+00 f9 f9 f9
+[ 221.922189] ^
+[ 221.922190] ffffffffba51dc00: f9 f9 f9 f9 00 f9 f9 f9 f9 f9 f9 f9
+00 f9 f9 f9
+[ 221.922191] ffffffffba51dc80: f9 f9 f9 f9 01 f9 f9 f9 f9 f9 f9 f9
+00 f9 f9 f9
+[ 221.922193] ==================================================================
+[ 221.922194] Disabling lock debugging due to kernel taint
+[ 221.922196] ,task=memcg_test_1,pid=11280,uid=0
+[ 221.922205] Memory cgroup out of memory: Killed process 11280
+
+Link: https://lore.kernel.org/r/CA+G9fYt46oC7-BKryNDaaXPJ9GztvS2cs_7GjYRjanRi4+ryCQ@mail.gmail.com
+Fixes: 4cfc7258f876a7feba673ac ("printk: ringbuffer: add finalization/extension support")
+Reported-by: Naresh Kamboju <naresh.kamboju@linaro.org>
+Reviewed-by: John Ogness <john.ogness@linutronix.de>
+Acked-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Link: https://lore.kernel.org/r/20201014175051.GC13775@alley
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk_ringbuffer.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/kernel/printk/printk_ringbuffer.c
++++ b/kernel/printk/printk_ringbuffer.c
+@@ -1125,7 +1125,10 @@ static char *data_realloc(struct printk_
+
+ /* If the data block does not increase, there is nothing to do. */
+ if (head_lpos - next_lpos < DATA_SIZE(data_ring)) {
+- blk = to_block(data_ring, blk_lpos->begin);
++ if (wrapped)
++ blk = to_block(data_ring, 0);
++ else
++ blk = to_block(data_ring, blk_lpos->begin);
+ return &blk->data[0];
+ }
+
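
A toy C model of the logical-position arithmetic behind the hunk above may help (this is not the kernel code; DATA_SIZE, data_index() and the sample positions are invented for illustration). It shows why a wrapped block must be addressed from index 0 of the data ring rather than from blk_lpos->begin:

#include <stdio.h>

#define DATA_SIZE 16u                      /* toy ring size, power of two */

/* Logical positions (lpos) grow without bound; the physical index is the
 * lpos masked by the ring size, like the kernel's DATA_INDEX() macro. */
static unsigned int data_index(unsigned int lpos)
{
	return lpos & (DATA_SIZE - 1);
}

int main(void)
{
	char ring[DATA_SIZE];
	unsigned int begin = 14, next = 20; /* 6-byte block wraps past 16 */
	int wrapped = data_index(begin) + (next - begin) > DATA_SIZE;

	/* A wrapped writer stored the data at index 0. Deriving the block
	 * pointer from `begin` (index 14) would miss it by 14 bytes --
	 * the out-of-bounds write KASAN flagged above. */
	char *blk = wrapped ? &ring[data_index(0)] : &ring[data_index(begin)];

	printf("data lives at index %td\n", blk - ring);
	return 0;
}

With begin = 14 and next = 20 the block wraps, so the corrected code takes the pointer from index 0, matching the `to_block(data_ring, 0)` branch the fix adds.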
diff --git a/debian/patches-rt/ARM-Allow-to-enable-RT.patch b/debian/patches-rt/ARM-Allow-to-enable-RT.patch
index 0603ff475..f0978b135 100644
--- a/debian/patches-rt/ARM-Allow-to-enable-RT.patch
+++ b/debian/patches-rt/ARM-Allow-to-enable-RT.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 11 Oct 2019 13:14:29 +0200
Subject: [PATCH] ARM: Allow to enable RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Allow to select RT.
diff --git a/debian/patches-rt/ARM-enable-irq-in-translation-section-permission-fau.patch b/debian/patches-rt/ARM-enable-irq-in-translation-section-permission-fau.patch
index 941fedc69..ead6f053f 100644
--- a/debian/patches-rt/ARM-enable-irq-in-translation-section-permission-fau.patch
+++ b/debian/patches-rt/ARM-enable-irq-in-translation-section-permission-fau.patch
@@ -1,7 +1,7 @@
From: "Yadi.hu" <yadi.hu@windriver.com>
Date: Wed, 10 Dec 2014 10:32:09 +0800
Subject: ARM: enable irq in translation/section permission fault handlers
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Probably happens on all ARM, with
CONFIG_PREEMPT_RT
diff --git a/debian/patches-rt/ARM64-Allow-to-enable-RT.patch b/debian/patches-rt/ARM64-Allow-to-enable-RT.patch
index 694c93162..4b745140f 100644
--- a/debian/patches-rt/ARM64-Allow-to-enable-RT.patch
+++ b/debian/patches-rt/ARM64-Allow-to-enable-RT.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 11 Oct 2019 13:14:35 +0200
Subject: [PATCH] ARM64: Allow to enable RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Allow to select RT.
diff --git a/debian/patches-rt/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch b/debian/patches-rt/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
index 762375c3b..e6d9c6e56 100644
--- a/debian/patches-rt/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
+++ b/debian/patches-rt/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
@@ -1,7 +1,7 @@
From: Josh Cartwright <joshc@ni.com>
Date: Thu, 11 Feb 2016 11:54:01 -0600
Subject: KVM: arm/arm64: downgrade preempt_disable()d region to migrate_disable()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
kvm_arch_vcpu_ioctl_run() disables the use of preemption when updating
the vgic and timer states to prevent the calling task from migrating to
diff --git a/debian/patches-rt/POWERPC-Allow-to-enable-RT.patch b/debian/patches-rt/POWERPC-Allow-to-enable-RT.patch
index aaeadcd74..d157c679b 100644
--- a/debian/patches-rt/POWERPC-Allow-to-enable-RT.patch
+++ b/debian/patches-rt/POWERPC-Allow-to-enable-RT.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 11 Oct 2019 13:14:41 +0200
Subject: [PATCH] POWERPC: Allow to enable RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Allow to select RT.
diff --git a/debian/patches-rt/Use-CONFIG_PREEMPTION.patch b/debian/patches-rt/Use-CONFIG_PREEMPTION.patch
index 2f3f24efc..18e0927f9 100644
--- a/debian/patches-rt/Use-CONFIG_PREEMPTION.patch
+++ b/debian/patches-rt/Use-CONFIG_PREEMPTION.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 26 Jul 2019 11:30:49 +0200
Subject: [PATCH] Use CONFIG_PREEMPTION
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
This is an all-in-one patch of the current `PREEMPTION' branch.
@@ -47,7 +47,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
that low-priority task is not permitted to run on any other CPU,
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
-@@ -259,12 +259,17 @@ static char *get_mmu_str(void)
+@@ -260,12 +260,17 @@ static char *get_mmu_str(void)
static int __die(const char *str, struct pt_regs *regs, long err)
{
diff --git a/debian/patches-rt/add_cpu_light.patch b/debian/patches-rt/add_cpu_light.patch
index 6e9884148..4c8cd58b8 100644
--- a/debian/patches-rt/add_cpu_light.patch
+++ b/debian/patches-rt/add_cpu_light.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Sat, 27 May 2017 19:02:06 +0200
Subject: kernel/sched: add {put|get}_cpu_light()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
diff --git a/debian/patches-rt/arch-arm64-Add-lazy-preempt-support.patch b/debian/patches-rt/arch-arm64-Add-lazy-preempt-support.patch
index 65b796501..7bf04fcd6 100644
--- a/debian/patches-rt/arch-arm64-Add-lazy-preempt-support.patch
+++ b/debian/patches-rt/arch-arm64-Add-lazy-preempt-support.patch
@@ -1,7 +1,7 @@
From: Anders Roxell <anders.roxell@linaro.org>
Date: Thu, 14 May 2015 17:52:17 +0200
Subject: arch/arm64: Add lazy preempt support
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
arm64 is missing support for PREEMPT_RT. The main feature which is
lacking is support for lazy preemption. The arch-specific entry code,
diff --git a/debian/patches-rt/arm-enable-highmem-for-rt.patch b/debian/patches-rt/arm-enable-highmem-for-rt.patch
index b205ddfd9..59bdc3530 100644
--- a/debian/patches-rt/arm-enable-highmem-for-rt.patch
+++ b/debian/patches-rt/arm-enable-highmem-for-rt.patch
@@ -1,7 +1,7 @@
Subject: arm: Enable highmem for rt
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 13 Feb 2013 11:03:11 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
fixup highmem for ARM.
diff --git a/debian/patches-rt/arm-highmem-flush-tlb-on-unmap.patch b/debian/patches-rt/arm-highmem-flush-tlb-on-unmap.patch
index 077ba4267..0d6da4420 100644
--- a/debian/patches-rt/arm-highmem-flush-tlb-on-unmap.patch
+++ b/debian/patches-rt/arm-highmem-flush-tlb-on-unmap.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 11 Mar 2013 21:37:27 +0100
Subject: arm/highmem: Flush tlb on unmap
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
The tlb should be flushed on unmap and thus make the mapping entry
invalid. This is only done in the non-debug case which does not look
diff --git a/debian/patches-rt/arm-preempt-lazy-support.patch b/debian/patches-rt/arm-preempt-lazy-support.patch
index 70aa51633..ade5cffe0 100644
--- a/debian/patches-rt/arm-preempt-lazy-support.patch
+++ b/debian/patches-rt/arm-preempt-lazy-support.patch
@@ -1,7 +1,7 @@
Subject: arm: Add support for lazy preemption
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 31 Oct 2012 12:04:11 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Implement the arm pieces for lazy preempt.
diff --git a/debian/patches-rt/arm-remove-printk_nmi_.patch b/debian/patches-rt/arm-remove-printk_nmi_.patch
deleted file mode 100644
index f93f4e776..000000000
--- a/debian/patches-rt/arm-remove-printk_nmi_.patch
+++ /dev/null
@@ -1,26 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 15 Feb 2019 14:34:20 +0100
-Subject: [PATCH] arm: remove printk_nmi_.*()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-It is no longer provided by the printk core code.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/arm/kernel/smp.c | 2 --
- 1 file changed, 2 deletions(-)
-
---- a/arch/arm/kernel/smp.c
-+++ b/arch/arm/kernel/smp.c
-@@ -680,11 +680,9 @@ void handle_IPI(int ipinr, struct pt_reg
- break;
-
- case IPI_CPU_BACKTRACE:
-- printk_nmi_enter();
- irq_enter();
- nmi_cpu_backtrace(regs);
- irq_exit();
-- printk_nmi_exit();
- break;
-
- default:
diff --git a/debian/patches-rt/arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch b/debian/patches-rt/arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch
index d961b600b..9b1672773 100644
--- a/debian/patches-rt/arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch
+++ b/debian/patches-rt/arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 25 Jul 2018 14:02:38 +0200
Subject: [PATCH] arm64: fpsimd: Delay freeing memory in fpsimd_flush_thread()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
fpsimd_flush_thread() invokes kfree() via sve_free() within a preempt disabled
section which is not working on -RT.
diff --git a/debian/patches-rt/block-mq-drop-preempt-disable.patch b/debian/patches-rt/block-mq-drop-preempt-disable.patch
index 31a7d7695..f905d13a2 100644
--- a/debian/patches-rt/block-mq-drop-preempt-disable.patch
+++ b/debian/patches-rt/block-mq-drop-preempt-disable.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 14 Jul 2015 14:26:34 +0200
Subject: block/mq: do not invoke preempt_disable()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
preempt_disable() and get_cpu() don't play well together with the sleeping
locks it tries to allocate later.
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
-@@ -1605,14 +1605,14 @@ static void __blk_mq_delay_run_hw_queue(
+@@ -1571,14 +1571,14 @@ static void __blk_mq_delay_run_hw_queue(
return;
if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
diff --git a/debian/patches-rt/bus-mhi-Remove-include-of-rwlock_types.h.patch b/debian/patches-rt/bus-mhi-Remove-include-of-rwlock_types.h.patch
index b80448d54..6799242b6 100644
--- a/debian/patches-rt/bus-mhi-Remove-include-of-rwlock_types.h.patch
+++ b/debian/patches-rt/bus-mhi-Remove-include-of-rwlock_types.h.patch
@@ -1,7 +1,7 @@
From: Clark Williams <clark.williams@gmail.com>
Date: Sat, 12 Sep 2020 14:48:26 -0500
Subject: [PATCH] bus: mhi: Remove include of rwlock_types.h
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
rwlock.h should not be included directly. Instead linux/spinlock.h
should be included. Including it directly will break the RT build.
diff --git a/debian/patches-rt/cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch b/debian/patches-rt/cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch
index 4308f1301..bff85f7c2 100644
--- a/debian/patches-rt/cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch
+++ b/debian/patches-rt/cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 3 Jul 2018 18:19:48 +0200
Subject: [PATCH] cgroup: use irqsave in cgroup_rstat_flush_locked()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
All callers of cgroup_rstat_flush_locked() acquire cgroup_rstat_lock
either with spin_lock_irq() or spin_lock_irqsave().
diff --git a/debian/patches-rt/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch b/debian/patches-rt/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch
index abbb3eee2..908e74336 100644
--- a/debian/patches-rt/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch
+++ b/debian/patches-rt/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch
@@ -1,7 +1,7 @@
From: Mike Galbraith <efault@gmx.de>
Date: Sun, 8 Jan 2017 09:32:25 +0100
Subject: [PATCH] cpuset: Convert callback_lock to raw_spinlock_t
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
The two commits below add up to a cpuset might_sleep() splat for RT:
diff --git a/debian/patches-rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch b/debian/patches-rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch
index c39a2348d..a4f193c26 100644
--- a/debian/patches-rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch
+++ b/debian/patches-rt/crypto-Reduce-preempt-disabled-regions-more-algos.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 21 Feb 2014 17:24:04 +0100
Subject: crypto: Reduce preempt disabled regions, more algos
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Don Estabrook reported
| kernel: WARNING: CPU: 2 PID: 858 at kernel/sched/core.c:2428 migrate_disable+0xed/0x100()
diff --git a/debian/patches-rt/crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch b/debian/patches-rt/crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch
index d319b3d6f..889a7d1f4 100644
--- a/debian/patches-rt/crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch
+++ b/debian/patches-rt/crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 26 Jul 2018 18:52:00 +0200
Subject: [PATCH] crypto: cryptd - add a lock instead
preempt_disable/local_bh_disable
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
cryptd has a per-CPU lock which is protected with local_bh_disable() and
preempt_disable().
diff --git a/debian/patches-rt/crypto-limit-more-FPU-enabled-sections.patch b/debian/patches-rt/crypto-limit-more-FPU-enabled-sections.patch
index fc27b11b4..cb8899a7c 100644
--- a/debian/patches-rt/crypto-limit-more-FPU-enabled-sections.patch
+++ b/debian/patches-rt/crypto-limit-more-FPU-enabled-sections.patch
@@ -4,7 +4,7 @@ Subject: [PATCH] crypto: limit more FPU-enabled sections
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Those crypto drivers use SSE/AVX/… for their crypto work and in order to
do so in kernel they need to enable the "FPU" in kernel mode which
diff --git a/debian/patches-rt/debugobjects-rt.patch b/debian/patches-rt/debugobjects-rt.patch
index a4c840c16..bb08b513e 100644
--- a/debian/patches-rt/debugobjects-rt.patch
+++ b/debian/patches-rt/debugobjects-rt.patch
@@ -1,7 +1,7 @@
Subject: debugobjects: Make RT aware
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 17 Jul 2011 21:41:35 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Avoid filling the pool / allocating memory with irqs off().
diff --git a/debian/patches-rt/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch b/debian/patches-rt/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
index 04bd82e46..0d5cc6be0 100644
--- a/debian/patches-rt/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
+++ b/debian/patches-rt/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
@@ -2,7 +2,7 @@ From: Mike Galbraith <umgwanakikbuti@gmail.com>
Date: Thu, 31 Mar 2016 04:08:28 +0200
Subject: [PATCH] drivers/block/zram: Replace bit spinlocks with rtmutex
for -rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
They're nondeterministic, and lead to ___might_sleep() splats in -rt.
OTOH, they're a lot less wasteful than an rtmutex per page.
diff --git a/debian/patches-rt/drivers-tty-fix-omap-lock-crap.patch b/debian/patches-rt/drivers-tty-fix-omap-lock-crap.patch
index c003231af..da840d692 100644
--- a/debian/patches-rt/drivers-tty-fix-omap-lock-crap.patch
+++ b/debian/patches-rt/drivers-tty-fix-omap-lock-crap.patch
@@ -1,7 +1,7 @@
Subject: tty/serial/omap: Make the locking RT aware
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 28 Jul 2011 13:32:57 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
The lock is a sleeping lock and local_irq_save() is not the
optimisation we are looking for. Redo it to make it work on -RT and
diff --git a/debian/patches-rt/drivers-tty-pl011-irq-disable-madness.patch b/debian/patches-rt/drivers-tty-pl011-irq-disable-madness.patch
index eb8ad88e8..0805a9939 100644
--- a/debian/patches-rt/drivers-tty-pl011-irq-disable-madness.patch
+++ b/debian/patches-rt/drivers-tty-pl011-irq-disable-madness.patch
@@ -1,7 +1,7 @@
Subject: tty/serial/pl011: Make the locking work on RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 08 Jan 2013 21:36:51 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
The lock is a sleeping lock and local_irq_save() is not the optimisation
we are looking for. Redo it to make it work on -RT and non-RT.
diff --git a/debian/patches-rt/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch b/debian/patches-rt/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch
index 741782b85..b69194169 100644
--- a/debian/patches-rt/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch
+++ b/debian/patches-rt/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch
@@ -2,7 +2,7 @@ From: Mike Galbraith <umgwanakikbuti@gmail.com>
Date: Thu, 20 Oct 2016 11:15:22 +0200
Subject: [PATCH] drivers/zram: Don't disable preemption in
zcomp_stream_get/put()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
In v4.7, the driver switched to percpu compression streams, disabling
preemption via get/put_cpu_ptr(). Use a per-zcomp_strm lock here. We
diff --git a/debian/patches-rt/drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch b/debian/patches-rt/drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch
index c79b9bd9e..37f4f3b92 100644
--- a/debian/patches-rt/drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch
+++ b/debian/patches-rt/drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch
@@ -2,7 +2,7 @@ From: Mike Galbraith <umgwanakikbuti@gmail.com>
Date: Sat, 27 Feb 2016 09:01:42 +0100
Subject: [PATCH] drm/i915: Don't disable interrupts on PREEMPT_RT during
atomic updates
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Commit
8d7849db3eab7 ("drm/i915: Make sprite updates atomic")
diff --git a/debian/patches-rt/drm-i915-disable-tracing-on-RT.patch b/debian/patches-rt/drm-i915-disable-tracing-on-RT.patch
index cd9e84094..2a20ef530 100644
--- a/debian/patches-rt/drm-i915-disable-tracing-on-RT.patch
+++ b/debian/patches-rt/drm-i915-disable-tracing-on-RT.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 6 Dec 2018 09:52:20 +0100
Subject: [PATCH] drm/i915: disable tracing on -RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Luca Abeni reported this:
| BUG: scheduling while atomic: kworker/u8:2/15203/0x00000003
diff --git a/debian/patches-rt/drm-i915-gt-Only-disable-interrupts-for-the-timeline.patch b/debian/patches-rt/drm-i915-gt-Only-disable-interrupts-for-the-timeline.patch
index 0e4f4f2a3..b2df22915 100644
--- a/debian/patches-rt/drm-i915-gt-Only-disable-interrupts-for-the-timeline.patch
+++ b/debian/patches-rt/drm-i915-gt-Only-disable-interrupts-for-the-timeline.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 7 Jul 2020 12:25:11 +0200
Subject: [PATCH] drm/i915/gt: Only disable interrupts for the timeline lock on
!force-threaded
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
According to commit
d67739268cf0e ("drm/i915/gt: Mark up the nested engine-pm timeline lock as irqsafe")
diff --git a/debian/patches-rt/drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch b/debian/patches-rt/drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch
index 1ac3974e7..1e9776690 100644
--- a/debian/patches-rt/drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch
+++ b/debian/patches-rt/drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 19 Dec 2018 10:47:02 +0100
Subject: [PATCH] drm/i915: skip DRM_I915_LOW_LEVEL_TRACEPOINTS with NOTRACE
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
The order of the header files is important. If this header file is
included after tracepoint.h was included then the NOTRACE here becomes a
diff --git a/debian/patches-rt/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch b/debian/patches-rt/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
index 906d94145..2ce54e1ef 100644
--- a/debian/patches-rt/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
+++ b/debian/patches-rt/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
@@ -1,7 +1,7 @@
Subject: drm,radeon,i915: Use preempt_disable/enable_rt() where recommended
From: Mike Galbraith <umgwanakikbuti@gmail.com>
Date: Sat, 27 Feb 2016 08:09:11 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
DRM folks identified the spots, so use them.
diff --git a/debian/patches-rt/efi-Allow-efi-runtime.patch b/debian/patches-rt/efi-Allow-efi-runtime.patch
index 8c5bbb1ce..7d6b4c9be 100644
--- a/debian/patches-rt/efi-Allow-efi-runtime.patch
+++ b/debian/patches-rt/efi-Allow-efi-runtime.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 26 Jul 2018 15:06:10 +0200
Subject: [PATCH] efi: Allow efi=runtime
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
In case the command line option "efi=noruntime" is the default at build time, the user
can override it with `efi=runtime' and allow runtime services again.
diff --git a/debian/patches-rt/efi-Disable-runtime-services-on-RT.patch b/debian/patches-rt/efi-Disable-runtime-services-on-RT.patch
index 972657b17..049f4f745 100644
--- a/debian/patches-rt/efi-Disable-runtime-services-on-RT.patch
+++ b/debian/patches-rt/efi-Disable-runtime-services-on-RT.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 26 Jul 2018 15:03:16 +0200
Subject: [PATCH] efi: Disable runtime services on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Based on meassurements the EFI functions get_variable /
get_next_variable take up to 2us which looks okay.
diff --git a/debian/patches-rt/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch b/debian/patches-rt/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch
index e32b49279..e31aeb669 100644
--- a/debian/patches-rt/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch
+++ b/debian/patches-rt/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 20 Oct 2017 11:29:53 +0200
Subject: [PATCH] fs/dcache: disable preemption on i_dir_seq's write side
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
i_dir_seq is an opencoded seqcounter. Based on the code it looks like we
could have two writers in parallel despite the fact that the d_lock is
diff --git a/debian/patches-rt/fs-dcache-use-swait_queue-instead-of-waitqueue.patch b/debian/patches-rt/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
index 7770a8f65..336e7ec04 100644
--- a/debian/patches-rt/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
+++ b/debian/patches-rt/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 14 Sep 2016 14:35:49 +0200
Subject: [PATCH] fs/dcache: use swait_queue instead of waitqueue
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
__d_lookup_done() invokes wake_up_all() while holding a hlist_bl_lock()
which disables preemption. As a workaround convert it to swait.
diff --git a/debian/patches-rt/fs-namespace-use-cpu-chill-in-trylock-loops.patch b/debian/patches-rt/fs-namespace-use-cpu-chill-in-trylock-loops.patch
index 80285a5d8..37f2c226d 100644
--- a/debian/patches-rt/fs-namespace-use-cpu-chill-in-trylock-loops.patch
+++ b/debian/patches-rt/fs-namespace-use-cpu-chill-in-trylock-loops.patch
@@ -1,7 +1,7 @@
Subject: fs: namespace: Use cpu_chill() in trylock loops
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 07 Mar 2012 21:00:34 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Retry loops on RT might loop forever when the modifying side was
preempted. Use cpu_chill() instead of cpu_relax() to let the system
diff --git a/debian/patches-rt/ftrace-migrate-disable-tracing.patch b/debian/patches-rt/ftrace-migrate-disable-tracing.patch
index 3d511bd59..06aed0d53 100644
--- a/debian/patches-rt/ftrace-migrate-disable-tracing.patch
+++ b/debian/patches-rt/ftrace-migrate-disable-tracing.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 17 Jul 2011 21:56:42 +0200
Subject: trace: Add migrate-disabled counter to tracing output
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
diff --git a/debian/patches-rt/genirq-disable-irqpoll-on-rt.patch b/debian/patches-rt/genirq-disable-irqpoll-on-rt.patch
index ce4909fd4..a7717a083 100644
--- a/debian/patches-rt/genirq-disable-irqpoll-on-rt.patch
+++ b/debian/patches-rt/genirq-disable-irqpoll-on-rt.patch
@@ -1,7 +1,7 @@
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:57 -0500
Subject: genirq: Disable irqpoll on -rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Creates long latencies for no value
diff --git a/debian/patches-rt/genirq-update-irq_set_irqchip_state-documentation.patch b/debian/patches-rt/genirq-update-irq_set_irqchip_state-documentation.patch
index edf8fb144..455f1a27a 100644
--- a/debian/patches-rt/genirq-update-irq_set_irqchip_state-documentation.patch
+++ b/debian/patches-rt/genirq-update-irq_set_irqchip_state-documentation.patch
@@ -1,7 +1,7 @@
From: Josh Cartwright <joshc@ni.com>
Date: Thu, 11 Feb 2016 11:54:00 -0600
Subject: genirq: update irq_set_irqchip_state documentation
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
On -rt kernels, the use of migrate_disable()/migrate_enable() is
sufficient to guarantee a task isn't moved to another CPU. Update the
diff --git a/debian/patches-rt/hrtimer-Allow-raw-wakeups-during-boot.patch b/debian/patches-rt/hrtimer-Allow-raw-wakeups-during-boot.patch
index be6c81583..c6cceb608 100644
--- a/debian/patches-rt/hrtimer-Allow-raw-wakeups-during-boot.patch
+++ b/debian/patches-rt/hrtimer-Allow-raw-wakeups-during-boot.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 9 Aug 2019 15:25:21 +0200
Subject: [PATCH] hrtimer: Allow raw wakeups during boot
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
There are a few wake-up timers during early boot which are essential for
the system to make progress. At this stage there are no softirqs spawned for the
diff --git a/debian/patches-rt/io_wq-Make-io_wqe-lock-a-raw_spinlock_t.patch b/debian/patches-rt/io_wq-Make-io_wqe-lock-a-raw_spinlock_t.patch
index 5e0c3623d..4a0d4e084 100644
--- a/debian/patches-rt/io_wq-Make-io_wqe-lock-a-raw_spinlock_t.patch
+++ b/debian/patches-rt/io_wq-Make-io_wqe-lock-a-raw_spinlock_t.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 19 Aug 2020 21:44:45 +0200
Subject: [PATCH] io_wq: Make io_wqe::lock a raw_spinlock_t
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
During a context switch the scheduler invokes wq_worker_sleeping() with
disabled preemption. Disabling preemption is needed because it protects
diff --git a/debian/patches-rt/irqwork-push_most_work_into_softirq_context.patch b/debian/patches-rt/irqwork-push_most_work_into_softirq_context.patch
index a2550be00..afa3c20e0 100644
--- a/debian/patches-rt/irqwork-push_most_work_into_softirq_context.patch
+++ b/debian/patches-rt/irqwork-push_most_work_into_softirq_context.patch
@@ -1,7 +1,7 @@
Subject: irqwork: push most work into softirq context
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 23 Jun 2015 15:32:51 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Initially we deferred all irqwork into softirq because we didn't want the
latency spikes if perf or another user was busy and delayed the RT task.
diff --git a/debian/patches-rt/jump-label-rt.patch b/debian/patches-rt/jump-label-rt.patch
index 1448b571d..d301dfc0e 100644
--- a/debian/patches-rt/jump-label-rt.patch
+++ b/debian/patches-rt/jump-label-rt.patch
@@ -1,7 +1,7 @@
Subject: jump-label: disable if stop_machine() is used
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 08 Jul 2015 17:14:48 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Some architectures are using stop_machine() while switching the opcode which
leads to latency spikes.
diff --git a/debian/patches-rt/kconfig-disable-a-few-options-rt.patch b/debian/patches-rt/kconfig-disable-a-few-options-rt.patch
index 6a02b9c56..a4412961c 100644
--- a/debian/patches-rt/kconfig-disable-a-few-options-rt.patch
+++ b/debian/patches-rt/kconfig-disable-a-few-options-rt.patch
@@ -1,7 +1,7 @@
Subject: kconfig: Disable config options which are not RT compatible
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 24 Jul 2011 12:11:43 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Disable stuff which is known to have issues on RT
diff --git a/debian/patches-rt/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch b/debian/patches-rt/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
index b9256fea3..399919013 100644
--- a/debian/patches-rt/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
+++ b/debian/patches-rt/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 21 Nov 2016 19:31:08 +0100
Subject: [PATCH] kernel/sched: move stack + kprobe clean up to
__put_task_struct()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
There is no need to free the stack before the task struct (except for reasons
mentioned in commit 68f24b08ee89 ("sched/core: Free the stack early if
@@ -55,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
security_task_free(tsk);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4092,15 +4092,6 @@ static struct rq *finish_task_switch(str
+@@ -4236,15 +4236,6 @@ static struct rq *finish_task_switch(str
if (prev->sched_class->task_dead)
prev->sched_class->task_dead(prev);
diff --git a/debian/patches-rt/leds-trigger-disable-CPU-trigger-on-RT.patch b/debian/patches-rt/leds-trigger-disable-CPU-trigger-on-RT.patch
index 6f64d73ee..02bf85741 100644
--- a/debian/patches-rt/leds-trigger-disable-CPU-trigger-on-RT.patch
+++ b/debian/patches-rt/leds-trigger-disable-CPU-trigger-on-RT.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 23 Jan 2014 14:45:59 +0100
Subject: leds: trigger: disable CPU trigger on -RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
as it triggers:
|CPU: 0 PID: 0 Comm: swapper Not tainted 3.12.8-rt10 #141
diff --git a/debian/patches-rt/lib-test_lockup-Minimum-fix-to-get-it-compiled-on-PR.patch b/debian/patches-rt/lib-test_lockup-Minimum-fix-to-get-it-compiled-on-PR.patch
new file mode 100644
index 000000000..d10747343
--- /dev/null
+++ b/debian/patches-rt/lib-test_lockup-Minimum-fix-to-get-it-compiled-on-PR.patch
@@ -0,0 +1,58 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 28 Oct 2020 18:55:27 +0100
+Subject: [PATCH] lib/test_lockup: Minimum fix to get it compiled on PREEMPT_RT
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+On PREEMPT_RT the locks are quite different, so they can't be tested as
+is done below. The alternative is to test for the wait_lock within the
+rtmutex.
+
+This is the bare minimum to get it compiled. Problems which exist on
+PREEMPT_RT:
+- none of the locks (spinlock_t, rwlock_t, mutex_t, rw_semaphore) may be
+  acquired with disabled preemption or interrupts.
+  If I read the code correctly, it is possible to acquire a mutex with
+  disabled interrupts.
+  I don't know how to obtain a lock pointer. Technically they are not
+  exported to userland.
+
+- memory can not be allocated with disabled preemption or interrupts even
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ lib/test_lockup.c | 16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+--- a/lib/test_lockup.c
++++ b/lib/test_lockup.c
+@@ -480,6 +480,21 @@ static int __init test_lockup_init(void)
+ return -EINVAL;
+
+ #ifdef CONFIG_DEBUG_SPINLOCK
++#ifdef CONFIG_PREEMPT_RT
++ if (test_magic(lock_spinlock_ptr,
++ offsetof(spinlock_t, lock.wait_lock.magic),
++ SPINLOCK_MAGIC) ||
++ test_magic(lock_rwlock_ptr,
++ offsetof(rwlock_t, rtmutex.wait_lock.magic),
++ SPINLOCK_MAGIC) ||
++ test_magic(lock_mutex_ptr,
++ offsetof(struct mutex, lock.wait_lock.magic),
++ SPINLOCK_MAGIC) ||
++ test_magic(lock_rwsem_ptr,
++ offsetof(struct rw_semaphore, rtmutex.wait_lock.magic),
++ SPINLOCK_MAGIC))
++ return -EINVAL;
++#else
+ if (test_magic(lock_spinlock_ptr,
+ offsetof(spinlock_t, rlock.magic),
+ SPINLOCK_MAGIC) ||
+@@ -494,6 +509,7 @@ static int __init test_lockup_init(void)
+ SPINLOCK_MAGIC))
+ return -EINVAL;
+ #endif
++#endif
+
+ if ((wait_state != TASK_RUNNING ||
+ (call_cond_resched && !reacquire_locks) ||
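
The reason the patch needs a separate offsetof() chain: on PREEMPT_RT a spinlock_t embeds an rtmutex, so the debug magic sits behind lock.wait_lock.magic instead of rlock.magic. A compilable sketch of that validation pattern, with the struct layouts and magic value as simplified stand-ins rather than the kernel definitions:

#include <stddef.h>
#include <stdbool.h>
#include <stdio.h>

#define SPINLOCK_MAGIC 0xdead4eadu

struct raw_spinlock { unsigned int magic; };
struct rt_mutex     { struct raw_spinlock wait_lock; };
struct spinlock     { struct rt_mutex lock; };  /* PREEMPT_RT-style layout */

/* Returns true when a non-NULL pointer does NOT carry the expected magic,
 * mirroring how test_lockup_init() rejects bad user-supplied pointers. */
static bool test_magic(const void *ptr, size_t off, unsigned int expected)
{
	if (!ptr)
		return false;
	return *(const unsigned int *)((const char *)ptr + off) != expected;
}

int main(void)
{
	struct spinlock good = { .lock.wait_lock.magic = SPINLOCK_MAGIC };
	struct spinlock bad  = { .lock.wait_lock.magic = 0 };
	size_t off = offsetof(struct spinlock, lock.wait_lock.magic);

	printf("good rejected: %d\n", test_magic(&good, off, SPINLOCK_MAGIC));
	printf("bad rejected:  %d\n", test_magic(&bad, off, SPINLOCK_MAGIC));
	return 0;
}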
diff --git a/debian/patches-rt/localversion.patch b/debian/patches-rt/localversion.patch
index a652479ae..d2361b6e1 100644
--- a/debian/patches-rt/localversion.patch
+++ b/debian/patches-rt/localversion.patch
@@ -1,7 +1,7 @@
Subject: Add localversion for -RT release
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 08 Jul 2011 20:25:16 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
@@ -11,4 +11,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt16
++-rt20
diff --git a/debian/patches-rt/lockdep-disable-self-test.patch b/debian/patches-rt/lockdep-disable-self-test.patch
index 57aca7896..8a80f3f0f 100644
--- a/debian/patches-rt/lockdep-disable-self-test.patch
+++ b/debian/patches-rt/lockdep-disable-self-test.patch
@@ -4,7 +4,7 @@ Subject: [PATCH] lockdep: disable self-test
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
The self-test wasn't always 100% accurate for RT. We disabled a few
tests which failed because they had a different semantic for RT. Some
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
-@@ -1349,7 +1349,7 @@ config DEBUG_ATOMIC_SLEEP
+@@ -1332,7 +1332,7 @@ config DEBUG_ATOMIC_SLEEP
config DEBUG_LOCKING_API_SELFTESTS
bool "Locking API boot-time self-tests"
diff --git a/debian/patches-rt/lockdep-no-softirq-accounting-on-rt.patch b/debian/patches-rt/lockdep-no-softirq-accounting-on-rt.patch
index 570d31a16..52f1327c5 100644
--- a/debian/patches-rt/lockdep-no-softirq-accounting-on-rt.patch
+++ b/debian/patches-rt/lockdep-no-softirq-accounting-on-rt.patch
@@ -1,7 +1,7 @@
Subject: lockdep: Make it RT aware
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 17 Jul 2011 18:51:23 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
teach lockdep that we don't really do softirqs on -RT.
diff --git a/debian/patches-rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch b/debian/patches-rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch
index 096a5a94d..f8c18b880 100644
--- a/debian/patches-rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch
+++ b/debian/patches-rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch
@@ -1,7 +1,7 @@
From: Josh Cartwright <josh.cartwright@ni.com>
Date: Wed, 28 Jan 2015 13:08:45 -0600
Subject: lockdep: selftest: fix warnings due to missing PREEMPT_RT conditionals
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
"lockdep: Selftest: Only do hardirq context test for raw spinlock"
disabled the execution of certain tests with PREEMPT_RT, but did
diff --git a/debian/patches-rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch b/debian/patches-rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch
index 100ca137a..fe61b1869 100644
--- a/debian/patches-rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch
+++ b/debian/patches-rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch
@@ -1,7 +1,7 @@
Subject: lockdep: selftest: Only do hardirq context test for raw spinlock
From: Yong Zhang <yong.zhang0@gmail.com>
Date: Mon, 16 Apr 2012 15:01:56 +0800
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
From: Yong Zhang <yong.zhang@windriver.com>
diff --git a/debian/patches-rt/locking-Make-spinlock_t-and-rwlock_t-a-RCU-section-o.patch b/debian/patches-rt/locking-Make-spinlock_t-and-rwlock_t-a-RCU-section-o.patch
index 887303aee..31db1cda3 100644
--- a/debian/patches-rt/locking-Make-spinlock_t-and-rwlock_t-a-RCU-section-o.patch
+++ b/debian/patches-rt/locking-Make-spinlock_t-and-rwlock_t-a-RCU-section-o.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 19 Nov 2019 09:25:04 +0100
Subject: [PATCH] locking: Make spinlock_t and rwlock_t a RCU section on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
On !RT a locked spinlock_t and rwlock_t disables preemption which
implies an RCU read section. There is code that relies on that behaviour.
diff --git a/debian/patches-rt/locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch b/debian/patches-rt/locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch
index 1ea6665c6..7690b1ab0 100644
--- a/debian/patches-rt/locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch
+++ b/debian/patches-rt/locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 4 Aug 2017 17:40:42 +0200
Subject: [PATCH 1/2] locking: don't check for __LINUX_SPINLOCK_TYPES_H on -RT
archs
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Upstream uses arch_spinlock_t within spinlock_t and requests that
the spinlock_types.h header file is included first.
diff --git a/debian/patches-rt/md-raid5-percpu-handling-rt-aware.patch b/debian/patches-rt/md-raid5-percpu-handling-rt-aware.patch
index f03e9f03e..705268108 100644
--- a/debian/patches-rt/md-raid5-percpu-handling-rt-aware.patch
+++ b/debian/patches-rt/md-raid5-percpu-handling-rt-aware.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 6 Apr 2010 16:51:31 +0200
Subject: md: raid5: Make raid5_percpu handling RT aware
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
__raid_run_ops() disables preemption with get_cpu() around the access
to the raid5_percpu variables. That causes scheduling while atomic
diff --git a/debian/patches-rt/mips-disable-highmem-on-rt.patch b/debian/patches-rt/mips-disable-highmem-on-rt.patch
index 20e46ec8f..47d4343ad 100644
--- a/debian/patches-rt/mips-disable-highmem-on-rt.patch
+++ b/debian/patches-rt/mips-disable-highmem-on-rt.patch
@@ -1,7 +1,7 @@
Subject: mips: Disable highmem on RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 18 Jul 2011 17:10:12 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
The current highmem handling on -RT is not compatible and needs fixups.
diff --git a/debian/patches-rt/mm-disable-sloub-rt.patch b/debian/patches-rt/mm-disable-sloub-rt.patch
index ad0426da1..406b1aedc 100644
--- a/debian/patches-rt/mm-disable-sloub-rt.patch
+++ b/debian/patches-rt/mm-disable-sloub-rt.patch
@@ -1,7 +1,7 @@
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:44:03 -0500
Subject: mm: Allow only SLUB on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Memory allocation disables interrupts as part of the allocation and freeing
process. For -RT it is important that this section remain short and don't
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -1871,6 +1871,7 @@ choice
+@@ -1872,6 +1872,7 @@ choice
config SLAB
bool "SLAB"
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
select HAVE_HARDENED_USERCOPY_ALLOCATOR
help
The regular slab allocator that is established and known to work
-@@ -1891,6 +1892,7 @@ config SLUB
+@@ -1892,6 +1893,7 @@ config SLUB
config SLOB
depends on EXPERT
bool "SLOB (Simple Allocator)"
diff --git a/debian/patches-rt/mm-fix-exec-activate_mm-vs-TLB-shootdown-and-lazy-tl.patch b/debian/patches-rt/mm-fix-exec-activate_mm-vs-TLB-shootdown-and-lazy-tl.patch
index 4346bd24c..02f210941 100644
--- a/debian/patches-rt/mm-fix-exec-activate_mm-vs-TLB-shootdown-and-lazy-tl.patch
+++ b/debian/patches-rt/mm-fix-exec-activate_mm-vs-TLB-shootdown-and-lazy-tl.patch
@@ -2,7 +2,7 @@ From: Nicholas Piggin <npiggin@gmail.com>
Date: Fri, 28 Aug 2020 20:00:19 +1000
Subject: [PATCH] mm: fix exec activate_mm vs TLB shootdown and lazy tlb
switching race
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Reading and modifying current->mm and current->active_mm and switching
mm should be done with irqs off, to prevent races seeing an intermediate
diff --git a/debian/patches-rt/mm-make-vmstat-rt-aware.patch b/debian/patches-rt/mm-make-vmstat-rt-aware.patch
index 203b915b0..e82440af3 100644
--- a/debian/patches-rt/mm-make-vmstat-rt-aware.patch
+++ b/debian/patches-rt/mm-make-vmstat-rt-aware.patch
@@ -1,7 +1,7 @@
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:30:13 -0500
Subject: mm/vmstat: Protect per cpu variables with preempt disable on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Disable preemption on -RT for the vmstat code. On vanilla the code runs in
IRQ-off regions while on -RT it is not. "preempt_disable" ensures that the
diff --git a/debian/patches-rt/mm-memcontrol-Disable-preemption-in-__mod_memcg_lruv.patch b/debian/patches-rt/mm-memcontrol-Disable-preemption-in-__mod_memcg_lruv.patch
new file mode 100644
index 000000000..c600f4659
--- /dev/null
+++ b/debian/patches-rt/mm-memcontrol-Disable-preemption-in-__mod_memcg_lruv.patch
@@ -0,0 +1,38 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 28 Oct 2020 18:15:32 +0100
+Subject: [PATCH] mm/memcontrol: Disable preemption in
+ __mod_memcg_lruvec_state()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+The callers expect disabled preemption/interrupts while invoking
+__mod_memcg_lruvec_state(). This works in mainline because a lock of
+some kind is acquired.
+
+Use preempt_disable_rt() where per-CPU variables are accessed and a
+stable pointer is expected. This is also done in __mod_zone_page_state()
+for the same reason.
+
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/memcontrol.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -821,6 +821,7 @@ void __mod_memcg_lruvec_state(struct lru
+ pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+ memcg = pn->memcg;
+
++ preempt_disable_rt();
+ /* Update memcg */
+ __mod_memcg_state(memcg, idx, val);
+
+@@ -840,6 +841,7 @@ void __mod_memcg_lruvec_state(struct lru
+ x = 0;
+ }
+ __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
++ preempt_enable_rt();
+ }
+
+ /**
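
As a rough sketch of what preempt_disable_rt() amounts to (illustrative only, not copied from the RT tree's headers): a guard that is a real preempt_disable() on PREEMPT_RT and compiles away otherwise, since the non-RT callers already hold a lock here:

/* Illustrative stand-in for the RT tree's preempt_disable_rt() helpers. */
#ifdef CONFIG_PREEMPT_RT
# define preempt_disable_rt()	preempt_disable()
# define preempt_enable_rt()	preempt_enable()
#else
# define preempt_disable_rt()	do { } while (0)
# define preempt_enable_rt()	do { } while (0)
#endif

With that guard in place, the per-CPU read-modify-write of lruvec_stat_cpu between the two calls cannot be preempted and migrated mid-update on RT.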
diff --git a/debian/patches-rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch b/debian/patches-rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
index 6626e6c14..4be6c84ea 100644
--- a/debian/patches-rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
+++ b/debian/patches-rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
@@ -1,7 +1,7 @@
From: Yang Shi <yang.shi@windriver.com>
Subject: mm/memcontrol: Don't call schedule_work_on in preemption disabled context
Date: Wed, 30 Oct 2013 11:48:33 -0700
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
The following trace is triggered when running ltp oom test cases:
@@ -49,7 +49,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -2301,7 +2301,7 @@ static void drain_all_stock(struct mem_c
+@@ -2303,7 +2303,7 @@ static void drain_all_stock(struct mem_c
* as well as workers from this path always operate on the local
* per-cpu data. CPU up doesn't touch memcg_stock at all.
*/
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for_each_online_cpu(cpu) {
struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
struct mem_cgroup *memcg;
-@@ -2324,7 +2324,7 @@ static void drain_all_stock(struct mem_c
+@@ -2326,7 +2326,7 @@ static void drain_all_stock(struct mem_c
schedule_work_on(cpu, &stock->work);
}
}
diff --git a/debian/patches-rt/mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch b/debian/patches-rt/mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch
index 95a3289d2..7c27307f9 100644
--- a/debian/patches-rt/mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch
+++ b/debian/patches-rt/mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 18 Aug 2020 10:30:00 +0200
Subject: [PATCH] mm: memcontrol: Provide a local_lock for per-CPU memcg_stock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
The interrupts are disabled to ensure CPU-local access to the per-CPU
variable `memcg_stock'.
@@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -2154,6 +2154,7 @@ void unlock_page_memcg(struct page *page
+@@ -2156,6 +2156,7 @@ void unlock_page_memcg(struct page *page
EXPORT_SYMBOL(unlock_page_memcg);
struct memcg_stock_pcp {
@@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct mem_cgroup *cached; /* this never be root cgroup */
unsigned int nr_pages;
-@@ -2205,7 +2206,7 @@ static bool consume_stock(struct mem_cgr
+@@ -2207,7 +2208,7 @@ static bool consume_stock(struct mem_cgr
if (nr_pages > MEMCG_CHARGE_BATCH)
return ret;
@@ -38,7 +38,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
stock = this_cpu_ptr(&memcg_stock);
if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
-@@ -2213,7 +2214,7 @@ static bool consume_stock(struct mem_cgr
+@@ -2215,7 +2216,7 @@ static bool consume_stock(struct mem_cgr
ret = true;
}
@@ -47,7 +47,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ret;
}
-@@ -2248,14 +2249,14 @@ static void drain_local_stock(struct wor
+@@ -2250,14 +2251,14 @@ static void drain_local_stock(struct wor
* The only protection from memory hotplug vs. drain_stock races is
* that we always operate on local CPU stock here with IRQ disabled
*/
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -2267,7 +2268,7 @@ static void refill_stock(struct mem_cgro
+@@ -2269,7 +2270,7 @@ static void refill_stock(struct mem_cgro
struct memcg_stock_pcp *stock;
unsigned long flags;
@@ -73,7 +73,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
stock = this_cpu_ptr(&memcg_stock);
if (stock->cached != memcg) { /* reset if necessary */
-@@ -2280,7 +2281,7 @@ static void refill_stock(struct mem_cgro
+@@ -2282,7 +2283,7 @@ static void refill_stock(struct mem_cgro
if (stock->nr_pages > MEMCG_CHARGE_BATCH)
drain_stock(stock);
@@ -82,7 +82,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -3084,7 +3085,7 @@ static bool consume_obj_stock(struct obj
+@@ -3086,7 +3087,7 @@ static bool consume_obj_stock(struct obj
unsigned long flags;
bool ret = false;
@@ -91,7 +91,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
stock = this_cpu_ptr(&memcg_stock);
if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
-@@ -3092,7 +3093,7 @@ static bool consume_obj_stock(struct obj
+@@ -3094,7 +3095,7 @@ static bool consume_obj_stock(struct obj
ret = true;
}
@@ -100,7 +100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ret;
}
-@@ -3151,7 +3152,7 @@ static void refill_obj_stock(struct obj_
+@@ -3153,7 +3154,7 @@ static void refill_obj_stock(struct obj_
struct memcg_stock_pcp *stock;
unsigned long flags;
@@ -109,7 +109,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
stock = this_cpu_ptr(&memcg_stock);
if (stock->cached_objcg != objcg) { /* reset if necessary */
-@@ -3165,7 +3166,7 @@ static void refill_obj_stock(struct obj_
+@@ -3167,7 +3168,7 @@ static void refill_obj_stock(struct obj_
if (stock->nr_bytes > PAGE_SIZE)
drain_obj_stock(stock);
@@ -118,7 +118,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
-@@ -7050,9 +7051,13 @@ static int __init mem_cgroup_init(void)
+@@ -7052,9 +7053,13 @@ static int __init mem_cgroup_init(void)
cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
memcg_hotplug_cpu_dead);
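
For readers who have not met the API this patch introduces, here is a minimal sketch of the local_lock pattern, assuming <linux/local_lock.h> (merged in v5.8). The names are illustrative, not the ones used in mm/memcontrol.c:

#include <linux/local_lock.h>
#include <linux/percpu.h>

struct pcpu_stock {
        local_lock_t lock;      /* documents what local_irq_save() protected */
        unsigned int nr_pages;
};

static DEFINE_PER_CPU(struct pcpu_stock, pcpu_stock) = {
        .lock = INIT_LOCAL_LOCK(lock),
};

static void stock_add(unsigned int nr)
{
        unsigned long flags;

        /* !RT: compiles to local_irq_save(); RT: a per-CPU spinlock */
        local_lock_irqsave(&pcpu_stock.lock, flags);
        this_cpu_ptr(&pcpu_stock)->nr_pages += nr;
        local_unlock_irqrestore(&pcpu_stock.lock, flags);
}

On RT the section stays preemptible while still providing CPU-local exclusion, which is the whole point of the conversion above.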
diff --git a/debian/patches-rt/mm-memcontrol-do_not_disable_irq.patch b/debian/patches-rt/mm-memcontrol-do_not_disable_irq.patch
index 4413f77cd..699548df0 100644
--- a/debian/patches-rt/mm-memcontrol-do_not_disable_irq.patch
+++ b/debian/patches-rt/mm-memcontrol-do_not_disable_irq.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Subject: mm/memcontrol: Replace local_irq_disable with local locks
Date: Wed, 28 Jan 2015 17:14:16 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
There are a few local_irq_disable() which then take sleeping locks. This
patch converts them to local locks.
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
-@@ -5682,12 +5690,12 @@ static int mem_cgroup_move_account(struc
+@@ -5684,12 +5692,12 @@ static int mem_cgroup_move_account(struc
ret = 0;
@@ -52,7 +52,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
out_unlock:
unlock_page(page);
out:
-@@ -6723,10 +6731,10 @@ int mem_cgroup_charge(struct page *page,
+@@ -6725,10 +6733,10 @@ int mem_cgroup_charge(struct page *page,
css_get(&memcg->css);
commit_charge(page, memcg);
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (PageSwapCache(page)) {
swp_entry_t entry = { .val = page_private(page) };
-@@ -6770,11 +6778,11 @@ static void uncharge_batch(const struct
+@@ -6772,11 +6780,11 @@ static void uncharge_batch(const struct
memcg_oom_recover(ug->memcg);
}
@@ -79,7 +79,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* drop reference from uncharge_page */
css_put(&ug->memcg->css);
-@@ -6928,10 +6936,10 @@ void mem_cgroup_migrate(struct page *old
+@@ -6930,10 +6938,10 @@ void mem_cgroup_migrate(struct page *old
css_get(&memcg->css);
commit_charge(newpage, memcg);
@@ -92,7 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
-@@ -7106,6 +7114,7 @@ void mem_cgroup_swapout(struct page *pag
+@@ -7108,6 +7116,7 @@ void mem_cgroup_swapout(struct page *pag
struct mem_cgroup *memcg, *swap_memcg;
unsigned int nr_entries;
unsigned short oldid;
@@ -100,7 +100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
VM_BUG_ON_PAGE(PageLRU(page), page);
VM_BUG_ON_PAGE(page_count(page), page);
-@@ -7151,9 +7160,13 @@ void mem_cgroup_swapout(struct page *pag
+@@ -7153,9 +7162,13 @@ void mem_cgroup_swapout(struct page *pag
* important here to have the interrupts disabled because it is the
* only synchronisation we have for updating the per-CPU variables.
*/
diff --git a/debian/patches-rt/mm-page_alloc-Use-migrate_disable-in-drain_local_pag.patch b/debian/patches-rt/mm-page_alloc-Use-migrate_disable-in-drain_local_pag.patch
index 407b81ba8..78f980c69 100644
--- a/debian/patches-rt/mm-page_alloc-Use-migrate_disable-in-drain_local_pag.patch
+++ b/debian/patches-rt/mm-page_alloc-Use-migrate_disable-in-drain_local_pag.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 2 Jul 2020 14:27:23 +0200
Subject: [PATCH] mm/page_alloc: Use migrate_disable() in
drain_local_pages_wq()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
drain_local_pages_wq() disables preemption to avoid CPU migration during
CPU hotplug.
@@ -19,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -2986,9 +2986,9 @@ static void drain_local_pages_wq(struct
+@@ -2987,9 +2987,9 @@ static void drain_local_pages_wq(struct
* cpu which is allright but we also have to make sure to not move to
* a different one.
*/
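
The change swaps preempt_disable() for migrate_disable(): the task stays on its CPU but remains preemptible, so taking sleeping locks inside the section is legal on RT. A minimal sketch, assuming migrate_disable()/migrate_enable() as provided by the RT tree (mainline only since v5.11):

#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/smp.h>

static void work_on_this_cpu(void)
{
        migrate_disable();      /* pinned to this CPU, still preemptible */
        pr_info("draining on CPU %d\n", smp_processor_id());
        migrate_enable();
}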
diff --git a/debian/patches-rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch b/debian/patches-rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch
index 58447f107..fe9b18311 100644
--- a/debian/patches-rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch
+++ b/debian/patches-rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch
@@ -1,7 +1,7 @@
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:37 -0500
Subject: mm: page_alloc: rt-friendly per-cpu pages
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
rt-friendly per-cpu pages: convert the irqs-off per-cpu locking
method into a preemptible, explicit-per-cpu-locks method.
@@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/page_owner.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
-@@ -356,6 +357,13 @@ EXPORT_SYMBOL(nr_node_ids);
+@@ -357,6 +358,13 @@ EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif
@@ -40,7 +40,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int page_group_by_mobility_disabled __read_mostly;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
-@@ -1493,10 +1501,10 @@ static void __free_pages_ok(struct page
+@@ -1494,10 +1502,10 @@ static void __free_pages_ok(struct page
return;
migratetype = get_pfnblock_migratetype(page, pfn);
@@ -53,7 +53,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
void __free_pages_core(struct page *page, unsigned int order)
-@@ -2899,13 +2907,13 @@ void drain_zone_pages(struct zone *zone,
+@@ -2900,13 +2908,13 @@ void drain_zone_pages(struct zone *zone,
int to_drain, batch;
LIST_HEAD(dst);
@@ -69,7 +69,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (to_drain > 0)
free_pcppages_bulk(zone, &dst, false);
-@@ -2927,7 +2935,7 @@ static void drain_pages_zone(unsigned in
+@@ -2928,7 +2936,7 @@ static void drain_pages_zone(unsigned in
LIST_HEAD(dst);
int count;
@@ -78,7 +78,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
-@@ -2935,7 +2943,7 @@ static void drain_pages_zone(unsigned in
+@@ -2936,7 +2944,7 @@ static void drain_pages_zone(unsigned in
if (count)
isolate_pcp_pages(count, pcp, &dst);
@@ -87,7 +87,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (count)
free_pcppages_bulk(zone, &dst, false);
-@@ -3185,9 +3193,9 @@ void free_unref_page(struct page *page)
+@@ -3186,9 +3194,9 @@ void free_unref_page(struct page *page)
if (!free_unref_page_prepare(page, pfn))
return;
@@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (!list_empty(&dst))
free_pcppages_bulk(zone, &dst, false);
}
-@@ -3214,7 +3222,7 @@ void free_unref_page_list(struct list_he
+@@ -3215,7 +3223,7 @@ void free_unref_page_list(struct list_he
set_page_private(page, pfn);
}
@@ -108,7 +108,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_safe(page, next, list, lru) {
unsigned long pfn = page_private(page);
enum zone_type type;
-@@ -3229,12 +3237,12 @@ void free_unref_page_list(struct list_he
+@@ -3230,12 +3238,12 @@ void free_unref_page_list(struct list_he
* a large list of pages to free.
*/
if (++batch_count == SWAP_CLUSTER_MAX) {
@@ -124,7 +124,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for (i = 0; i < __MAX_NR_ZONES; ) {
struct page *page;
-@@ -3403,7 +3411,7 @@ static struct page *rmqueue_pcplist(stru
+@@ -3404,7 +3412,7 @@ static struct page *rmqueue_pcplist(stru
struct page *page;
unsigned long flags;
@@ -133,7 +133,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pcp = &this_cpu_ptr(zone->pageset)->pcp;
list = &pcp->lists[migratetype];
page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list);
-@@ -3411,7 +3419,7 @@ static struct page *rmqueue_pcplist(stru
+@@ -3412,7 +3420,7 @@ static struct page *rmqueue_pcplist(stru
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
zone_statistics(preferred_zone, zone);
}
@@ -142,7 +142,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return page;
}
-@@ -3445,7 +3453,8 @@ struct page *rmqueue(struct zone *prefer
+@@ -3446,7 +3454,8 @@ struct page *rmqueue(struct zone *prefer
* allocate greater than order-1 page units with __GFP_NOFAIL.
*/
WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
@@ -152,7 +152,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
page = NULL;
-@@ -3471,7 +3480,7 @@ struct page *rmqueue(struct zone *prefer
+@@ -3472,7 +3481,7 @@ struct page *rmqueue(struct zone *prefer
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
zone_statistics(preferred_zone, zone);
@@ -161,7 +161,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
out:
/* Separate test+clear to avoid unnecessary atomics */
-@@ -3484,7 +3493,7 @@ struct page *rmqueue(struct zone *prefer
+@@ -3485,7 +3494,7 @@ struct page *rmqueue(struct zone *prefer
return page;
failed:
@@ -170,7 +170,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return NULL;
}
-@@ -8755,7 +8764,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -8758,7 +8767,7 @@ void zone_pcp_reset(struct zone *zone)
struct per_cpu_pageset *pset;
/* avoid races with drain_pages() */
@@ -179,7 +179,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (zone->pageset != &boot_pageset) {
for_each_online_cpu(cpu) {
pset = per_cpu_ptr(zone->pageset, cpu);
-@@ -8764,7 +8773,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -8767,7 +8776,7 @@ void zone_pcp_reset(struct zone *zone)
free_percpu(zone->pageset);
zone->pageset = &boot_pageset;
}
diff --git a/debian/patches-rt/mm-rt-kmap-atomic-scheduling.patch b/debian/patches-rt/mm-rt-kmap-atomic-scheduling.patch
index c0426bec9..fa6f24c88 100644
--- a/debian/patches-rt/mm-rt-kmap-atomic-scheduling.patch
+++ b/debian/patches-rt/mm-rt-kmap-atomic-scheduling.patch
@@ -1,7 +1,7 @@
Subject: mm, rt: kmap_atomic scheduling
From: Peter Zijlstra <peterz@infradead.org>
Date: Thu, 28 Jul 2011 10:43:51 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
In fact, with migrate_disable() existing one could play games with
kmap_atomic. You could save/restore the kmap_atomic slots on context
diff --git a/debian/patches-rt/mm-scatterlist-dont-disable-irqs-on-RT.patch b/debian/patches-rt/mm-scatterlist-dont-disable-irqs-on-RT.patch
index 42919ee5d..3f61fe0b3 100644
--- a/debian/patches-rt/mm-scatterlist-dont-disable-irqs-on-RT.patch
+++ b/debian/patches-rt/mm-scatterlist-dont-disable-irqs-on-RT.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 3 Jul 2009 08:44:34 -0500
Subject: mm/scatterlist: Do not disable irqs on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
For -RT it is enough to keep pagefault disabled (which is currently handled by
kmap_atomic()).
diff --git a/debian/patches-rt/mm-slub-Always-flush-the-delayed-empty-slubs-in-flus.patch b/debian/patches-rt/mm-slub-Always-flush-the-delayed-empty-slubs-in-flus.patch
index 3ff464b84..a5de9a974 100644
--- a/debian/patches-rt/mm-slub-Always-flush-the-delayed-empty-slubs-in-flus.patch
+++ b/debian/patches-rt/mm-slub-Always-flush-the-delayed-empty-slubs-in-flus.patch
@@ -1,7 +1,7 @@
From: Kevin Hao <haokexin@gmail.com>
Date: Mon, 4 May 2020 11:34:07 +0800
Subject: [PATCH] mm: slub: Always flush the delayed empty slubs in flush_all()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
After commit f0b231101c94 ("mm/SLUB: delay giving back empty slubs to
IRQ enabled regions"), when the free_slab() is invoked with the IRQ
diff --git a/debian/patches-rt/mm-slub-Make-object_map_lock-a-raw_spinlock_t.patch b/debian/patches-rt/mm-slub-Make-object_map_lock-a-raw_spinlock_t.patch
index fd3b9fa7c..ea83d99c0 100644
--- a/debian/patches-rt/mm-slub-Make-object_map_lock-a-raw_spinlock_t.patch
+++ b/debian/patches-rt/mm-slub-Make-object_map_lock-a-raw_spinlock_t.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 16 Jul 2020 18:47:50 +0200
Subject: [PATCH] mm/slub: Make object_map_lock a raw_spinlock_t
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
The variable object_map is protected by object_map_lock. The lock is always
acquired in debug code and within already atomic context
diff --git a/debian/patches-rt/mm-vmalloc-use-get-cpu-light.patch b/debian/patches-rt/mm-vmalloc-use-get-cpu-light.patch
index a6b8f95fd..03112e5a0 100644
--- a/debian/patches-rt/mm-vmalloc-use-get-cpu-light.patch
+++ b/debian/patches-rt/mm-vmalloc-use-get-cpu-light.patch
@@ -1,7 +1,7 @@
Subject: mm/vmalloc: Another preempt disable region which sucks
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 12 Jul 2011 11:39:36 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Avoid the preempt disable version of get_cpu_var(). The inner-lock should
provide enough serialisation.
diff --git a/debian/patches-rt/mm-workingset-replace-IRQ-off-check-with-a-lockdep-a.patch b/debian/patches-rt/mm-workingset-replace-IRQ-off-check-with-a-lockdep-a.patch
index fdf7ba387..e406313e7 100644
--- a/debian/patches-rt/mm-workingset-replace-IRQ-off-check-with-a-lockdep-a.patch
+++ b/debian/patches-rt/mm-workingset-replace-IRQ-off-check-with-a-lockdep-a.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 11 Feb 2019 10:40:46 +0100
Subject: [PATCH] mm: workingset: replace IRQ-off check with a lockdep assert.
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Commit
diff --git a/debian/patches-rt/mm-zswap-Use-local-lock-to-protect-per-CPU-data.patch b/debian/patches-rt/mm-zswap-Use-local-lock-to-protect-per-CPU-data.patch
index 479f40a81..e1ce3db5a 100644
--- a/debian/patches-rt/mm-zswap-Use-local-lock-to-protect-per-CPU-data.patch
+++ b/debian/patches-rt/mm-zswap-Use-local-lock-to-protect-per-CPU-data.patch
@@ -1,7 +1,7 @@
From: "Luis Claudio R. Goncalves" <lgoncalv@redhat.com>
Date: Tue, 25 Jun 2019 11:28:04 -0300
Subject: [PATCH] mm/zswap: Use local lock to protect per-CPU data
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
zswap uses per-CPU compression. The per-CPU data pointer is acquired with
get_cpu_ptr() which implicitly disables preemption. It allocates
diff --git a/debian/patches-rt/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch b/debian/patches-rt/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch
index 4fbee64bc..12d6e8128 100644
--- a/debian/patches-rt/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch
+++ b/debian/patches-rt/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch
@@ -1,7 +1,7 @@
From: Mike Galbraith <umgwanakikbuti@gmail.com>
Date: Tue, 22 Mar 2016 11:16:09 +0100
Subject: [PATCH] mm/zsmalloc: copy with get_cpu_var() and locking
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
get_cpu_var() disables preemption and triggers a might_sleep() splat later.
This is replaced with get_locked_var().
diff --git a/debian/patches-rt/net--Move-lockdep-where-it-belongs.patch b/debian/patches-rt/net--Move-lockdep-where-it-belongs.patch
index 537ee3e00..1e40e4e1d 100644
--- a/debian/patches-rt/net--Move-lockdep-where-it-belongs.patch
+++ b/debian/patches-rt/net--Move-lockdep-where-it-belongs.patch
@@ -1,7 +1,7 @@
Subject: net: Move lockdep where it belongs
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 08 Sep 2020 07:32:20 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
diff --git a/debian/patches-rt/net-Dequeue-in-dev_cpu_dead-without-the-lock.patch b/debian/patches-rt/net-Dequeue-in-dev_cpu_dead-without-the-lock.patch
index e38d60d5a..2911f5131 100644
--- a/debian/patches-rt/net-Dequeue-in-dev_cpu_dead-without-the-lock.patch
+++ b/debian/patches-rt/net-Dequeue-in-dev_cpu_dead-without-the-lock.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 16 Sep 2020 16:15:39 +0200
Subject: [PATCH] net: Dequeue in dev_cpu_dead() without the lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Upstream uses skb_dequeue() to acquire the lock of `input_pkt_queue'. The reason is
to synchronize against a remote CPU which still thinks that the CPU is online
@@ -20,7 +20,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -10649,7 +10649,7 @@ static int dev_cpu_dead(unsigned int old
+@@ -10729,7 +10729,7 @@ static int dev_cpu_dead(unsigned int old
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
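
The one-line replacement above is skb_dequeue() -> __skb_dequeue(). As a sketch of the contrast (assuming <linux/skbuff.h>): skb_dequeue() takes queue->lock internally, while __skb_dequeue() leaves serialisation to the caller — acceptable in dev_cpu_dead() because the CPU is offline and no remote CPU touches the queue any more:

#include <linux/skbuff.h>

static void drain_dead_cpu_queue(struct sk_buff_head *queue)
{
        struct sk_buff *skb;

        /* no remote users left, the lockless variant suffices */
        while ((skb = __skb_dequeue(queue)) != NULL)
                kfree_skb(skb);
}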
diff --git a/debian/patches-rt/net-Properly-annotate-the-try-lock-for-the-seqlock.patch b/debian/patches-rt/net-Properly-annotate-the-try-lock-for-the-seqlock.patch
index 5c0f31acb..ef850ef5d 100644
--- a/debian/patches-rt/net-Properly-annotate-the-try-lock-for-the-seqlock.patch
+++ b/debian/patches-rt/net-Properly-annotate-the-try-lock-for-the-seqlock.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 8 Sep 2020 16:57:11 +0200
Subject: [PATCH] net: Properly annotate the try-lock for the seqlock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
In patch
("net/Qdisc: use a seqlock instead seqcount")
diff --git a/debian/patches-rt/net-Qdisc-use-a-seqlock-instead-seqcount.patch b/debian/patches-rt/net-Qdisc-use-a-seqlock-instead-seqcount.patch
index d91d8b773..dbed8e0b1 100644
--- a/debian/patches-rt/net-Qdisc-use-a-seqlock-instead-seqcount.patch
+++ b/debian/patches-rt/net-Qdisc-use-a-seqlock-instead-seqcount.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 14 Sep 2016 17:36:35 +0200
Subject: [PATCH] net/Qdisc: use a seqlock instead seqcount
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
The seqcount disables preemption on -RT while it is held, which we can't
remove. We also don't want the reader to spin for ages if the writer is
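
As background, a minimal sketch of the seqlock_t pattern the patch switches to, assuming <linux/seqlock.h>. Writers serialise on the lock embedded in seqlock_t, so a reader on RT never spins against a preempted writer (names are illustrative):

#include <linux/seqlock.h>
#include <linux/types.h>

static DEFINE_SEQLOCK(stats_lock);
static u64 pkts, bytes;

static void stats_update(unsigned int len)
{
        write_seqlock(&stats_lock);     /* real lock, sleepable on RT */
        pkts++;
        bytes += len;
        write_sequnlock(&stats_lock);
}

static u64 stats_read_bytes(void)
{
        unsigned int seq;
        u64 v;

        do {
                seq = read_seqbegin(&stats_lock);
                v = bytes;
        } while (read_seqretry(&stats_lock, seq));

        return v;
}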
diff --git a/debian/patches-rt/net-core-use-local_bh_disable-in-netif_rx_ni.patch b/debian/patches-rt/net-core-use-local_bh_disable-in-netif_rx_ni.patch
index 6d88982e4..4e0c986cc 100644
--- a/debian/patches-rt/net-core-use-local_bh_disable-in-netif_rx_ni.patch
+++ b/debian/patches-rt/net-core-use-local_bh_disable-in-netif_rx_ni.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 16 Jun 2017 19:03:16 +0200
Subject: [PATCH] net/core: use local_bh_disable() in netif_rx_ni()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
In 2004 netif_rx_ni() gained a preempt_disable() section around
netif_rx() and its do_softirq() + testing for it. The do_softirq() part
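
The patch boils down to wrapping netif_rx() in a bottom-half-disabled section; local_bh_enable() then runs any softirqs the call raised. A sketch of the resulting shape (not the exact upstream hunk):

#include <linux/netdevice.h>

static int netif_rx_ni_sketch(struct sk_buff *skb)
{
        int ret;

        local_bh_disable();
        ret = netif_rx(skb);    /* may raise NET_RX_SOFTIRQ */
        local_bh_enable();      /* processes pending softirqs */

        return ret;
}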
diff --git a/debian/patches-rt/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch b/debian/patches-rt/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
index b3d4630c1..4d942b6be 100644
--- a/debian/patches-rt/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
+++ b/debian/patches-rt/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 30 Mar 2016 13:36:29 +0200
Subject: [PATCH] net: dev: always take qdisc's busylock in __dev_xmit_skb()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
The root-lock is dropped before dev_hard_start_xmit() is invoked and after
setting the __QDISC___STATE_RUNNING bit. If this task is now pushed away
diff --git a/debian/patches-rt/net_disable_NET_RX_BUSY_POLL.patch b/debian/patches-rt/net_disable_NET_RX_BUSY_POLL.patch
index c39311f00..36c303614 100644
--- a/debian/patches-rt/net_disable_NET_RX_BUSY_POLL.patch
+++ b/debian/patches-rt/net_disable_NET_RX_BUSY_POLL.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Sat, 27 May 2017 19:02:06 +0200
Subject: net/core: disable NET_RX_BUSY_POLL on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
napi_busy_loop() disables preemption and performs a NAPI poll. We can't acquire
sleeping locks with disabled preemption so we would have to work around this
diff --git a/debian/patches-rt/oleg-signal-rt-fix.patch b/debian/patches-rt/oleg-signal-rt-fix.patch
index d304821e0..6e573e8ad 100644
--- a/debian/patches-rt/oleg-signal-rt-fix.patch
+++ b/debian/patches-rt/oleg-signal-rt-fix.patch
@@ -1,7 +1,7 @@
From: Oleg Nesterov <oleg@redhat.com>
Date: Tue, 14 Jul 2015 14:26:34 +0200
Subject: signal/x86: Delay calling signals in atomic
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
On x86_64 we must disable preemption before we enable interrupts
for stack faults, int3 and debugging, because the current task is using
diff --git a/debian/patches-rt/panic-disable-random-on-rt.patch b/debian/patches-rt/panic-disable-random-on-rt.patch
index f28bf6020..97046e511 100644
--- a/debian/patches-rt/panic-disable-random-on-rt.patch
+++ b/debian/patches-rt/panic-disable-random-on-rt.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 14 Jul 2015 14:26:34 +0200
Subject: panic: skip get_random_bytes for RT_FULL in init_oops_id
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Disable on -RT. If this is invoked from irq-context we will have problems
acquiring the sleeping lock.
diff --git a/debian/patches-rt/pid.h-include-atomic.h.patch b/debian/patches-rt/pid.h-include-atomic.h.patch
index 73708c497..d8b7c45d8 100644
--- a/debian/patches-rt/pid.h-include-atomic.h.patch
+++ b/debian/patches-rt/pid.h-include-atomic.h.patch
@@ -1,7 +1,7 @@
From: Grygorii Strashko <Grygorii.Strashko@linaro.org>
Date: Tue, 21 Jul 2015 19:43:56 +0300
Subject: pid.h: include atomic.h
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
This patch fixes a build error:
CC kernel/pid_namespace.o
diff --git a/debian/patches-rt/power-disable-highmem-on-rt.patch b/debian/patches-rt/power-disable-highmem-on-rt.patch
index 04090e480..f16965820 100644
--- a/debian/patches-rt/power-disable-highmem-on-rt.patch
+++ b/debian/patches-rt/power-disable-highmem-on-rt.patch
@@ -1,7 +1,7 @@
Subject: powerpc: Disable highmem on RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 18 Jul 2011 17:08:34 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
The current highmem handling on -RT is not compatible and needs fixups.
diff --git a/debian/patches-rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch b/debian/patches-rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch
index 15b2f926b..a3a38e4af 100644
--- a/debian/patches-rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch
+++ b/debian/patches-rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch
@@ -1,7 +1,7 @@
From: Bogdan Purcareata <bogdan.purcareata@freescale.com>
Date: Fri, 24 Apr 2015 15:53:13 +0000
Subject: powerpc/kvm: Disable in-kernel MPIC emulation for PREEMPT_RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
While converting the openpic emulation code to use a raw_spinlock_t enables
guests to run on RT, there's still a performance issue. For interrupts sent in
diff --git a/debian/patches-rt/powerpc-preempt-lazy-support.patch b/debian/patches-rt/powerpc-preempt-lazy-support.patch
index 4998f01a6..c79d69258 100644
--- a/debian/patches-rt/powerpc-preempt-lazy-support.patch
+++ b/debian/patches-rt/powerpc-preempt-lazy-support.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 1 Nov 2012 10:14:11 +0100
Subject: powerpc: Add support for lazy preemption
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Implement the powerpc pieces for lazy preempt.
diff --git a/debian/patches-rt/powerpc-pseries-iommu-Use-a-locallock-instead-local_ir.patch b/debian/patches-rt/powerpc-pseries-iommu-Use-a-locallock-instead-local_ir.patch
index ba327b321..69c5bc881 100644
--- a/debian/patches-rt/powerpc-pseries-iommu-Use-a-locallock-instead-local_ir.patch
+++ b/debian/patches-rt/powerpc-pseries-iommu-Use-a-locallock-instead-local_ir.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 26 Mar 2019 18:31:54 +0100
Subject: [PATCH] powerpc/pseries/iommu: Use a locallock instead
local_irq_save()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
The locallock protects the per-CPU variable tce_page. The function
attempts to allocate memory while tce_page is protected (by disabling
diff --git a/debian/patches-rt/powerpc-remove-printk_nmi_.patch b/debian/patches-rt/powerpc-remove-printk_nmi_.patch
deleted file mode 100644
index e06db159d..000000000
--- a/debian/patches-rt/powerpc-remove-printk_nmi_.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 15 Feb 2019 14:34:20 +0100
-Subject: [PATCH] powerpc: remove printk_nmi_.*()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-It is no longer provided by the printk core code.
-
-Reported-by: kernel test robot <lkp@intel.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/powerpc/kexec/crash.c | 3 ---
- 1 file changed, 3 deletions(-)
-
---- a/arch/powerpc/kexec/crash.c
-+++ b/arch/powerpc/kexec/crash.c
-@@ -311,9 +311,6 @@ void default_machine_crash_shutdown(stru
- unsigned int i;
- int (*old_handler)(struct pt_regs *regs);
-
-- /* Avoid hardlocking with irresponsive CPU holding logbuf_lock */
-- printk_nmi_enter();
--
- /*
- * This function is only called after the system
- * has panicked or is otherwise in a critical state.
diff --git a/debian/patches-rt/powerpc-stackprotector-work-around-stack-guard-init-.patch b/debian/patches-rt/powerpc-stackprotector-work-around-stack-guard-init-.patch
index 36ef7fc91..2b909114d 100644
--- a/debian/patches-rt/powerpc-stackprotector-work-around-stack-guard-init-.patch
+++ b/debian/patches-rt/powerpc-stackprotector-work-around-stack-guard-init-.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 26 Mar 2019 18:31:29 +0100
Subject: [PATCH ] powerpc/stackprotector: work around stack-guard init from
atomic
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
This is invoked from the secondary CPU in atomic context. On x86 we use
tsc instead. On Power we XOR it against mftb() so let's use stack address
diff --git a/debian/patches-rt/preempt-lazy-support.patch b/debian/patches-rt/preempt-lazy-support.patch
index 71a642a73..b7a477b0f 100644
--- a/debian/patches-rt/preempt-lazy-support.patch
+++ b/debian/patches-rt/preempt-lazy-support.patch
@@ -1,7 +1,7 @@
Subject: sched: Add support for lazy preemption
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 26 Oct 2012 18:50:54 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
It has become an obsession to mitigate the determinism vs. throughput
loss of RT. Looking at the mainline semantics of preemption points
@@ -53,7 +53,7 @@ performance.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- include/linux/preempt.h | 35 +++++++++++++++++-
+ include/linux/preempt.h | 51 +++++++++++++++++++++++++-
include/linux/sched.h | 38 +++++++++++++++++++
include/linux/thread_info.h | 12 +++++-
include/linux/trace_events.h | 1
@@ -66,7 +66,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
kernel/trace/trace.h | 2 +
kernel/trace/trace_events.c | 1
kernel/trace/trace_output.c | 14 ++++++-
- 13 files changed, 229 insertions(+), 32 deletions(-)
+ 13 files changed, 243 insertions(+), 34 deletions(-)
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -104,21 +104,26 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define sched_preempt_enable_no_resched() \
do { \
barrier(); \
-@@ -216,6 +236,13 @@ do { \
+@@ -216,6 +236,18 @@ do { \
__preempt_schedule(); \
} while (0)
++/*
++ * open code preempt_check_resched() because it is not exported to modules and
++ * used by local_unlock() or bpf_enable_instrumentation().
++ */
+#define preempt_lazy_enable() \
+do { \
+ dec_preempt_lazy_count(); \
+ barrier(); \
-+ preempt_check_resched(); \
++ if (should_resched(0)) \
++ __preempt_schedule(); \
+} while (0)
+
#else /* !CONFIG_PREEMPTION */
#define preempt_enable() \
do { \
-@@ -223,6 +250,12 @@ do { \
+@@ -223,6 +255,12 @@ do { \
preempt_count_dec(); \
} while (0)
@@ -131,7 +136,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define preempt_enable_notrace() \
do { \
barrier(); \
-@@ -282,7 +315,7 @@ do { \
+@@ -282,7 +320,7 @@ do { \
} while (0)
#define preempt_fold_need_resched() \
do { \
@@ -140,6 +145,24 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
set_preempt_need_resched(); \
} while (0)
+@@ -410,8 +448,15 @@ extern void migrate_enable(void);
+
+ #elif defined(CONFIG_PREEMPT_RT)
+
+-static inline void migrate_disable(void) { }
+-static inline void migrate_enable(void) { }
++static inline void migrate_disable(void)
++{
++ preempt_lazy_disable();
++}
++
++static inline void migrate_enable(void)
++{
++ preempt_lazy_enable();
++}
+
+ #else /* !CONFIG_PREEMPT_RT */
+
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1837,6 +1837,44 @@ static inline int test_tsk_need_resched(
@@ -299,7 +322,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_enable();
trace_sched_migrate_enable_tp(p);
-@@ -3671,6 +3715,9 @@ int sched_fork(unsigned long clone_flags
+@@ -3816,6 +3860,9 @@ int sched_fork(unsigned long clone_flags
p->on_cpu = 0;
#endif
init_task_preempt_count(p);
@@ -309,7 +332,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -4913,6 +4960,7 @@ static void __sched notrace __schedule(b
+@@ -5057,6 +5104,7 @@ static void __sched notrace __schedule(b
next = pick_next_task(rq, prev, &rf);
clear_tsk_need_resched(prev);
@@ -317,7 +340,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_preempt_need_resched();
if (likely(prev != next)) {
-@@ -5109,6 +5157,30 @@ static void __sched notrace preempt_sche
+@@ -5253,6 +5301,30 @@ static void __sched notrace preempt_sche
} while (need_resched());
}
@@ -348,7 +371,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PREEMPTION
/*
* This is the entry point to schedule() from in-kernel preemption
-@@ -5122,7 +5194,8 @@ asmlinkage __visible void __sched notrac
+@@ -5266,7 +5338,8 @@ asmlinkage __visible void __sched notrac
*/
if (likely(!preemptible()))
return;
@@ -358,7 +381,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_schedule_common();
}
NOKPROBE_SYMBOL(preempt_schedule);
-@@ -5162,6 +5235,9 @@ asmlinkage __visible void __sched notrac
+@@ -5306,6 +5379,9 @@ asmlinkage __visible void __sched notrac
if (likely(!preemptible()))
return;
@@ -368,7 +391,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
/*
* Because the function tracer can trace preempt_count_sub()
-@@ -7000,7 +7076,9 @@ void init_idle(struct task_struct *idle,
+@@ -7144,7 +7220,9 @@ void init_idle(struct task_struct *idle,
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
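
To make the two-flag scheme concrete, here is a stand-alone user-space C model — purely illustrative, not kernel code. A hard NEED_RESCHED is honoured at every preempt_enable()-style point, while the lazy flag set for SCHED_OTHER wakeups is only honoured at the outer preemption points, preserving throughput:

#include <stdbool.h>
#include <stdio.h>

static bool need_resched;       /* hard request: e.g. an RT task woke up */
static bool need_resched_lazy;  /* lazy request: a SCHED_OTHER wakeup */

static void preempt_enable_model(void)
{
        if (need_resched)       /* lazy flag deliberately ignored here */
                puts("schedule(): immediate preemption");
}

static void return_to_user_model(void)
{
        if (need_resched || need_resched_lazy)
                puts("schedule(): deferred preemption");
}

int main(void)
{
        need_resched_lazy = true;
        preempt_enable_model(); /* prints nothing: wakeup stays deferred */
        return_to_user_model(); /* lazy request finally honoured */
        return 0;
}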
diff --git a/debian/patches-rt/preempt-nort-rt-variants.patch b/debian/patches-rt/preempt-nort-rt-variants.patch
index 2a133e7da..96f1b4053 100644
--- a/debian/patches-rt/preempt-nort-rt-variants.patch
+++ b/debian/patches-rt/preempt-nort-rt-variants.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 24 Jul 2009 12:38:56 +0200
Subject: preempt: Provide preempt_*_(no)rt variants
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
RT needs a few preempt_disable/enable points which are not necessary
otherwise. Implement variants to avoid #ifdeffery.
diff --git a/debian/patches-rt/printk-Force-a-line-break-on-pr_cont-n.patch b/debian/patches-rt/printk-Force-a-line-break-on-pr_cont-n.patch
deleted file mode 100644
index ab9711301..000000000
--- a/debian/patches-rt/printk-Force-a-line-break-on-pr_cont-n.patch
+++ /dev/null
@@ -1,34 +0,0 @@
-From: =?UTF-8?q?=E6=B1=AA=E5=8B=8710269566?= <wang.yong12@zte.com.cn>
-Date: Thu, 21 May 2020 09:37:44 +0800
-Subject: [PATCH] printk: Force a line break on pr_cont("\n")
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-Since the printk rework, pr_cont("\n") will not lead to a line break.
-A new line will only be created if
-- cpu != c->cpu_owner || !(flags & LOG_CONT)
-- c->len + len > sizeof(c->buf)
-
-Flush the buffer to enforce a new line on pr_cont().
-
-[bigeasy: reword commit message ]
-
-Signed-off-by: 汪勇10269566 <wang.yong12@zte.com.cn>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Acked-by: John Ogness <john.ogness@linutronix.de>
----
- kernel/printk/printk.c | 1 +
- 1 file changed, 1 insertion(+)
-
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -1876,6 +1876,7 @@ static void cont_add(int ctx, int cpu, u
- // but later continuations can add a newline.
- if (flags & LOG_NEWLINE) {
- c->flags |= LOG_NEWLINE;
-+ cont_flush(ctx);
- }
- }
-
diff --git a/debian/patches-rt/printk-Tiny-cleanup.patch b/debian/patches-rt/printk-Tiny-cleanup.patch
new file mode 100644
index 000000000..8b17550a3
--- /dev/null
+++ b/debian/patches-rt/printk-Tiny-cleanup.patch
@@ -0,0 +1,155 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 20 Oct 2020 18:48:16 +0200
+Subject: [PATCH] printk: Tiny cleanup
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+- mark functions and variables static which are used only in this file.
+- add printf annotation where appropriate
+- remove static functions without caller
+- add kdb header file for kgdb builds.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 64 +++++++++++++++----------------------------------
+ 1 file changed, 20 insertions(+), 44 deletions(-)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -49,6 +49,7 @@
+ #include <linux/sched/clock.h>
+ #include <linux/sched/debug.h>
+ #include <linux/sched/task_stack.h>
++#include <linux/kdb.h>
+
+ #include <linux/uaccess.h>
+ #include <asm/sections.h>
+@@ -339,11 +340,11 @@ enum log_flags {
+ };
+
+ /* The syslog_lock protects syslog_* variables. */
+-DEFINE_RAW_SPINLOCK(syslog_lock);
+-#define syslog_lock_irq() raw_spin_lock_irq(&syslog_lock)
+-#define syslog_unlock_irq() raw_spin_unlock_irq(&syslog_lock)
+-#define syslog_lock_irqsave(flags) raw_spin_lock_irqsave(&syslog_lock, flags)
+-#define syslog_unlock_irqrestore(flags) raw_spin_unlock_irqrestore(&syslog_lock, flags)
++static DEFINE_SPINLOCK(syslog_lock);
++#define syslog_lock_irq() spin_lock_irq(&syslog_lock)
++#define syslog_unlock_irq() spin_unlock_irq(&syslog_lock)
++#define syslog_lock_irqsave(flags) spin_lock_irqsave(&syslog_lock, flags)
++#define syslog_unlock_irqrestore(flags) spin_unlock_irqrestore(&syslog_lock, flags)
+
+ #ifdef CONFIG_PRINTK
+ DECLARE_WAIT_QUEUE_HEAD(log_wait);
+@@ -398,7 +399,7 @@ static struct printk_ringbuffer *prb = &
+ */
+ static bool __printk_percpu_data_ready __read_mostly;
+
+-bool printk_percpu_data_ready(void)
++static bool printk_percpu_data_ready(void)
+ {
+ return __printk_percpu_data_ready;
+ }
+@@ -1862,9 +1863,10 @@ static inline u32 printk_caller_id(void)
+ 0x80000000 + raw_smp_processor_id();
+ }
+
+-int vprintk_store(int facility, int level,
+- const struct dev_printk_info *dev_info,
+- const char *fmt, va_list args)
++__printf(4, 0)
++static int vprintk_store(int facility, int level,
++ const struct dev_printk_info *dev_info,
++ const char *fmt, va_list args)
+ {
+ const u32 caller_id = printk_caller_id();
+ struct prb_reserved_entry e;
+@@ -2008,13 +2010,14 @@ asmlinkage int vprintk_emit(int facility
+ }
+ EXPORT_SYMBOL(vprintk_emit);
+
+-int vprintk_default(const char *fmt, va_list args)
++ __printf(1, 0)
++static int vprintk_default(const char *fmt, va_list args)
+ {
+ return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
+ }
+-EXPORT_SYMBOL_GPL(vprintk_default);
+
+-__printf(1, 0) int vprintk_func(const char *fmt, va_list args)
++__printf(1, 0)
++static int vprintk_func(const char *fmt, va_list args)
+ {
+ #ifdef CONFIG_KGDB_KDB
+ /* Allow to pass printk() to kdb but avoid a recursion. */
+@@ -2328,34 +2331,6 @@ int is_console_locked(void)
+ }
+ EXPORT_SYMBOL(is_console_locked);
+
+-/*
+- * Check if we have any console that is capable of printing while cpu is
+- * booting or shutting down. Requires console_sem.
+- */
+-static int have_callable_console(void)
+-{
+- struct console *con;
+-
+- for_each_console(con)
+- if ((con->flags & CON_ENABLED) &&
+- (con->flags & CON_ANYTIME))
+- return 1;
+-
+- return 0;
+-}
+-
+-/*
+- * Can we actually use the console at this time on this cpu?
+- *
+- * Console drivers may assume that per-cpu resources have been allocated. So
+- * unless they're explicitly marked as being able to cope (CON_ANYTIME) don't
+- * call them until this CPU is officially up.
+- */
+-static inline int can_use_console(void)
+-{
+- return cpu_online(raw_smp_processor_id()) || have_callable_console();
+-}
+-
+ /**
+ * console_unlock - unlock the console system
+ *
+@@ -3033,7 +3008,8 @@ void wake_up_klogd(void)
+ preempt_enable();
+ }
+
+-int vprintk_deferred(const char *fmt, va_list args)
++__printf(1, 0)
++static int vprintk_deferred(const char *fmt, va_list args)
+ {
+ return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
+ }
+@@ -3303,7 +3279,7 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
+ * @syslog: include the "<4>" prefixes
+ * @buf: buffer to copy the line to
+ * @size: maximum size of the buffer
+- * @len: length of line placed into buffer
++ * @len_out: length of line placed into buffer
+ *
+ * Start at the end of the kmsg buffer and fill the provided buffer
+ * with as many of the the *youngest* kmsg records that fit into it.
+@@ -3470,7 +3446,7 @@ static bool __prb_trylock(struct prb_cpu
+ *
+ * It is safe to call this function from any context and state.
+ */
+-void prb_lock(struct prb_cpulock *cpu_lock, unsigned int *cpu_store)
++static void prb_lock(struct prb_cpulock *cpu_lock, unsigned int *cpu_store)
+ {
+ for (;;) {
+ if (__prb_trylock(cpu_lock, cpu_store))
+@@ -3488,7 +3464,7 @@ void prb_lock(struct prb_cpulock *cpu_lo
+ *
+ * It is safe to call this function from any context and state.
+ */
+-void prb_unlock(struct prb_cpulock *cpu_lock, unsigned int cpu_store)
++static void prb_unlock(struct prb_cpulock *cpu_lock, unsigned int cpu_store)
+ {
+ unsigned long *flags;
+ unsigned int cpu;
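
The __printf(fmt_idx, vararg_idx) annotations this new patch adds let GCC/Clang type-check format strings at every call site. A minimal sketch with a hypothetical helper (not a function from printk.c):

#include <linux/printk.h>
#include <stdarg.h>

__printf(1, 2)
static void log_sketch(const char *fmt, ...)
{
        va_list args;

        va_start(args, fmt);
        vprintk(fmt, args);
        va_end(args);
}

/* log_sketch("%s", 42) now produces a -Wformat warning at compile time. */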
diff --git a/debian/patches-rt/printk-console-must-not-schedule-for-drivers.patch b/debian/patches-rt/printk-console-must-not-schedule-for-drivers.patch
deleted file mode 100644
index f0d15e9c4..000000000
--- a/debian/patches-rt/printk-console-must-not-schedule-for-drivers.patch
+++ /dev/null
@@ -1,45 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Mon, 6 Apr 2020 23:22:17 +0200
-Subject: [PATCH] printk: console must not schedule for drivers
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-Even though the printk kthread is always preemptible, it is still not
-allowed to call cond_resched() from within console drivers. The
-task may become non-preemptible in the console driver call chain. For
-example, vt_console_print() takes a spinlock and then can call into
-fbcon_redraw(), which can conditionally invoke cond_resched():
-
-|BUG: sleeping function called from invalid context at kernel/printk/printk.c:2322
-|in_atomic(): 1, irqs_disabled(): 0, non_block: 0, pid: 177, name: printk
-|CPU: 0 PID: 177 Comm: printk Not tainted 5.6.2-00011-ga536059557f1d9 #1
-|Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-1 04/01/2014
-|Call Trace:
-| dump_stack+0x66/0x8b
-| ___might_sleep+0x102/0x120
-| console_conditional_schedule+0x24/0x30
-| fbcon_redraw+0x96/0x1c0
-| fbcon_scroll+0x556/0xd70
-| con_scroll+0x147/0x1e0
-| lf+0x9e/0xb0
-| vt_console_print+0x253/0x3d0
-| printk_kthread_func+0x1d5/0x3b0
-
-Disable cond_resched() for the call into the console drivers.
-
-Reported-by: kernel test robot <rong.a.chen@intel.com>
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/printk/printk.c | 1 +
- 1 file changed, 1 insertion(+)
-
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -3101,6 +3101,7 @@ static int printk_kthread_func(void *dat
- &len, printk_time);
-
- console_lock();
-+ console_may_schedule = 0;
- if (len > 0 || ext_len > 0) {
- call_console_drivers(ext_text, ext_len, text, len);
- boot_delay_msec(msg->level);
diff --git a/debian/patches-rt/printk-devkmsg-llseek-reset-clear-if-it-is-lost.patch b/debian/patches-rt/printk-devkmsg-llseek-reset-clear-if-it-is-lost.patch
deleted file mode 100644
index a08669612..000000000
--- a/debian/patches-rt/printk-devkmsg-llseek-reset-clear-if-it-is-lost.patch
+++ /dev/null
@@ -1,46 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Fri, 22 Feb 2019 23:02:44 +0100
-Subject: [PATCH] printk: devkmsg: llseek: reset clear if it is lost
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-SEEK_DATA will seek to the last clear record. If this clear record
-is no longer in the ring buffer, devkmsg_llseek() will go into an
-infinite loop. Fix that by resetting the clear sequence if the old
-clear record is no longer in the ring buffer.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/printk/printk.c | 7 ++++++-
- 1 file changed, 6 insertions(+), 1 deletion(-)
-
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -769,6 +769,7 @@ static loff_t devkmsg_llseek(struct file
- {
- struct devkmsg_user *user = file->private_data;
- loff_t ret;
-+ u64 seq;
-
- if (!user)
- return -EBADF;
-@@ -791,7 +792,7 @@ static loff_t devkmsg_llseek(struct file
- * changes no global state, and does not clear anything.
- */
- for (;;) {
-- prb_iter_init(&user->iter, &printk_rb, NULL);
-+ prb_iter_init(&user->iter, &printk_rb, &seq);
- ret = prb_iter_seek(&user->iter, clear_seq);
- if (ret > 0) {
- /* seeked to clear seq */
-@@ -808,6 +809,10 @@ static loff_t devkmsg_llseek(struct file
- break;
- }
- /* iterator invalid, start over */
-+
-+ /* reset clear_seq if it is no longer available */
-+ if (seq > clear_seq)
-+ clear_seq = 0;
- }
- ret = 0;
- break;
diff --git a/debian/patches-rt/printk-devkmsg-read-Return-EPIPE-when-the-first-mess.patch b/debian/patches-rt/printk-devkmsg-read-Return-EPIPE-when-the-first-mess.patch
deleted file mode 100644
index 5fe870908..000000000
--- a/debian/patches-rt/printk-devkmsg-read-Return-EPIPE-when-the-first-mess.patch
+++ /dev/null
@@ -1,44 +0,0 @@
-From: He Zhe <zhe.he@windriver.com>
-Date: Tue, 24 Sep 2019 15:26:39 +0800
-Subject: [PATCH] printk: devkmsg: read: Return EPIPE when the first
- message user-space wants has gone
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-When user-space wants to read the first message, that is when user->seq
-is 0, and that message has gone, it currently automatically resets
-user->seq to current first seq. This mis-aligns with mainline kernel.
-
-https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/ABI/testing/dev-kmsg#n39
-https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/kernel/printk/printk.c#n899
-
-We should inform user-space that what it wants has gone by returning EPIPE
-in such scenario.
-
-Link: https://lore.kernel.org/r/20190924072639.25986-1-zhe.he@windriver.com
-Signed-off-by: He Zhe <zhe.he@windriver.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/printk/printk.c | 12 ++++--------
- 1 file changed, 4 insertions(+), 8 deletions(-)
-
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -713,14 +713,10 @@ static ssize_t devkmsg_read(struct file
- goto out;
- }
-
-- if (user->seq == 0) {
-- user->seq = seq;
-- } else {
-- user->seq++;
-- if (user->seq < seq) {
-- ret = -EPIPE;
-- goto restore_out;
-- }
-+ user->seq++;
-+ if (user->seq < seq) {
-+ ret = -EPIPE;
-+ goto restore_out;
- }
-
- msg = (struct printk_log *)&user->msgbuf[0];
diff --git a/debian/patches-rt/printk-fix-ifnullfree.cocci-warnings.patch b/debian/patches-rt/printk-fix-ifnullfree.cocci-warnings.patch
deleted file mode 100644
index 73bfb6276..000000000
--- a/debian/patches-rt/printk-fix-ifnullfree.cocci-warnings.patch
+++ /dev/null
@@ -1,47 +0,0 @@
-From: Julia Lawall <julia.lawall@inria.fr>
-Date: Fri, 24 Jul 2020 12:05:31 +0200
-Subject: [PATCH] printk: fix ifnullfree.cocci warnings
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-Make the code a little simpler by dropping
-some unneeded tests.
-
-Generated by: scripts/coccinelle/free/ifnullfree.cocci
-
-Fixes: c406fbce2054 ("printk: implement syslog")
-CC: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: kernel test robot <lkp@intel.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Acked-by: John Ogness <john.ogness@linutronix.de>
----
- kernel/printk/printk.c | 12 ++++--------
- 1 file changed, 4 insertions(+), 8 deletions(-)
-
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -1466,10 +1466,8 @@ static int syslog_print_all(char __user
- if (clear && !seq)
- syslog_clear();
-
-- if (text)
-- kfree(text);
-- if (msgbuf)
-- kfree(msgbuf);
-+ kfree(text);
-+ kfree(msgbuf);
- return len;
- }
-
-@@ -1622,10 +1620,8 @@ int do_syslog(int type, char __user *buf
- break;
- }
- out:
-- if (msgbuf)
-- kfree(msgbuf);
-- if (text)
-- kfree(text);
-+ kfree(msgbuf);
-+ kfree(text);
- return error;
- }
-
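
The cleanup this commit drops rested on a guarantee worth restating: kfree(NULL) is defined to be a no-op, so the removed if (ptr) guards added nothing. Sketch:

#include <linux/slab.h>

static void release_bufs(char *text, char *msgbuf)
{
        /* kfree() tolerates NULL; no guard needed */
        kfree(text);
        kfree(msgbuf);
}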
diff --git a/debian/patches-rt/printk-hack-out-emergency-loglevel-usage.patch b/debian/patches-rt/printk-hack-out-emergency-loglevel-usage.patch
deleted file mode 100644
index b77eb29f7..000000000
--- a/debian/patches-rt/printk-hack-out-emergency-loglevel-usage.patch
+++ /dev/null
@@ -1,53 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Tue, 3 Dec 2019 09:14:57 +0100
-Subject: [PATCH] printk: hack out emergency loglevel usage
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-Instead of using an emergency loglevel to determine if atomic
-messages should be printed, use oops_in_progress. This conforms
-to the decision that latency-causing atomic messages never be
-generated during normal operation.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/printk/printk.c | 13 +++----------
- 1 file changed, 3 insertions(+), 10 deletions(-)
-
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -1782,15 +1782,8 @@ static void call_console_drivers(u64 seq
- con->wrote_history = 1;
- con->printk_seq = seq - 1;
- }
-- if (con->write_atomic && level < emergency_console_loglevel &&
-- facility == 0) {
-- /* skip emergency messages, already printed */
-- if (con->printk_seq < seq)
-- con->printk_seq = seq;
-- continue;
-- }
- if (con->flags & CON_BOOT && facility == 0) {
-- /* skip emergency messages, already printed */
-+ /* skip boot messages, already printed */
- if (con->printk_seq < seq)
- con->printk_seq = seq;
- continue;
-@@ -3233,7 +3226,7 @@ static bool console_can_emergency(int le
- for_each_console(con) {
- if (!(con->flags & CON_ENABLED))
- continue;
-- if (con->write_atomic && level < emergency_console_loglevel)
-+ if (con->write_atomic && oops_in_progress)
- return true;
- if (con->write && (con->flags & CON_BOOT))
- return true;
-@@ -3249,7 +3242,7 @@ static void call_emergency_console_drive
- for_each_console(con) {
- if (!(con->flags & CON_ENABLED))
- continue;
-- if (con->write_atomic && level < emergency_console_loglevel) {
-+ if (con->write_atomic && oops_in_progress) {
- con->write_atomic(con, text, text_len);
- continue;
- }
diff --git a/debian/patches-rt/printk-handle-iterating-while-buffer-changing.patch b/debian/patches-rt/printk-handle-iterating-while-buffer-changing.patch
deleted file mode 100644
index 91c483460..000000000
--- a/debian/patches-rt/printk-handle-iterating-while-buffer-changing.patch
+++ /dev/null
@@ -1,44 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Mon, 7 Oct 2019 16:20:39 +0200
-Subject: [PATCH] printk: handle iterating while buffer changing
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-The syslog and kmsg_dump readers are provided buffers to fill.
-Both try to maximize the provided buffer usage by calculating the
-maximum number of messages that can fit. However, if after the
-calculation, messages are dropped and new messages added, the
-calculation will no longer match.
-
-For syslog, add a check to make sure the provided buffer is not
-overfilled.
-
-For kmsg_dump, start over by recalculating the messages
-available.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/printk/printk.c | 5 ++++-
- 1 file changed, 4 insertions(+), 1 deletion(-)
-
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -1454,6 +1454,9 @@ static int syslog_print_all(char __user
- break;
- }
-
-+ if (len + textlen > size)
-+ break;
-+
- if (copy_to_user(buf + len, text, textlen))
- len = -EFAULT;
- else
-@@ -3147,7 +3150,7 @@ bool kmsg_dump_get_buffer(struct kmsg_du
- ret = prb_iter_next(&iter, msgbuf, PRINTK_RECORD_MAX, &seq);
- if (ret == 0) {
- break;
-- } else if (ret < 0) {
-+ } else if (ret < 0 || seq >= end_seq) {
- prb_iter_init(&iter, &printk_rb, &seq);
- goto retry;
- }
diff --git a/debian/patches-rt/printk-kmsg_dump-remove-mutex-usage.patch b/debian/patches-rt/printk-kmsg_dump-remove-mutex-usage.patch
deleted file mode 100644
index 388d0e590..000000000
--- a/debian/patches-rt/printk-kmsg_dump-remove-mutex-usage.patch
+++ /dev/null
@@ -1,85 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Wed, 24 Apr 2019 16:36:04 +0200
-Subject: [PATCH] printk: kmsg_dump: remove mutex usage
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-The kmsg dumper can be called from any context, but the dumping
-helpers were using a mutex to synchronize the iterator against
-concurrent dumps.
-
-Rather than trying to synchronize the iterator, use a local copy
-of the iterator during the dump. Then no synchronization is
-required.
-
-Reported-by: Scott Wood <swood@redhat.com>
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/printk/printk.c | 23 ++++++++++-------------
- 1 file changed, 10 insertions(+), 13 deletions(-)
-
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -379,8 +379,6 @@ static u64 syslog_seq;
- static size_t syslog_partial;
- static bool syslog_time;
-
--static DEFINE_MUTEX(kmsg_dump_lock);
--
- /* the next printk record to read after the last 'clear' command */
- static u64 clear_seq;
-
-@@ -2932,6 +2930,7 @@ EXPORT_SYMBOL_GPL(kmsg_dump_reason_str);
- */
- void kmsg_dump(enum kmsg_dump_reason reason)
- {
-+ struct kmsg_dumper dumper_local;
- struct kmsg_dumper *dumper;
-
- rcu_read_lock();
-@@ -2949,16 +2948,18 @@ void kmsg_dump(enum kmsg_dump_reason rea
- if (reason > max_reason)
- continue;
-
-- /* initialize iterator with data about the stored records */
-- dumper->active = true;
-+ /*
-+ * use a local copy to avoid modifying the
-+ * iterator used by any other cpus/contexts
-+ */
-+ memcpy(&dumper_local, dumper, sizeof(dumper_local));
-
-- kmsg_dump_rewind(dumper);
-+ /* initialize iterator with data about the stored records */
-+ dumper_local.active = true;
-+ kmsg_dump_rewind(&dumper_local);
-
- /* invoke dumper which will iterate over records */
-- dumper->dump(dumper, reason);
--
-- /* reset iterator */
-- dumper->active = false;
-+ dumper_local.dump(&dumper_local, reason);
- }
- rcu_read_unlock();
- }
-@@ -3070,9 +3071,7 @@ bool kmsg_dump_get_line(struct kmsg_dump
- {
- bool ret;
-
-- mutex_lock(&kmsg_dump_lock);
- ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len);
-- mutex_unlock(&kmsg_dump_lock);
-
- return ret;
- }
-@@ -3224,9 +3223,7 @@ void kmsg_dump_rewind_nolock(struct kmsg
- */
- void kmsg_dump_rewind(struct kmsg_dumper *dumper)
- {
-- mutex_lock(&kmsg_dump_lock);
- kmsg_dump_rewind_nolock(dumper);
-- mutex_unlock(&kmsg_dump_lock);
- }
- EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
-
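The pattern behind this dropped patch can be sketched with illustrative types (not the real struct kmsg_dumper): each dump works on a stack-local copy of the registered iterator, so concurrent dumps from any context never share mutable state and need no lock.

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    struct dumper {
            bool active;
            uint64_t cur_seq;       /* iterator position */
            void (*dump)(struct dumper *d, int reason);
    };

    static void do_dump(const struct dumper *registered, int reason)
    {
            struct dumper local;

            /* work on a private copy; the registered iterator is untouched */
            memcpy(&local, registered, sizeof(local));
            local.active = true;
            local.dump(&local, reason);
    }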
diff --git a/debian/patches-rt/printk-only-allow-kernel-to-emergency-message.patch b/debian/patches-rt/printk-only-allow-kernel-to-emergency-message.patch
deleted file mode 100644
index a18cdb268..000000000
--- a/debian/patches-rt/printk-only-allow-kernel-to-emergency-message.patch
+++ /dev/null
@@ -1,68 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Sun, 17 Feb 2019 03:11:20 +0100
-Subject: [PATCH] printk: only allow kernel to emergency message
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-Emergency messages exist as a mechanism for the kernel to
-communicate critical information to users. The mechanism is not meant
-for use by userspace. Only allow facility=0 messages to be
-processed by the emergency message code.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/printk/printk.c | 17 +++++++++++------
- 1 file changed, 11 insertions(+), 6 deletions(-)
-
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -1762,7 +1762,8 @@ static void printk_write_history(struct
- * The console_lock must be held.
- */
- static void call_console_drivers(u64 seq, const char *ext_text, size_t ext_len,
-- const char *text, size_t len, int level)
-+ const char *text, size_t len, int level,
-+ int facility)
- {
- struct console *con;
-
-@@ -1779,13 +1780,14 @@ static void call_console_drivers(u64 seq
- con->wrote_history = 1;
- con->printk_seq = seq - 1;
- }
-- if (con->write_atomic && level < emergency_console_loglevel) {
-+ if (con->write_atomic && level < emergency_console_loglevel &&
-+ facility == 0) {
- /* skip emergency messages, already printed */
- if (con->printk_seq < seq)
- con->printk_seq = seq;
- continue;
- }
-- if (con->flags & CON_BOOT) {
-+ if (con->flags & CON_BOOT && facility == 0) {
- /* skip emergency messages, already printed */
- if (con->printk_seq < seq)
- con->printk_seq = seq;
-@@ -1956,7 +1958,10 @@ asmlinkage int vprintk_emit(int facility
- * - text points to beginning of text
- * - there is room before text for prefix
- */
-- printk_emergency(rbuf, level & 7, ts_nsec, cpu, text, text_len);
-+ if (facility == 0) {
-+ /* only the kernel can create emergency messages */
-+ printk_emergency(rbuf, level & 7, ts_nsec, cpu, text, text_len);
-+ }
-
- if ((lflags & LOG_CONT) || !(lflags & LOG_NEWLINE)) {
- cont_add(ctx, cpu, caller_id, facility, level, lflags, text, text_len);
-@@ -2753,8 +2758,8 @@ static int printk_kthread_func(void *dat
-
- console_lock();
- console_may_schedule = 0;
-- call_console_drivers(master_seq, ext_text,
-- ext_len, text, len, msg->level);
-+ call_console_drivers(master_seq, ext_text, ext_len, text, len,
-+ msg->level, msg->facility);
- if (len > 0 || ext_len > 0)
- printk_delay(msg->level);
- console_unlock();
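The gate itself is a one-liner and worth spelling out: syslog facility 0 denotes the kernel, while records injected through /dev/kmsg carry a non-zero facility, so checking facility == 0 restricts the emergency path to kernel-originated messages. A hedged sketch:

    /* Sketch: only kernel-originated records (facility 0) may take
     * the emergency path; /dev/kmsg injections have facility != 0. */
    static inline bool emergency_allowed(int facility)
    {
            return facility == 0;
    }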
diff --git a/debian/patches-rt/printk-print-rate-limitted-message-as-info.patch b/debian/patches-rt/printk-print-rate-limitted-message-as-info.patch
deleted file mode 100644
index a241770d9..000000000
--- a/debian/patches-rt/printk-print-rate-limitted-message-as-info.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 22 Feb 2019 12:47:13 +0100
-Subject: [PATCH] printk: print "rate-limitted" message as info
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-If messages which are injected via kmsg are dropped, then they don't need
-to be printed as warnings. This avoids latency spikes if the
-interface decides to print a lot of important messages.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/ratelimit.h | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/include/linux/ratelimit.h
-+++ b/include/linux/ratelimit.h
-@@ -28,7 +28,7 @@ static inline void ratelimit_state_exit(
- return;
-
- if (rs->missed) {
-- pr_warn("%s: %d output lines suppressed due to ratelimiting\n",
-+ pr_info("%s: %d output lines suppressed due to ratelimiting\n",
- current->comm, rs->missed);
- rs->missed = 0;
- }
diff --git a/debian/patches-rt/printk-set-deferred-to-default-loglevel-enforce-mask.patch b/debian/patches-rt/printk-set-deferred-to-default-loglevel-enforce-mask.patch
deleted file mode 100644
index 586c529ee..000000000
--- a/debian/patches-rt/printk-set-deferred-to-default-loglevel-enforce-mask.patch
+++ /dev/null
@@ -1,39 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Thu, 14 Feb 2019 23:13:30 +0100
-Subject: [PATCH] printk: set deferred to default loglevel, enforce mask
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-All messages printed via vprintk_deferred() were being
-automatically treated as emergency messages.
-
-Messages printed via vprintk_deferred() should be set to the
-default loglevel. LOGLEVEL_SCHED is no longer relevant.
-
-Also, enforce the loglevel mask for emergency messages.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/printk/printk.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -1956,7 +1956,7 @@ asmlinkage int vprintk_emit(int facility
- * - text points to beginning of text
- * - there is room before text for prefix
- */
-- printk_emergency(rbuf, level, ts_nsec, cpu, text, text_len);
-+ printk_emergency(rbuf, level & 7, ts_nsec, cpu, text, text_len);
-
- if ((lflags & LOG_CONT) || !(lflags & LOG_NEWLINE)) {
- cont_add(ctx, cpu, caller_id, facility, level, lflags, text, text_len);
-@@ -2782,7 +2782,7 @@ late_initcall(init_printk_kthread);
-
- __printf(1, 0) static int vprintk_deferred(const char *fmt, va_list args)
- {
-- return vprintk_emit(0, LOGLEVEL_SCHED, NULL, 0, fmt, args);
-+ return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
- }
-
- int printk_deferred(const char *fmt, ...)
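The level & 7 masking deserves a note: the syslog severity occupies the low three bits (0 = KERN_EMERG through 7 = KERN_DEBUG), so masking strips any flag bits before the value is compared against the emergency threshold. A minimal sketch:

    /* Sketch: extract the 3-bit syslog severity from a level value
     * that may carry flag bits above bit 2. */
    static inline int severity(int level)
    {
            return level & 7;       /* 0 (emerg) .. 7 (debug) */
    }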
diff --git a/debian/patches-rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/debian/patches-rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
index 72825af0e..3753528f5 100644
--- a/debian/patches-rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
+++ b/debian/patches-rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 29 Aug 2013 18:21:04 +0200
Subject: ptrace: fix ptrace vs tasklist_lock race
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
As explained by Alexander Fyodorov <halcy@yandex.ru>:
@@ -116,7 +116,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_unlock_irq(&task->sighand->siglock);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2424,6 +2424,18 @@ int migrate_swap(struct task_struct *cur
+@@ -2568,6 +2568,18 @@ int migrate_swap(struct task_struct *cur
}
#endif /* CONFIG_NUMA_BALANCING */
@@ -135,7 +135,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* wait_task_inactive - wait for a thread to unschedule.
*
-@@ -2468,7 +2480,7 @@ unsigned long wait_task_inactive(struct
+@@ -2612,7 +2624,7 @@ unsigned long wait_task_inactive(struct
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
@@ -144,7 +144,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
cpu_relax();
}
-@@ -2483,7 +2495,8 @@ unsigned long wait_task_inactive(struct
+@@ -2627,7 +2639,8 @@ unsigned long wait_task_inactive(struct
running = task_running(rq, p);
queued = task_on_rq_queued(p);
ncsw = 0;
diff --git a/debian/patches-rt/random-make-it-work-on-rt.patch b/debian/patches-rt/random-make-it-work-on-rt.patch
index 80a7989eb..ba70535b9 100644
--- a/debian/patches-rt/random-make-it-work-on-rt.patch
+++ b/debian/patches-rt/random-make-it-work-on-rt.patch
@@ -1,7 +1,7 @@
Subject: random: Make it work on rt
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 21 Aug 2012 20:38:50 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Delegate the random insertion to the forced threaded interrupt
handler. Store the return IP of the hard interrupt handler in the irq
diff --git a/debian/patches-rt/rcu--Prevent-false-positive-softirq-warning-on-RT.patch b/debian/patches-rt/rcu--Prevent-false-positive-softirq-warning-on-RT.patch
index 273c75da1..62684f676 100644
--- a/debian/patches-rt/rcu--Prevent-false-positive-softirq-warning-on-RT.patch
+++ b/debian/patches-rt/rcu--Prevent-false-positive-softirq-warning-on-RT.patch
@@ -1,7 +1,7 @@
Subject: rcu: Prevent false positive softirq warning on RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 31 Aug 2020 17:26:08 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Soft interrupt disabled sections can legitimately be preempted or schedule
out when blocking on a lock on RT enabled kernels so the RCU preempt check
diff --git a/debian/patches-rt/rcu-Use-rcuc-threads-on-PREEMPT_RT-as-we-did.patch b/debian/patches-rt/rcu-Use-rcuc-threads-on-PREEMPT_RT-as-we-did.patch
index d8925c3ad..5596bcb02 100644
--- a/debian/patches-rt/rcu-Use-rcuc-threads-on-PREEMPT_RT-as-we-did.patch
+++ b/debian/patches-rt/rcu-Use-rcuc-threads-on-PREEMPT_RT-as-we-did.patch
@@ -1,7 +1,7 @@
From: Scott Wood <swood@redhat.com>
Date: Wed, 11 Sep 2019 17:57:28 +0100
Subject: [PATCH] rcu: Use rcuc threads on PREEMPT_RT as we did
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
While switching to the reworked RCU-thread code, enabling the thread
processing on -RT was forgotten.
diff --git a/debian/patches-rt/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch b/debian/patches-rt/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch
index 05deda323..f7b8c2c7c 100644
--- a/debian/patches-rt/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch
+++ b/debian/patches-rt/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch
@@ -1,7 +1,7 @@
From: Julia Cartwright <julia@ni.com>
Date: Wed, 12 Oct 2016 11:21:14 -0500
Subject: [PATCH] rcu: enable rcu_normal_after_boot by default for RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
The forcing of an expedited grace period is an expensive and very
RT-application unfriendly operation, as it forcibly preempts all running
diff --git a/debian/patches-rt/rcu-make-RCU_BOOST-default-on-RT.patch b/debian/patches-rt/rcu-make-RCU_BOOST-default-on-RT.patch
index dec0af704..449b774cc 100644
--- a/debian/patches-rt/rcu-make-RCU_BOOST-default-on-RT.patch
+++ b/debian/patches-rt/rcu-make-RCU_BOOST-default-on-RT.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 21 Mar 2014 20:19:05 +0100
Subject: rcu: make RCU_BOOST default on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Since it is no longer invoked from the softirq people run into OOM more
often if the priority of the RCU thread is too low. Making boosting
diff --git a/debian/patches-rt/rcutorture-Avoid-problematic-critical-section-nestin.patch b/debian/patches-rt/rcutorture-Avoid-problematic-critical-section-nestin.patch
index 7731d907a..02408f8af 100644
--- a/debian/patches-rt/rcutorture-Avoid-problematic-critical-section-nestin.patch
+++ b/debian/patches-rt/rcutorture-Avoid-problematic-critical-section-nestin.patch
@@ -2,7 +2,7 @@ From: Scott Wood <swood@redhat.com>
Date: Wed, 11 Sep 2019 17:57:29 +0100
Subject: [PATCH] rcutorture: Avoid problematic critical section nesting
on RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
rcutorture was generating some nesting scenarios that are not
reasonable. Constrain the state selection to avoid them.
diff --git a/debian/patches-rt/rt-introduce-cpu-chill.patch b/debian/patches-rt/rt-introduce-cpu-chill.patch
index ba5bd3211..a111023fc 100644
--- a/debian/patches-rt/rt-introduce-cpu-chill.patch
+++ b/debian/patches-rt/rt-introduce-cpu-chill.patch
@@ -1,7 +1,7 @@
Subject: rt: Introduce cpu_chill()
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 07 Mar 2012 20:51:03 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Retry loops on RT might loop forever when the modifying side was
preempted. Add cpu_chill() to replace cpu_relax(). cpu_chill()
diff --git a/debian/patches-rt/rt-local-irq-lock.patch b/debian/patches-rt/rt-local-irq-lock.patch
index be3485177..abad564ef 100644
--- a/debian/patches-rt/rt-local-irq-lock.patch
+++ b/debian/patches-rt/rt-local-irq-lock.patch
@@ -1,7 +1,7 @@
Subject: rt: Add local irq locks
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 20 Jun 2011 09:03:47 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Introduce locallock. For !RT this maps to preempt_disable()/
local_irq_disable() so there is not much that changes. For RT this will
diff --git a/debian/patches-rt/sched-disable-rt-group-sched-on-rt.patch b/debian/patches-rt/sched-disable-rt-group-sched-on-rt.patch
index e399ab539..151a73e7b 100644
--- a/debian/patches-rt/sched-disable-rt-group-sched-on-rt.patch
+++ b/debian/patches-rt/sched-disable-rt-group-sched-on-rt.patch
@@ -1,7 +1,7 @@
Subject: sched: Disable CONFIG_RT_GROUP_SCHED on RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 18 Jul 2011 17:03:52 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Carsten reported problems when running:
@@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -964,6 +964,7 @@ config CFS_BANDWIDTH
+@@ -965,6 +965,7 @@ config CFS_BANDWIDTH
config RT_GROUP_SCHED
bool "Group scheduling for SCHED_RR/FIFO"
depends on CGROUP_SCHED
diff --git a/debian/patches-rt/sched-disable-ttwu-queue.patch b/debian/patches-rt/sched-disable-ttwu-queue.patch
index 6b623aef6..7eb4f3aee 100644
--- a/debian/patches-rt/sched-disable-ttwu-queue.patch
+++ b/debian/patches-rt/sched-disable-ttwu-queue.patch
@@ -1,7 +1,7 @@
Subject: sched: Disable TTWU_QUEUE on RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 13 Sep 2011 16:42:35 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
The queued remote wakeup mechanism can introduce rather large
latencies if the number of migrated tasks is high. Disable it for RT.
diff --git a/debian/patches-rt/sched-limit-nr-migrate.patch b/debian/patches-rt/sched-limit-nr-migrate.patch
index d5581cf9e..8dbefa0ed 100644
--- a/debian/patches-rt/sched-limit-nr-migrate.patch
+++ b/debian/patches-rt/sched-limit-nr-migrate.patch
@@ -1,7 +1,7 @@
Subject: sched: Limit the number of task migrations per batch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 06 Jun 2011 12:12:51 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Put an upper limit on the number of tasks which are migrated per batch
to avoid large latencies.
diff --git a/debian/patches-rt/sched-might-sleep-do-not-account-rcu-depth.patch b/debian/patches-rt/sched-might-sleep-do-not-account-rcu-depth.patch
index dfc17ae19..b343f6687 100644
--- a/debian/patches-rt/sched-might-sleep-do-not-account-rcu-depth.patch
+++ b/debian/patches-rt/sched-might-sleep-do-not-account-rcu-depth.patch
@@ -1,7 +1,7 @@
Subject: sched: Do not account rcu_preempt_depth on RT in might_sleep()
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 07 Jun 2011 09:19:06 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
RT changes the rcu_preempt_depth semantics, so we cannot check for it
in might_sleep().
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Internal to kernel */
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7712,7 +7712,7 @@ void __init sched_init(void)
+@@ -7853,7 +7853,7 @@ void __init sched_init(void)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
diff --git a/debian/patches-rt/sched-mmdrop-delayed.patch b/debian/patches-rt/sched-mmdrop-delayed.patch
index 186c9b509..e63593b2f 100644
--- a/debian/patches-rt/sched-mmdrop-delayed.patch
+++ b/debian/patches-rt/sched-mmdrop-delayed.patch
@@ -1,7 +1,7 @@
Subject: sched: Move mmdrop to RCU on RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 06 Jun 2011 12:20:33 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Takes sleeping locks and calls into the memory allocator, so nothing
we want to do in task switch and other atomic contexts.
@@ -78,7 +78,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct mm_struct *mm;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4080,9 +4080,13 @@ static struct rq *finish_task_switch(str
+@@ -4224,9 +4224,13 @@ static struct rq *finish_task_switch(str
* provided by mmdrop(),
* - a sync_core for SYNC_CORE.
*/
@@ -93,7 +93,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
if (unlikely(prev_state == TASK_DEAD)) {
if (prev->sched_class->task_dead)
-@@ -7097,6 +7101,7 @@ void sched_setnuma(struct task_struct *p
+@@ -7241,6 +7245,7 @@ void sched_setnuma(struct task_struct *p
#endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/debian/patches-rt/scsi-fcoe-rt-aware.patch b/debian/patches-rt/scsi-fcoe-rt-aware.patch
index 6997721b0..6f1d59116 100644
--- a/debian/patches-rt/scsi-fcoe-rt-aware.patch
+++ b/debian/patches-rt/scsi-fcoe-rt-aware.patch
@@ -1,7 +1,7 @@
Subject: scsi/fcoe: Make RT aware.
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 12 Nov 2011 14:00:48 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Do not disable preemption while taking sleeping locks. All users look safe
for migrate_disable() only.
diff --git a/debian/patches-rt/seqlock-Fix-multiple-kernel-doc-warnings.patch b/debian/patches-rt/seqlock-Fix-multiple-kernel-doc-warnings.patch
index 0885a4942..65175ea25 100644
--- a/debian/patches-rt/seqlock-Fix-multiple-kernel-doc-warnings.patch
+++ b/debian/patches-rt/seqlock-Fix-multiple-kernel-doc-warnings.patch
@@ -1,7 +1,7 @@
From: Randy Dunlap <rdunlap@infradead.org>
Date: Sun, 16 Aug 2020 17:02:00 -0700
Subject: [PATCH] seqlock: Fix multiple kernel-doc warnings
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Fix kernel-doc warnings in <linux/seqlock.h>.
diff --git a/debian/patches-rt/seqlock-Unbreak-lockdep.patch b/debian/patches-rt/seqlock-Unbreak-lockdep.patch
new file mode 100644
index 000000000..f2084fe39
--- /dev/null
+++ b/debian/patches-rt/seqlock-Unbreak-lockdep.patch
@@ -0,0 +1,69 @@
+From: "peterz@infradead.org" <peterz@infradead.org>
+Date: Tue, 15 Sep 2020 16:30:28 +0200
+Subject: [PATCH] seqlock: Unbreak lockdep
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
+
+Upstream commit 267580db047ef428a70bef8287ca62c5a450c139
+
+seqcount_LOCKNAME_init() needs to be a macro due to the lockdep
+annotation in seqcount_init(). Since a macro cannot define another
+macro, we need to effectively revert commit: e4e9ab3f9f91 ("seqlock:
+Fold seqcount_LOCKNAME_init() definition").
+
+Fixes: e4e9ab3f9f91 ("seqlock: Fold seqcount_LOCKNAME_init() definition")
+Reported-by: Qian Cai <cai@redhat.com>
+Debugged-by: Boqun Feng <boqun.feng@gmail.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Tested-by: Qian Cai <cai@redhat.com>
+Link: https://lkml.kernel.org/r/20200915143028.GB2674@hirez.programming.kicks-ass.net
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/seqlock.h | 22 ++++++++++++++--------
+ 1 file changed, 14 insertions(+), 8 deletions(-)
+
+--- a/include/linux/seqlock.h
++++ b/include/linux/seqlock.h
+@@ -173,6 +173,19 @@ static inline void seqcount_lockdep_read
+ * @lock: Pointer to the associated lock
+ */
+
++#define seqcount_LOCKNAME_init(s, _lock, lockname) \
++ do { \
++ seqcount_##lockname##_t *____s = (s); \
++ seqcount_init(&____s->seqcount); \
++ __SEQ_LOCK(____s->lock = (_lock)); \
++ } while (0)
++
++#define seqcount_raw_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, raw_spinlock)
++#define seqcount_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, spinlock)
++#define seqcount_rwlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, rwlock);
++#define seqcount_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, mutex);
++#define seqcount_ww_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, ww_mutex);
++
+ /*
+ * SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers
+ * seqprop_LOCKNAME_*() - Property accessors for seqcount_LOCKNAME_t
+@@ -190,13 +203,6 @@ typedef struct seqcount_##lockname {
+ __SEQ_LOCK(locktype *lock); \
+ } seqcount_##lockname##_t; \
+ \
+-static __always_inline void \
+-seqcount_##lockname##_init(seqcount_##lockname##_t *s, locktype *lock) \
+-{ \
+- seqcount_init(&s->seqcount); \
+- __SEQ_LOCK(s->lock = lock); \
+-} \
+- \
+ static __always_inline seqcount_t * \
+ __seqprop_##lockname##_ptr(seqcount_##lockname##_t *s) \
+ { \
+@@ -284,8 +290,8 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct w
+ __SEQ_LOCK(.lock = (assoc_lock)) \
+ }
+
+-#define SEQCNT_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
+ #define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
++#define SEQCNT_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
+ #define SEQCNT_RWLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
+ #define SEQCNT_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
+ #define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
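The constraint driving this upstream fix: seqcount_init() must expand at its call site because lockdep wants one static lock_class_key per initialization site, and since the SEQCOUNT_LOCKNAME() generator macro cannot itself define macros, the init helpers move back out of it. A sketch of the call-site-key idiom (mirroring, not quoting, the kernel's seqcount_init):

    /* Each expansion creates a distinct static key, giving lockdep a
     * separate lock class per init call site. A function could not do
     * this -- all callers would share one key. */
    #define my_seqcount_init(s)                                     \
            do {                                                    \
                    static struct lock_class_key __key;             \
                    __seqcount_init((s), #s, &__key);               \
            } while (0)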
diff --git a/debian/patches-rt/serial-8250-export-symbols-which-are-used-by-symbols.patch b/debian/patches-rt/serial-8250-export-symbols-which-are-used-by-symbols.patch
deleted file mode 100644
index 71831575b..000000000
--- a/debian/patches-rt/serial-8250-export-symbols-which-are-used-by-symbols.patch
+++ /dev/null
@@ -1,39 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Sat, 16 Feb 2019 09:02:00 +0100
-Subject: [PATCH] serial: 8250: export symbols which are used by symbols
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/tty/serial/8250/8250_port.c | 2 ++
- kernel/printk/printk.c | 1 +
- 2 files changed, 3 insertions(+)
-
---- a/drivers/tty/serial/8250/8250_port.c
-+++ b/drivers/tty/serial/8250/8250_port.c
-@@ -2117,6 +2117,7 @@ void clear_ier(struct uart_8250_port *up
- }
- console_atomic_unlock(flags);
- }
-+EXPORT_SYMBOL_GPL(clear_ier);
-
- void restore_ier(struct uart_8250_port *up)
- {
-@@ -2128,6 +2129,7 @@ void restore_ier(struct uart_8250_port *
- serial_port_out(port, UART_IER, atomic_read(&ier_value));
- console_atomic_unlock(flags);
- }
-+EXPORT_SYMBOL_GPL(restore_ier);
-
- #ifdef CONFIG_CONSOLE_POLL
- /*
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -2251,6 +2251,7 @@ int is_console_locked(void)
- {
- return console_locked;
- }
-+EXPORT_SYMBOL(is_console_locked);
-
- /**
- * console_unlock - unlock the console system
diff --git a/debian/patches-rt/serial-8250-fsl-ingenic-mtk-fix-atomic-console.patch b/debian/patches-rt/serial-8250-fsl-ingenic-mtk-fix-atomic-console.patch
deleted file mode 100644
index ef9bfb629..000000000
--- a/debian/patches-rt/serial-8250-fsl-ingenic-mtk-fix-atomic-console.patch
+++ /dev/null
@@ -1,103 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Fri, 10 Jan 2020 16:45:32 +0106
-Subject: [PATCH] serial: 8250: fsl/ingenic/mtk: fix atomic console
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-A few 8250 implementations have their own IER access. If the port
-is a console, wrap the accesses with console_atomic_lock.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/tty/serial/8250/8250_fsl.c | 9 +++++++++
- drivers/tty/serial/8250/8250_ingenic.c | 7 +++++++
- drivers/tty/serial/8250/8250_mtk.c | 29 +++++++++++++++++++++++++++--
- 3 files changed, 43 insertions(+), 2 deletions(-)
-
---- a/drivers/tty/serial/8250/8250_fsl.c
-+++ b/drivers/tty/serial/8250/8250_fsl.c
-@@ -53,9 +53,18 @@ int fsl8250_handle_irq(struct uart_port
-
- /* Stop processing interrupts on input overrun */
- if ((orig_lsr & UART_LSR_OE) && (up->overrun_backoff_time_ms > 0)) {
-+ unsigned int ca_flags;
- unsigned long delay;
-+ bool is_console;
-
-+ is_console = uart_console(port);
-+
-+ if (is_console)
-+ console_atomic_lock(&ca_flags);
- up->ier = port->serial_in(port, UART_IER);
-+ if (is_console)
-+ console_atomic_unlock(ca_flags);
-+
- if (up->ier & (UART_IER_RLSI | UART_IER_RDI)) {
- port->ops->stop_rx(port);
- } else {
---- a/drivers/tty/serial/8250/8250_ingenic.c
-+++ b/drivers/tty/serial/8250/8250_ingenic.c
-@@ -146,6 +146,8 @@ OF_EARLYCON_DECLARE(x1000_uart, "ingenic
-
- static void ingenic_uart_serial_out(struct uart_port *p, int offset, int value)
- {
-+ unsigned int flags;
-+ bool is_console;
- int ier;
-
- switch (offset) {
-@@ -167,7 +169,12 @@ static void ingenic_uart_serial_out(stru
- * If we have enabled modem status IRQs we should enable
- * modem mode.
- */
-+ is_console = uart_console(p);
-+ if (is_console)
-+ console_atomic_lock(&flags);
- ier = p->serial_in(p, UART_IER);
-+ if (is_console)
-+ console_atomic_unlock(flags);
-
- if (ier & UART_IER_MSI)
- value |= UART_MCR_MDCE | UART_MCR_FCM;
---- a/drivers/tty/serial/8250/8250_mtk.c
-+++ b/drivers/tty/serial/8250/8250_mtk.c
-@@ -213,12 +213,37 @@ static void mtk8250_shutdown(struct uart
-
- static void mtk8250_disable_intrs(struct uart_8250_port *up, int mask)
- {
-- serial_out(up, UART_IER, serial_in(up, UART_IER) & (~mask));
-+ struct uart_port *port = &up->port;
-+ unsigned int flags;
-+ unsigned int ier;
-+ bool is_console;
-+
-+ is_console = uart_console(port);
-+
-+ if (is_console)
-+ console_atomic_lock(&flags);
-+
-+ ier = serial_in(up, UART_IER);
-+ serial_out(up, UART_IER, ier & (~mask));
-+
-+ if (is_console)
-+ console_atomic_unlock(flags);
- }
-
- static void mtk8250_enable_intrs(struct uart_8250_port *up, int mask)
- {
-- serial_out(up, UART_IER, serial_in(up, UART_IER) | mask);
-+ struct uart_port *port = &up->port;
-+ unsigned int flags;
-+ unsigned int ier;
-+
-+ if (uart_console(port))
-+ console_atomic_lock(&flags);
-+
-+ ier = serial_in(up, UART_IER);
-+ serial_out(up, UART_IER, ier | mask);
-+
-+ if (uart_console(port))
-+ console_atomic_unlock(flags);
- }
-
- static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode)
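All three drivers apply the same shape of change; a condensed sketch of it, using the RT-specific console_atomic_lock API these patches introduce: take the atomic console lock around the IER access only when the port actually is a console.

    static unsigned int read_ier(struct uart_port *port)
    {
            unsigned int flags, ier;
            bool is_console = uart_console(port);

            if (is_console)
                    console_atomic_lock(&flags);
            ier = port->serial_in(port, UART_IER);
            if (is_console)
                    console_atomic_unlock(flags);

            return ier;
    }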
diff --git a/debian/patches-rt/serial-8250-only-atomic-lock-for-console.patch b/debian/patches-rt/serial-8250-only-atomic-lock-for-console.patch
deleted file mode 100644
index dc4846a95..000000000
--- a/debian/patches-rt/serial-8250-only-atomic-lock-for-console.patch
+++ /dev/null
@@ -1,385 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Fri, 10 Jan 2020 16:45:31 +0106
-Subject: [PATCH] serial: 8250: only atomic lock for console
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-The atomic console implementation requires that IER is synchronized
-between atomic and non-atomic usage. However, it was implemented such
-that the console_atomic_lock was performed for all IER access, even
-if that port was not a console.
-
-The implementation also used a usage counter to keep track of IER
-clear/restore windows. However, this is not needed because the
-console_atomic_lock synchronization of IER access will prevent any
-situations where IER is prematurely restored or left cleared.
-
-Move the IER access functions to inline macros. They will only
-console_atomic_lock if the port is a console. Remove the
-restore_ier() function by having clear_ier() return the prior IER
-value so that the caller can restore it using set_ier(). Rename the
-IER access functions to match other 8250 wrapper macros.
-
-Suggested-by: Dick Hollenbeck <dick@softplc.com>
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/tty/serial/8250/8250.h | 65 ++++++++++++++++++----------
- drivers/tty/serial/8250/8250_core.c | 6 +-
- drivers/tty/serial/8250/8250_dma.c | 4 -
- drivers/tty/serial/8250/8250_port.c | 82 +++++++-----------------------------
- 4 files changed, 66 insertions(+), 91 deletions(-)
-
---- a/drivers/tty/serial/8250/8250.h
-+++ b/drivers/tty/serial/8250/8250.h
-@@ -96,10 +96,6 @@ struct serial8250_config {
- #define SERIAL8250_SHARE_IRQS 0
- #endif
-
--void set_ier(struct uart_8250_port *up, unsigned char ier);
--void clear_ier(struct uart_8250_port *up);
--void restore_ier(struct uart_8250_port *up);
--
- #define SERIAL8250_PORT_FLAGS(_base, _irq, _flags) \
- { \
- .iobase = _base, \
-@@ -134,39 +130,64 @@ static inline void serial_dl_write(struc
- up->dl_write(up, value);
- }
-
--static inline bool serial8250_set_THRI(struct uart_8250_port *up)
-+static inline void serial8250_set_IER(struct uart_8250_port *up,
-+ unsigned char ier)
- {
-- if (up->ier & UART_IER_THRI)
-- return false;
-- up->ier |= UART_IER_THRI;
-- serial_out(up, UART_IER, up->ier);
-- return true;
-+ struct uart_port *port = &up->port;
-+ unsigned int flags;
-+ bool is_console;
-+
-+ is_console = uart_console(port);
-+
-+ if (is_console)
-+ console_atomic_lock(&flags);
-+
-+ serial_out(up, UART_IER, ier);
-+
-+ if (is_console)
-+ console_atomic_unlock(flags);
- }
-
--static inline bool serial8250_set_THRI_sier(struct uart_8250_port *up)
-+static inline unsigned char serial8250_clear_IER(struct uart_8250_port *up)
- {
-- if (up->ier & UART_IER_THRI)
-- return false;
-- up->ier |= UART_IER_THRI;
-- set_ier(up, up->ier);
-- return true;
-+ struct uart_port *port = &up->port;
-+ unsigned int clearval = 0;
-+ unsigned int prior;
-+ unsigned int flags;
-+ bool is_console;
-+
-+ is_console = uart_console(port);
-+
-+ if (up->capabilities & UART_CAP_UUE)
-+ clearval = UART_IER_UUE;
-+
-+ if (is_console)
-+ console_atomic_lock(&flags);
-+
-+ prior = serial_port_in(port, UART_IER);
-+ serial_port_out(port, UART_IER, clearval);
-+
-+ if (is_console)
-+ console_atomic_unlock(flags);
-+
-+ return prior;
- }
-
--static inline bool serial8250_clear_THRI(struct uart_8250_port *up)
-+static inline bool serial8250_set_THRI(struct uart_8250_port *up)
- {
-- if (!(up->ier & UART_IER_THRI))
-+ if (up->ier & UART_IER_THRI)
- return false;
-- up->ier &= ~UART_IER_THRI;
-- serial_out(up, UART_IER, up->ier);
-+ up->ier |= UART_IER_THRI;
-+ serial8250_set_IER(up, up->ier);
- return true;
- }
-
--static inline bool serial8250_clear_THRI_sier(struct uart_8250_port *up)
-+static inline bool serial8250_clear_THRI(struct uart_8250_port *up)
- {
- if (!(up->ier & UART_IER_THRI))
- return false;
- up->ier &= ~UART_IER_THRI;
-- set_ier(up, up->ier);
-+ serial8250_set_IER(up, up->ier);
- return true;
- }
-
---- a/drivers/tty/serial/8250/8250_core.c
-+++ b/drivers/tty/serial/8250/8250_core.c
-@@ -265,7 +265,7 @@ static void serial8250_timeout(struct ti
- static void serial8250_backup_timeout(struct timer_list *t)
- {
- struct uart_8250_port *up = from_timer(up, t, timer);
-- unsigned int iir, lsr;
-+ unsigned int iir, ier = 0, lsr;
- unsigned long flags;
-
- spin_lock_irqsave(&up->port.lock, flags);
-@@ -275,7 +275,7 @@ static void serial8250_backup_timeout(st
- * based handler.
- */
- if (up->port.irq)
-- clear_ier(up);
-+ ier = serial8250_clear_IER(up);
-
- iir = serial_in(up, UART_IIR);
-
-@@ -298,7 +298,7 @@ static void serial8250_backup_timeout(st
- serial8250_tx_chars(up);
-
- if (up->port.irq)
-- restore_ier(up);
-+ serial8250_set_IER(up, ier);
-
- spin_unlock_irqrestore(&up->port.lock, flags);
-
---- a/drivers/tty/serial/8250/8250_dma.c
-+++ b/drivers/tty/serial/8250/8250_dma.c
-@@ -35,7 +35,7 @@ static void __dma_tx_complete(void *para
-
- ret = serial8250_tx_dma(p);
- if (ret)
-- serial8250_set_THRI_sier(p);
-+ serial8250_set_THRI(p);
-
- spin_unlock_irqrestore(&p->port.lock, flags);
- }
-@@ -98,7 +98,7 @@ int serial8250_tx_dma(struct uart_8250_p
- dma_async_issue_pending(dma->txchan);
- if (dma->tx_err) {
- dma->tx_err = 0;
-- serial8250_clear_THRI_sier(p);
-+ serial8250_clear_THRI(p);
- }
- return 0;
- err:
---- a/drivers/tty/serial/8250/8250_port.c
-+++ b/drivers/tty/serial/8250/8250_port.c
-@@ -757,7 +757,7 @@ static void serial8250_set_sleep(struct
- serial_out(p, UART_EFR, UART_EFR_ECB);
- serial_out(p, UART_LCR, 0);
- }
-- set_ier(p, sleep ? UART_IERX_SLEEP : 0);
-+ serial8250_set_IER(p, sleep ? UART_IERX_SLEEP : 0);
- if (p->capabilities & UART_CAP_EFR) {
- serial_out(p, UART_LCR, UART_LCR_CONF_MODE_B);
- serial_out(p, UART_EFR, efr);
-@@ -1429,7 +1429,7 @@ static void serial8250_stop_rx(struct ua
-
- up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
- up->port.read_status_mask &= ~UART_LSR_DR;
-- set_ier(up, up->ier);
-+ serial8250_set_IER(up, up->ier);
-
- serial8250_rpm_put(up);
- }
-@@ -1459,7 +1459,7 @@ void serial8250_em485_stop_tx(struct uar
- serial8250_clear_and_reinit_fifos(p);
-
- p->ier |= UART_IER_RLSI | UART_IER_RDI;
-- set_ier(p, p->ier);
-+ serial8250_set_IER(p, p->ier);
- }
- }
- EXPORT_SYMBOL_GPL(serial8250_em485_stop_tx);
-@@ -1515,7 +1515,7 @@ static void __stop_tx_rs485(struct uart_
-
- static inline void __do_stop_tx(struct uart_8250_port *p)
- {
-- if (serial8250_clear_THRI_sier(p))
-+ if (serial8250_clear_THRI(p))
- serial8250_rpm_put_tx(p);
- }
-
-@@ -1563,7 +1563,7 @@ static inline void __start_tx(struct uar
- if (up->dma && !up->dma->tx_dma(up))
- return;
-
-- if (serial8250_set_THRI_sier(up)) {
-+ if (serial8250_set_THRI(up)) {
- if (up->bugs & UART_BUG_TXEN) {
- unsigned char lsr;
-
-@@ -1687,7 +1687,7 @@ static void serial8250_disable_ms(struct
- mctrl_gpio_disable_ms(up->gpios);
-
- up->ier &= ~UART_IER_MSI;
-- set_ier(up, up->ier);
-+ serial8250_set_IER(up, up->ier);
- }
-
- static void serial8250_enable_ms(struct uart_port *port)
-@@ -1703,7 +1703,7 @@ static void serial8250_enable_ms(struct
- up->ier |= UART_IER_MSI;
-
- serial8250_rpm_get(up);
-- set_ier(up, up->ier);
-+ serial8250_set_IER(up, up->ier);
- serial8250_rpm_put(up);
- }
-
-@@ -2083,54 +2083,6 @@ static void wait_for_xmitr(struct uart_8
- }
- }
-
--static atomic_t ier_counter = ATOMIC_INIT(0);
--static atomic_t ier_value = ATOMIC_INIT(0);
--
--void set_ier(struct uart_8250_port *up, unsigned char ier)
--{
-- struct uart_port *port = &up->port;
-- unsigned int flags;
--
-- console_atomic_lock(&flags);
-- if (atomic_read(&ier_counter) > 0)
-- atomic_set(&ier_value, ier);
-- else
-- serial_port_out(port, UART_IER, ier);
-- console_atomic_unlock(flags);
--}
--
--void clear_ier(struct uart_8250_port *up)
--{
-- struct uart_port *port = &up->port;
-- unsigned int ier_cleared = 0;
-- unsigned int flags;
-- unsigned int ier;
--
-- console_atomic_lock(&flags);
-- atomic_inc(&ier_counter);
-- ier = serial_port_in(port, UART_IER);
-- if (up->capabilities & UART_CAP_UUE)
-- ier_cleared = UART_IER_UUE;
-- if (ier != ier_cleared) {
-- serial_port_out(port, UART_IER, ier_cleared);
-- atomic_set(&ier_value, ier);
-- }
-- console_atomic_unlock(flags);
--}
--EXPORT_SYMBOL_GPL(clear_ier);
--
--void restore_ier(struct uart_8250_port *up)
--{
-- struct uart_port *port = &up->port;
-- unsigned int flags;
--
-- console_atomic_lock(&flags);
-- if (atomic_fetch_dec(&ier_counter) == 1)
-- serial_port_out(port, UART_IER, atomic_read(&ier_value));
-- console_atomic_unlock(flags);
--}
--EXPORT_SYMBOL_GPL(restore_ier);
--
- #ifdef CONFIG_CONSOLE_POLL
- /*
- * Console polling routines for writing and reading from the uart while
-@@ -2162,10 +2114,11 @@ static int serial8250_get_poll_char(stru
- static void serial8250_put_poll_char(struct uart_port *port,
- unsigned char c)
- {
-+ unsigned int ier;
- struct uart_8250_port *up = up_to_u8250p(port);
-
- serial8250_rpm_get(up);
-- clear_ier(up);
-+ ier = serial8250_clear_IER(up);
-
- wait_for_xmitr(up, BOTH_EMPTY);
- /*
-@@ -2178,7 +2131,7 @@ static void serial8250_put_poll_char(str
- * and restore the IER
- */
- wait_for_xmitr(up, BOTH_EMPTY);
-- restore_ier(up);
-+ serial8250_set_IER(up, ier);
- serial8250_rpm_put(up);
- }
-
-@@ -2481,7 +2434,7 @@ void serial8250_do_shutdown(struct uart_
- */
- spin_lock_irqsave(&port->lock, flags);
- up->ier = 0;
-- set_ier(up, 0);
-+ serial8250_set_IER(up, 0);
- spin_unlock_irqrestore(&port->lock, flags);
-
- synchronize_irq(port->irq);
-@@ -2808,7 +2761,7 @@ serial8250_do_set_termios(struct uart_po
- if (up->capabilities & UART_CAP_RTOIE)
- up->ier |= UART_IER_RTOIE;
-
-- set_ier(up, up->ier);
-+ serial8250_set_IER(up, up->ier);
-
- if (up->capabilities & UART_CAP_EFR) {
- unsigned char efr = 0;
-@@ -3320,12 +3273,13 @@ void serial8250_console_write_atomic(str
- {
- struct uart_port *port = &up->port;
- unsigned int flags;
-+ unsigned int ier;
-
- console_atomic_lock(&flags);
-
- touch_nmi_watchdog();
-
-- clear_ier(up);
-+ ier = serial8250_clear_IER(up);
-
- if (atomic_fetch_inc(&up->console_printing)) {
- uart_console_write(port, "\n", 1,
-@@ -3335,7 +3289,7 @@ void serial8250_console_write_atomic(str
- atomic_dec(&up->console_printing);
-
- wait_for_xmitr(up, BOTH_EMPTY);
-- restore_ier(up);
-+ serial8250_set_IER(up, ier);
-
- console_atomic_unlock(flags);
- }
-@@ -3355,12 +3309,13 @@ void serial8250_console_write(struct uar
- struct uart_8250_em485 *em485 = up->em485;
- struct uart_port *port = &up->port;
- unsigned long flags;
-+ unsigned int ier;
-
- touch_nmi_watchdog();
-
- spin_lock_irqsave(&port->lock, flags);
-
-- clear_ier(up);
-+ ier = serial8250_clear_IER(up);
-
- /* check scratch reg to see if port powered off during system sleep */
- if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) {
-@@ -3389,8 +3344,7 @@ void serial8250_console_write(struct uar
- if (em485->tx_stopped)
- up->rs485_stop_tx(up);
- }
--
-- restore_ier(up);
-+ serial8250_set_IER(up, ier);
-
- /*
- * The receive handling will happen properly because the
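The caller-side effect of the redesign, sketched from the hunks above: clear_IER() hands back the prior register value, so every clear window is balanced by a plain restore and no global usage counter is needed.

    unsigned int ier;

    ier = serial8250_clear_IER(up);     /* mask interrupts, keep old value */
    /* ... emit console output ... */
    wait_for_xmitr(up, BOTH_EMPTY);
    serial8250_set_IER(up, ier);        /* restore exactly what was there */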
diff --git a/debian/patches-rt/serial-8250-remove-that-trylock-in-serial8250_consol.patch b/debian/patches-rt/serial-8250-remove-that-trylock-in-serial8250_consol.patch
deleted file mode 100644
index 0b3b896a1..000000000
--- a/debian/patches-rt/serial-8250-remove-that-trylock-in-serial8250_consol.patch
+++ /dev/null
@@ -1,44 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Thu, 14 Feb 2019 17:38:24 +0100
-Subject: [PATCH] serial: 8250: remove that trylock in
- serial8250_console_write_atomic()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
-
-This does not work, as spinlocks are rtmutex-based on RT and cannot be
-taken in NMI context. As per John, it is not needed.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/tty/serial/8250/8250_port.c | 11 -----------
- 1 file changed, 11 deletions(-)
-
---- a/drivers/tty/serial/8250/8250_port.c
-+++ b/drivers/tty/serial/8250/8250_port.c
-@@ -3318,17 +3318,9 @@ void serial8250_console_write_atomic(str
- {
- struct uart_port *port = &up->port;
- unsigned int flags;
-- bool locked;
-
- console_atomic_lock(&flags);
-
-- /*
-- * If possible, keep any other CPUs from working with the
-- * UART until the atomic message is completed. This helps
-- * to keep the output more orderly.
-- */
-- locked = spin_trylock(&port->lock);
--
- touch_nmi_watchdog();
-
- clear_ier(up);
-@@ -3343,9 +3335,6 @@ void serial8250_console_write_atomic(str
- wait_for_xmitr(up, BOTH_EMPTY);
- restore_ier(up);
-
-- if (locked)
-- spin_unlock(&port->lock);
--
- console_atomic_unlock(flags);
- }
-
diff --git a/debian/patches-rt/series b/debian/patches-rt/series
index 1a9a9eb1e..bf9069477 100644
--- a/debian/patches-rt/series
+++ b/debian/patches-rt/series
@@ -5,65 +5,45 @@
############################################################
# UPSTREAM merged
############################################################
+# John's printk series. Bits from the v5.10 merge window.
+# d594d8f411d47bf7b583ec3474b11fec348c88bb
+0001-crash-add-VMCOREINFO-macro-to-define-offset-in-a-str.patch
+0002-printk-add-lockless-ringbuffer.patch
+0003-Revert-printk-lock-unlock-console-only-for-new-logbu.patch
+0004-printk-use-the-lockless-ringbuffer.patch
+0005-MAINTAIERS-Add-John-Ogness-as-printk-reviewer.patch
+0006-printk-ringbuffer-support-dataless-records.patch
+0007-printk-reduce-LOG_BUF_SHIFT-range-for-H8300.patch
+0008-docs-vmcoreinfo-add-lockless-printk-ringbuffer-vmcor.patch
+0009-scripts-gdb-add-utils.read_ulong.patch
+0010-scripts-gdb-update-for-lockless-printk-ringbuffer.patch
+0011-printk-ringbuffer-fix-setting-state-in-desc_read.patch
+0012-printk-ringbuffer-avoid-memcpy-on-state_var.patch
+0013-printk-ringbuffer-relocate-get_data.patch
+0014-printk-ringbuffer-add-BLK_DATALESS-macro.patch
+0015-printk-ringbuffer-clear-initial-reserved-fields.patch
+0016-printk-ringbuffer-change-representation-of-states.patch
+0017-printk-ringbuffer-add-finalization-extension-support.patch
+0018-printk-reimplement-log_cont-using-record-extension.patch
+0019-printk-move-printk_info-into-separate-array.patch
+0020-printk-move-dictionary-keys-to-dev_printk_info.patch
+0021-printk-remove-dict-ring.patch
+0022-printk-avoid-and-or-handle-record-truncation.patch
+0023-printk-reduce-setup_text_buf-size-to-LOG_LINE_MAX.patch
+0024-printk-Use-fallthrough-pseudo-keyword.patch
+# eac48eb6ce10c1dc6fd3366608f4d3ca2430166c
+0025-printk-ringbuffer-Wrong-data-pointer-when-appending-.patch
############################################################
# POSTED by others
############################################################
-
-# John's printk series
-# [RFC PATCH v1 00/25] printk: new implementation
-# Date: Tue, 12 Feb 2019 15:29:38 +0100
-# Plus build fixes merged
-0001-printk-rb-add-printk-ring-buffer-documentation.patch
-0002-printk-rb-add-prb-locking-functions.patch
-0003-printk-rb-define-ring-buffer-struct-and-initializer.patch
-0004-printk-rb-add-writer-interface.patch
-0005-printk-rb-add-basic-non-blocking-reading-interface.patch
-0006-printk-rb-add-blocking-reader-support.patch
-0007-printk-rb-add-functionality-required-by-printk.patch
-0008-printk-add-ring-buffer-and-kthread.patch
-0009-printk-remove-exclusive-console-hack.patch
-printk-console-must-not-schedule-for-drivers.patch
-0010-printk-redirect-emit-store-to-new-ringbuffer.patch
-0011-printk_safe-remove-printk-safe-code.patch
-0012-printk-minimize-console-locking-implementation.patch
-0013-printk-track-seq-per-console.patch
-0014-printk-do-boot_delay_msec-inside-printk_delay.patch
-0015-printk-print-history-for-new-consoles.patch
-0016-printk-implement-CON_PRINTBUFFER.patch
-0017-printk-add-processor-number-to-output.patch
-0018-console-add-write_atomic-interface.patch
-0019-printk-introduce-emergency-messages.patch
-0020-serial-8250-implement-write_atomic.patch
-0021-printk-implement-KERN_CONT.patch
-0022-printk-implement-dev-kmsg.patch
-0023-printk-implement-syslog.patch
-0024-printk-implement-kmsg_dump.patch
-0025-printk-remove-unused-code.patch
-printk-set-deferred-to-default-loglevel-enforce-mask.patch
-serial-8250-remove-that-trylock-in-serial8250_consol.patch
-serial-8250-export-symbols-which-are-used-by-symbols.patch
-arm-remove-printk_nmi_.patch
-powerpc-remove-printk_nmi_.patch
-printk-only-allow-kernel-to-emergency-message.patch
-printk-devkmsg-llseek-reset-clear-if-it-is-lost.patch
-printk-print-rate-limitted-message-as-info.patch
-printk-kmsg_dump-remove-mutex-usage.patch
-printk-devkmsg-read-Return-EPIPE-when-the-first-mess.patch
-printk-handle-iterating-while-buffer-changing.patch
-printk-hack-out-emergency-loglevel-usage.patch
-printk-Force-a-line-break-on-pr_cont-n.patch
-serial-8250-only-atomic-lock-for-console.patch
-serial-8250-fsl-ingenic-mtk-fix-atomic-console.patch
-printk-fix-ifnullfree.cocci-warnings.patch
-
# Part of [PATCH 0/4] more mm switching vs TLB shootdown and lazy tlb
# Date: Fri, 28 Aug 2020 20:00:18 +1000
# https://lkml.kernel.org/r/20200828100022.1099682-2-npiggin@gmail.com
mm-fix-exec-activate_mm-vs-TLB-shootdown-and-lazy-tl.patch
-# 2020-10-05 16:57 Peter Zijlstra [PATCH -v2 00/17] sched: Migrate disable support
-# 20201005145717.346020688@infradead.org
+# 2020-10-23 12:11 Peter Zijlstra [PATCH v4 00/19] sched: Migrate disable support
+# 20201023101158.088940906@infradead.org
0001-stop_machine-Add-function-and-caller-debug-info.patch
0002-sched-Fix-balance_callback.patch
0003-sched-hotplug-Ensure-only-per-cpu-kthreads-run-durin.patch
@@ -81,6 +61,8 @@ mm-fix-exec-activate_mm-vs-TLB-shootdown-and-lazy-tl.patch
0015-sched-Fix-migrate_disable-vs-rt-dl-balancing.patch
0016-sched-proc-Print-accurate-cpumask-vs-migrate_disable.patch
0017-sched-Add-migrate_disable-tracepoints.patch
+0018-sched-Deny-self-issued-__set_cpus_allowed_ptr-when-m.patch
+0019-sched-Comment-affine_move_task.patch
############################################################
# POSTED
@@ -97,10 +79,35 @@ io_wq-Make-io_wqe-lock-a-raw_spinlock_t.patch
# 20200915074816.52zphpywj4zidspk@linutronix.de
bus-mhi-Remove-include-of-rwlock_types.h.patch
+# 20201028141251.3608598-1-bigeasy@linutronix.de
+0001-blk-mq-Don-t-complete-on-a-remote-CPU-in-force-threa.patch
+0002-blk-mq-Always-complete-remote-completions-requests-i.patch
+0003-blk-mq-Use-llist_head-for-blk_cpu_done.patch
+
+# 20201028181041.xyeothhkouc3p4md@linutronix.de
+lib-test_lockup-Minimum-fix-to-get-it-compiled-on-PR.patch
+
############################################################
# Ready for posting
############################################################
+# John's printk series.
+0001-printk-refactor-kmsg_dump_get_buffer.patch
+0002-printk-use-buffer-pools-for-sprint-buffers.patch
+0003-printk-change-clear_seq-to-atomic64_t.patch
+0004-printk-remove-logbuf_lock-add-syslog_lock.patch
+0005-printk-remove-safe-buffers.patch
+0006-console-add-write_atomic-interface.patch
+0007-serial-8250-implement-write_atomic.patch
+0008-printk-inline-log_output-log_store-in-vprintk_store.patch
+0009-printk-relocate-printk_delay-and-vprintk_default.patch
+0010-printk-combine-boot_delay_msec-into-printk_delay.patch
+0011-printk-introduce-kernel-sync-mode.patch
+0012-printk-move-console-printing-to-kthreads.patch
+0013-printk-remove-deferred-printing.patch
+0014-printk-add-console-handover.patch
+printk-Tiny-cleanup.patch
+
############################################################
# Needs to address review feedback
############################################################
@@ -144,23 +151,22 @@ tasklets-Use-static-line-for-functions.patch
0004-locking-rtmutex-Remove-rt_mutex_timed_lock.patch
0005-locking-rtmutex-Handle-the-various-new-futex-race-co.patch
0006-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch
-0007-locking-rtmutex-Add-rtmutex_lock_killable.patch
-0008-locking-rtmutex-Make-lock_killable-work.patch
-0009-locking-spinlock-Split-the-lock-types-header.patch
-0010-locking-rtmutex-Avoid-include-hell.patch
-0011-lockdep-Reduce-header-files-in-debug_locks.h.patch
-0012-locking-split-out-the-rbtree-definition.patch
-0013-locking-rtmutex-Provide-rt_mutex_slowlock_locked.patch
-0014-locking-rtmutex-export-lockdep-less-version-of-rt_mu.patch
-0015-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch
-0016-locking-rtmutex-add-sleeping-lock-implementation.patch
-0017-locking-rtmutex-Allow-rt_mutex_trylock-on-PREEMPT_RT.patch
-0018-locking-rtmutex-add-mutex-implementation-based-on-rt.patch
-0019-locking-rtmutex-add-rwsem-implementation-based-on-rt.patch
-0020-locking-rtmutex-add-rwlock-implementation-based-on-r.patch
-0021-locking-rtmutex-wire-up-RT-s-locking.patch
-0022-locking-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch
-0023-locking-rtmutex-Use-custom-scheduling-function-for-s.patch
+0007-locking-rtmutex-Make-lock_killable-work.patch
+0008-locking-spinlock-Split-the-lock-types-header.patch
+0009-locking-rtmutex-Avoid-include-hell.patch
+0010-lockdep-Reduce-header-files-in-debug_locks.h.patch
+0011-locking-split-out-the-rbtree-definition.patch
+0012-locking-rtmutex-Provide-rt_mutex_slowlock_locked.patch
+0013-locking-rtmutex-export-lockdep-less-version-of-rt_mu.patch
+0014-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch
+0015-locking-rtmutex-add-sleeping-lock-implementation.patch
+0016-locking-rtmutex-Allow-rt_mutex_trylock-on-PREEMPT_RT.patch
+0017-locking-rtmutex-add-mutex-implementation-based-on-rt.patch
+0018-locking-rtmutex-add-rwsem-implementation-based-on-rt.patch
+0019-locking-rtmutex-add-rwlock-implementation-based-on-r.patch
+0020-locking-rtmutex-wire-up-RT-s-locking.patch
+0021-locking-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch
+0022-locking-rtmutex-Use-custom-scheduling-function-for-s.patch
###############################################################
# Stuff broken upstream and upstream wants something different
@@ -178,6 +184,7 @@ signal-revert-ptrace-preempt-magic.patch
# PREEMPT NORT
preempt-nort-rt-variants.patch
mm-make-vmstat-rt-aware.patch
+mm-memcontrol-Disable-preemption-in-__mod_memcg_lruv.patch
# seqcount
# https://lkml.kernel.org/r/20200817000200.20993-1-rdunlap@infradead.org
@@ -194,11 +201,13 @@ seqlock-Fix-multiple-kernel-doc-warnings.patch
0008-seqlock-seqcount-latch-APIs-Only-allow-seqcount_latc.patch
# 2020-09-04 17:32 Ahmed S. Darwis [PATCH v2 0/5] seqlock: Introduce PREEMPT_RT support
# 20200904153231.11994-1-a.darwish@linutronix.de
-0009-seqlock-seqcount_LOCKNAME_t-Standardize-naming-conve.patch
-0010-seqlock-Use-unique-prefix-for-seqcount_t-property-ac.patch
-0011-seqlock-seqcount_t-Implement-all-read-APIs-as-statem.patch
-0012-seqlock-seqcount_LOCKNAME_t-Introduce-PREEMPT_RT-sup.patch
-0013-seqlock-PREEMPT_RT-Do-not-starve-seqlock_t-writers.patch
+#0009-seqlock-seqcount_LOCKNAME_t-Standardize-naming-conve.patch
+#0010-seqlock-Use-unique-prefix-for-seqcount_t-property-ac.patch
+#0011-seqlock-seqcount_t-Implement-all-read-APIs-as-statem.patch
+#0012-seqlock-seqcount_LOCKNAME_t-Introduce-PREEMPT_RT-sup.patch
+#0013-seqlock-PREEMPT_RT-Do-not-starve-seqlock_t-writers.patch
+## 267580db047ef428a70bef8287ca62c5a450c139
+#seqlock-Unbreak-lockdep.patch
##
0024-xfrm-Use-sequence-counter-with-associated-spinlock.patch
u64_stats-Disable-preemption-on-32bit-UP-SMP-with-RT.patch
diff --git a/debian/patches-rt/shmem-Use-raw_spinlock_t-for-stat_lock.patch b/debian/patches-rt/shmem-Use-raw_spinlock_t-for-stat_lock.patch
index 10917f36b..8f4b69112 100644
--- a/debian/patches-rt/shmem-Use-raw_spinlock_t-for-stat_lock.patch
+++ b/debian/patches-rt/shmem-Use-raw_spinlock_t-for-stat_lock.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 14 Aug 2020 18:53:34 +0200
Subject: [PATCH] shmem: Use raw_spinlock_t for ->stat_lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Each CPU has SHMEM_INO_BATCH inodes available in `->ino_batch' which is
per-CPU. Access here is serialized by disabling preemption. If the pool is
diff --git a/debian/patches-rt/signal-Prevent-double-free-of-user-struct.patch b/debian/patches-rt/signal-Prevent-double-free-of-user-struct.patch
index b6f7b9a5f..cb272f54b 100644
--- a/debian/patches-rt/signal-Prevent-double-free-of-user-struct.patch
+++ b/debian/patches-rt/signal-Prevent-double-free-of-user-struct.patch
@@ -1,7 +1,7 @@
From: Matt Fleming <matt@codeblueprint.co.uk>
Date: Tue, 7 Apr 2020 10:54:13 +0100
Subject: [PATCH] signal: Prevent double-free of user struct
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
The way user struct reference counting works changed significantly with,
diff --git a/debian/patches-rt/signal-revert-ptrace-preempt-magic.patch b/debian/patches-rt/signal-revert-ptrace-preempt-magic.patch
index 797ed91cf..8d95d6554 100644
--- a/debian/patches-rt/signal-revert-ptrace-preempt-magic.patch
+++ b/debian/patches-rt/signal-revert-ptrace-preempt-magic.patch
@@ -1,7 +1,7 @@
Subject: signal: Revert ptrace preempt magic
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 21 Sep 2011 19:57:12 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Upstream commit '53da1d9456fe7f8 fix ptrace slowness' is nothing more
than a bandaid around the ptrace design trainwreck. It's not a
diff --git a/debian/patches-rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/debian/patches-rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
index 1761801c3..3cc014f26 100644
--- a/debian/patches-rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
+++ b/debian/patches-rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 3 Jul 2009 08:44:56 -0500
Subject: signals: Allow rt tasks to cache one sigqueue struct
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
To avoid allocation allow rt tasks to cache one sigqueue struct in
task struct.
diff --git a/debian/patches-rt/skbufhead-raw-lock.patch b/debian/patches-rt/skbufhead-raw-lock.patch
index 1e79f841c..7d8a7fef1 100644
--- a/debian/patches-rt/skbufhead-raw-lock.patch
+++ b/debian/patches-rt/skbufhead-raw-lock.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 12 Jul 2011 15:38:34 +0200
Subject: net: Use skbufhead with raw lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Use the rps lock as a raw lock so we can keep irq-off regions. It looks low
latency. However we can't kfree() from this context, therefore we defer this
@@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
}
-@@ -10965,7 +10965,7 @@ static int __init net_dev_init(void)
+@@ -11045,7 +11045,7 @@ static int __init net_dev_init(void)
INIT_WORK(flush, flush_backlog);
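
The deferral trick mentioned above is worth spelling out: with a raw lock
held (IRQs effectively off, on RT too), kfree()/kfree_skb() is not allowed,
so objects are parked on a list and freed later from softirq context. A
rough sketch under those assumptions (hypothetical function name):

    #include <linux/skbuff.h>
    #include <linux/interrupt.h>

    /* Caller holds the raw backlog lock: don't free here; queue the skb
     * on a dedicated list and let the softirq free it later. */
    static void drop_skb_deferred(struct sk_buff_head *tofree_queue,
                                  struct sk_buff *skb)
    {
        __skb_queue_tail(tofree_queue, skb);
        raise_softirq_irqoff(NET_RX_SOFTIRQ);
    }
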
diff --git a/debian/patches-rt/slub-disable-SLUB_CPU_PARTIAL.patch b/debian/patches-rt/slub-disable-SLUB_CPU_PARTIAL.patch
index b57c2b4f4..276d100a0 100644
--- a/debian/patches-rt/slub-disable-SLUB_CPU_PARTIAL.patch
+++ b/debian/patches-rt/slub-disable-SLUB_CPU_PARTIAL.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 15 Apr 2015 19:00:47 +0200
Subject: slub: Disable SLUB_CPU_PARTIAL
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:915
|in_atomic(): 1, irqs_disabled(): 0, pid: 87, name: rcuop/7
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -1960,7 +1960,7 @@ config SHUFFLE_PAGE_ALLOCATOR
+@@ -1961,7 +1961,7 @@ config SHUFFLE_PAGE_ALLOCATOR
config SLUB_CPU_PARTIAL
default y
diff --git a/debian/patches-rt/slub-enable-irqs-for-no-wait.patch b/debian/patches-rt/slub-enable-irqs-for-no-wait.patch
index 5866d9588..817078dab 100644
--- a/debian/patches-rt/slub-enable-irqs-for-no-wait.patch
+++ b/debian/patches-rt/slub-enable-irqs-for-no-wait.patch
@@ -1,7 +1,7 @@
Subject: slub: Enable irqs for __GFP_WAIT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 09 Jan 2013 12:08:15 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
SYSTEM_RUNNING might be too late for enabling interrupts. Allocations
with GFP_WAIT can happen before that. So use this as an indicator.
diff --git a/debian/patches-rt/softirq--Add-RT-variant.patch b/debian/patches-rt/softirq--Add-RT-variant.patch
index 53c5f361c..77f67fa26 100644
--- a/debian/patches-rt/softirq--Add-RT-variant.patch
+++ b/debian/patches-rt/softirq--Add-RT-variant.patch
@@ -1,7 +1,7 @@
Subject: softirq: Add RT variant
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 21 Sep 2020 17:26:19 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
diff --git a/debian/patches-rt/softirq--Replace-barrier---with-cpu_relax---in-tasklet_unlock_wait--.patch b/debian/patches-rt/softirq--Replace-barrier---with-cpu_relax---in-tasklet_unlock_wait--.patch
index ec902b1f0..56624f4c8 100644
--- a/debian/patches-rt/softirq--Replace-barrier---with-cpu_relax---in-tasklet_unlock_wait--.patch
+++ b/debian/patches-rt/softirq--Replace-barrier---with-cpu_relax---in-tasklet_unlock_wait--.patch
@@ -1,7 +1,7 @@
Subject: softirq: Replace barrier() with cpu_relax() in tasklet_unlock_wait()
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 31 Aug 2020 15:12:38 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
A barrier() in a tight loop which waits for something to happen on a remote
CPU is a pointless exercise. Replace it with cpu_relax() which allows HT
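
A sketch of the resulting wait-loop shape — cpu_relax() compiles to a
pause/yield style hint (e.g. PAUSE on x86), letting an SMT sibling use the
core while we spin:

    #include <linux/compiler.h>   /* READ_ONCE(); cpu_relax() comes
                                     via asm/processor.h */

    static void wait_until_cleared(const unsigned long *flag)
    {
        while (READ_ONCE(*flag))
            cpu_relax();   /* polite busy-wait instead of barrier() */
    }
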
diff --git a/debian/patches-rt/softirq-disable-softirq-stacks-for-rt.patch b/debian/patches-rt/softirq-disable-softirq-stacks-for-rt.patch
index 389bdf008..02a874e2d 100644
--- a/debian/patches-rt/softirq-disable-softirq-stacks-for-rt.patch
+++ b/debian/patches-rt/softirq-disable-softirq-stacks-for-rt.patch
@@ -1,7 +1,7 @@
Subject: softirq: Disable softirq stacks for RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 18 Jul 2011 13:59:17 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Disable extra stacks for softirqs. We want to preempt softirqs and
having them on a special IRQ stack does not make this easier.
diff --git a/debian/patches-rt/softirq-preempt-fix-3-re.patch b/debian/patches-rt/softirq-preempt-fix-3-re.patch
index 477a2cbcc..af05ea3c4 100644
--- a/debian/patches-rt/softirq-preempt-fix-3-re.patch
+++ b/debian/patches-rt/softirq-preempt-fix-3-re.patch
@@ -1,7 +1,7 @@
Subject: softirq: Check preemption after reenabling interrupts
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 13 Nov 2011 17:17:09 +0100 (CET)
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
raise_softirq_irqoff() disables interrupts and wakes the softirq
daemon, but after reenabling interrupts there is no preemption check,
@@ -15,30 +15,11 @@ Reported-by: Carsten Emde <cbe@osadl.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- block/blk-mq.c | 2 ++
include/linux/preempt.h | 3 +++
lib/irq_poll.c | 5 +++++
net/core/dev.c | 7 +++++++
- 4 files changed, 17 insertions(+)
+ 3 files changed, 15 insertions(+)
---- a/block/blk-mq.c
-+++ b/block/blk-mq.c
-@@ -604,6 +604,7 @@ static void blk_mq_trigger_softirq(struc
- if (list->next == &rq->ipi_list)
- raise_softirq_irqoff(BLOCK_SOFTIRQ);
- local_irq_restore(flags);
-+ preempt_check_resched_rt();
- }
-
- static int blk_softirq_cpu_dead(unsigned int cpu)
-@@ -617,6 +618,7 @@ static int blk_softirq_cpu_dead(unsigned
- this_cpu_ptr(&blk_cpu_done));
- raise_softirq_irqoff(BLOCK_SOFTIRQ);
- local_irq_enable();
-+ preempt_check_resched_rt();
-
- return 0;
- }
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -187,8 +187,10 @@ do { \
@@ -151,7 +132,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__napi_schedule);
-@@ -10631,6 +10637,7 @@ static int dev_cpu_dead(unsigned int old
+@@ -10711,6 +10717,7 @@ static int dev_cpu_dead(unsigned int old
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
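
The pattern the remaining hunks apply is the same everywhere; a condensed
sketch (note that preempt_check_resched_rt() exists only in the RT patch
queue, not in mainline):

    #include <linux/interrupt.h>

    static void kick_tx_softirq(void)
    {
        unsigned long flags;

        local_irq_save(flags);
        raise_softirq_irqoff(NET_TX_SOFTIRQ); /* may wake ksoftirqd */
        local_irq_restore(flags);
        preempt_check_resched_rt();           /* RT-only: reschedule now
                                                 if the wakeup needs it */
    }
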
diff --git a/debian/patches-rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch b/debian/patches-rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
index a236ae102..921f444bb 100644
--- a/debian/patches-rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
+++ b/debian/patches-rt/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
@@ -1,7 +1,7 @@
From: Mike Galbraith <umgwanakikbuti@gmail.com>
Date: Wed, 18 Feb 2015 16:05:28 +0100
Subject: sunrpc: Make svc_xprt_do_enqueue() use get_cpu_light()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:915
|in_atomic(): 1, irqs_disabled(): 0, pid: 3194, name: rpc.nfsd
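
The fix uses get_cpu_light(), an RT-tree helper (not mainline) that pins
the task to its CPU with migrate_disable() instead of disabling preemption,
so sleeping locks such as spinlock_t remain legal in the section. Sketched
under that assumption:

    #include <linux/kernel.h>

    /* RT-tree only: get_cpu_light()/put_cpu_light() pair
     * migrate_disable()/migrate_enable() with smp_processor_id(). */
    static void do_percpu_work(void)
    {
        int cpu = get_cpu_light();

        pr_debug("enqueueing on CPU %d\n", cpu); /* section may take a
                                                    spinlock_t, which
                                                    sleeps on RT */
        put_cpu_light();
    }
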
diff --git a/debian/patches-rt/sysfs-realtime-entry.patch b/debian/patches-rt/sysfs-realtime-entry.patch
index aeb26f3b0..aa0be6c7a 100644
--- a/debian/patches-rt/sysfs-realtime-entry.patch
+++ b/debian/patches-rt/sysfs-realtime-entry.patch
@@ -1,7 +1,7 @@
Subject: sysfs: Add /sys/kernel/realtime entry
From: Clark Williams <williams@redhat.com>
Date: Sat Jul 30 21:55:53 2011 -0500
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Add a /sys/kernel entry to indicate that the kernel is a
realtime kernel.
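
For the general shape of such an entry, a minimal read-only attribute on
kernel_kobj looks roughly like this (standard kobject/sysfs API; not
necessarily the patch's exact code):

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/kobject.h>
    #include <linux/sysfs.h>

    static ssize_t realtime_show(struct kobject *kobj,
                                 struct kobj_attribute *attr, char *buf)
    {
        return sprintf(buf, "1\n");   /* built as a realtime kernel */
    }
    static struct kobj_attribute realtime_attr = __ATTR_RO(realtime);

    static int __init rt_sysfs_init(void)
    {
        /* creates /sys/kernel/realtime */
        return sysfs_create_file(kernel_kobj, &realtime_attr.attr);
    }
    late_initcall(rt_sysfs_init);
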
diff --git a/debian/patches-rt/tasklets--Avoid-cancel-kill-deadlock-on-RT.patch b/debian/patches-rt/tasklets--Avoid-cancel-kill-deadlock-on-RT.patch
index afff8a1ed..d494c5ba6 100644
--- a/debian/patches-rt/tasklets--Avoid-cancel-kill-deadlock-on-RT.patch
+++ b/debian/patches-rt/tasklets--Avoid-cancel-kill-deadlock-on-RT.patch
@@ -1,7 +1,7 @@
Subject: tasklets: Avoid cancel/kill deadlock on RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 21 Sep 2020 17:47:34 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
diff --git a/debian/patches-rt/tasklets-Use-static-line-for-functions.patch b/debian/patches-rt/tasklets-Use-static-line-for-functions.patch
index 72824e8a6..d12b30352 100644
--- a/debian/patches-rt/tasklets-Use-static-line-for-functions.patch
+++ b/debian/patches-rt/tasklets-Use-static-line-for-functions.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 7 Sep 2020 22:57:32 +0200
Subject: [PATCH] tasklets: Use static line for functions
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Inlines exist for a reason.
diff --git a/debian/patches-rt/tcp-Remove-superfluous-BH-disable-around-listening_h.patch b/debian/patches-rt/tcp-Remove-superfluous-BH-disable-around-listening_h.patch
index c3e986abc..1efb5f0eb 100644
--- a/debian/patches-rt/tcp-Remove-superfluous-BH-disable-around-listening_h.patch
+++ b/debian/patches-rt/tcp-Remove-superfluous-BH-disable-around-listening_h.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 12 Oct 2020 17:33:54 +0200
Subject: [PATCH] tcp: Remove superfluous BH-disable around listening_hash
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Commit
9652dc2eb9e40 ("tcp: relax listening_hash operations")
@@ -16,15 +16,13 @@ inet_unhash() conditionally acquires listening_hash->lock.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- net/ipv4/inet_hashtables.c | 19 ++++++++++++-------
- net/ipv6/inet6_hashtables.c | 5 +----
+ net/ipv4/inet_hashtables.c | 19 ++++++++++++-------
+ net/ipv6/inet6_hashtables.c | 5 +----
2 files changed, 13 insertions(+), 11 deletions(-)
-diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
-index 239e54474b653..fcb105cbb5465 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
-@@ -585,7 +585,9 @@ int __inet_hash(struct sock *sk, struct sock *osk)
+@@ -585,7 +585,9 @@ int __inet_hash(struct sock *sk, struct
int err = 0;
if (sk->sk_state != TCP_LISTEN) {
@@ -82,8 +80,6 @@ index 239e54474b653..fcb105cbb5465 100644
}
EXPORT_SYMBOL_GPL(inet_unhash);
-diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
-index 2d3add9e61162..50fd17cbf3ec7 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -335,11 +335,8 @@ int inet6_hash(struct sock *sk)
@@ -99,6 +95,3 @@ index 2d3add9e61162..50fd17cbf3ec7 100644
return err;
}
---
-2.28.0
-
diff --git a/debian/patches-rt/tick-sched--Prevent-false-positive-softirq-pending-warnings-on-RT.patch b/debian/patches-rt/tick-sched--Prevent-false-positive-softirq-pending-warnings-on-RT.patch
index 0bc8602c6..405787e7d 100644
--- a/debian/patches-rt/tick-sched--Prevent-false-positive-softirq-pending-warnings-on-RT.patch
+++ b/debian/patches-rt/tick-sched--Prevent-false-positive-softirq-pending-warnings-on-RT.patch
@@ -1,7 +1,7 @@
Subject: tick/sched: Prevent false positive softirq pending warnings on RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 31 Aug 2020 17:02:36 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
On RT a task which has soft interrupts disabled can block on a lock and
schedule out to idle while soft interrupts are pending. This triggers the
diff --git a/debian/patches-rt/tpm-remove-tpm_dev_wq_lock.patch b/debian/patches-rt/tpm-remove-tpm_dev_wq_lock.patch
index a17a24e4f..5f2f2ffa9 100644
--- a/debian/patches-rt/tpm-remove-tpm_dev_wq_lock.patch
+++ b/debian/patches-rt/tpm-remove-tpm_dev_wq_lock.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 11 Feb 2019 11:33:11 +0100
Subject: [PATCH] tpm: remove tpm_dev_wq_lock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Added in commit
diff --git a/debian/patches-rt/tpm_tis-fix-stall-after-iowrite-s.patch b/debian/patches-rt/tpm_tis-fix-stall-after-iowrite-s.patch
index 3dbbab4cc..2c4b46c0a 100644
--- a/debian/patches-rt/tpm_tis-fix-stall-after-iowrite-s.patch
+++ b/debian/patches-rt/tpm_tis-fix-stall-after-iowrite-s.patch
@@ -1,7 +1,7 @@
From: Haris Okanovic <haris.okanovic@ni.com>
Date: Tue, 15 Aug 2017 15:13:08 -0500
Subject: [PATCH] tpm_tis: fix stall after iowrite*()s
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
ioread8() operations to TPM MMIO addresses can stall the cpu when
immediately following a sequence of iowrite*()'s to the same region.
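
The underlying issue is write posting: MMIO writes can sit buffered until
something forces them out, and a burst of iowrite*()'s followed by an
ioread8() to the same region makes that read absorb the whole flush cost.
A sketch of the read-back idea (hypothetical helper, not the driver's
actual code):

    #include <linux/io.h>
    #include <linux/types.h>

    static void mmio_write_buf_flushed(void __iomem *base,
                                       const u8 *buf, size_t n)
    {
        size_t i;

        for (i = 0; i < n; i++)
            iowrite8(buf[i], base + i);
        (void)ioread8(base);   /* read-back: forces posted writes out */
    }
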
diff --git a/debian/patches-rt/u64_stats-Disable-preemption-on-32bit-UP-SMP-with-RT.patch b/debian/patches-rt/u64_stats-Disable-preemption-on-32bit-UP-SMP-with-RT.patch
index 28267b0f1..34c2465c6 100644
--- a/debian/patches-rt/u64_stats-Disable-preemption-on-32bit-UP-SMP-with-RT.patch
+++ b/debian/patches-rt/u64_stats-Disable-preemption-on-32bit-UP-SMP-with-RT.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 17 Aug 2020 12:28:10 +0200
Subject: [PATCH] u64_stats: Disable preemption on 32bit-UP/SMP with RT during
updates
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
On RT the seqcount_t is required even on UP because the softirq can be
preempted. The IRQ handler is threaded so it is also preemptible.
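
For orientation, the writer/reader pairing of the u64_stats API the patch
adjusts (standard kernel API; the syncp is initialized with
u64_stats_init()):

    #include <linux/u64_stats_sync.h>

    struct pcpu_stats {
        u64 packets;
        struct u64_stats_sync syncp;  /* seqcount on 32bit, empty on 64bit */
    };

    static void stats_inc(struct pcpu_stats *s)
    {
        u64_stats_update_begin(&s->syncp);  /* the side RT must protect */
        s->packets++;
        u64_stats_update_end(&s->syncp);
    }

    static u64 stats_read(struct pcpu_stats *s)
    {
        unsigned int seq;
        u64 val;

        do {
            seq = u64_stats_fetch_begin(&s->syncp);
            val = s->packets;
        } while (u64_stats_fetch_retry(&s->syncp, seq));
        return val;
    }
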
diff --git a/debian/patches-rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch b/debian/patches-rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
index 79d8238f6..df0679829 100644
--- a/debian/patches-rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
+++ b/debian/patches-rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
@@ -1,7 +1,7 @@
Subject: net: Remove preemption disabling in netif_rx()
From: Priyanka Jain <Priyanka.Jain@freescale.com>
Date: Thu, 17 May 2012 09:35:11 +0530
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
1) enqueue_to_backlog() (called from netif_rx) should be
bound to a particular CPU. This can be achieved by
diff --git a/debian/patches-rt/wait.h-include-atomic.h.patch b/debian/patches-rt/wait.h-include-atomic.h.patch
index ccc23df66..42efa18f3 100644
--- a/debian/patches-rt/wait.h-include-atomic.h.patch
+++ b/debian/patches-rt/wait.h-include-atomic.h.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 28 Oct 2013 12:19:57 +0100
Subject: wait.h: include atomic.h
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
| CC init/main.o
|In file included from include/linux/mmzone.h:9:0,
diff --git a/debian/patches-rt/x86-Enable-RT-also-on-32bit.patch b/debian/patches-rt/x86-Enable-RT-also-on-32bit.patch
index f18d2536f..251394a08 100644
--- a/debian/patches-rt/x86-Enable-RT-also-on-32bit.patch
+++ b/debian/patches-rt/x86-Enable-RT-also-on-32bit.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 7 Nov 2019 17:49:20 +0100
Subject: [PATCH] x86: Enable RT also on 32bit
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
diff --git a/debian/patches-rt/x86-Enable-RT.patch b/debian/patches-rt/x86-Enable-RT.patch
index 340ed346b..67cc07b20 100644
--- a/debian/patches-rt/x86-Enable-RT.patch
+++ b/debian/patches-rt/x86-Enable-RT.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 7 Aug 2019 18:15:38 +0200
Subject: [PATCH] x86: Allow to enable RT
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Allow selecting RT.
diff --git a/debian/patches-rt/x86-crypto-reduce-preempt-disabled-regions.patch b/debian/patches-rt/x86-crypto-reduce-preempt-disabled-regions.patch
index 6ac293b90..9e63224c3 100644
--- a/debian/patches-rt/x86-crypto-reduce-preempt-disabled-regions.patch
+++ b/debian/patches-rt/x86-crypto-reduce-preempt-disabled-regions.patch
@@ -1,7 +1,7 @@
Subject: x86: crypto: Reduce preempt disabled regions
From: Peter Zijlstra <peterz@infradead.org>
Date: Mon, 14 Nov 2011 18:19:27 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Restrict the preempt disabled regions to the actual floating point
operations and enable preemption for the administrative actions.
diff --git a/debian/patches-rt/x86-entry-Use-should_resched-in-idtentry_exit_cond_r.patch b/debian/patches-rt/x86-entry-Use-should_resched-in-idtentry_exit_cond_r.patch
index 56cf6fa43..b67da3437 100644
--- a/debian/patches-rt/x86-entry-Use-should_resched-in-idtentry_exit_cond_r.patch
+++ b/debian/patches-rt/x86-entry-Use-should_resched-in-idtentry_exit_cond_r.patch
@@ -2,7 +2,7 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 30 Jun 2020 11:45:14 +0200
Subject: [PATCH] x86/entry: Use should_resched() in
idtentry_exit_cond_resched()
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
The TIF_NEED_RESCHED bit is inlined on x86 into the preemption counter.
By using should_resched(0) instead of need_resched() the same check can
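
In sketch form, the check the patch switches to (the exit path shown is a
simplified stand-in, not the actual idtentry code):

    #include <linux/preempt.h>
    #include <linux/sched.h>

    static void exit_cond_resched(void)
    {
        /* On x86, should_resched(0) tests "preempt count is zero and a
         * reschedule is needed" in a single compare, since the
         * need-resched bit is folded into the preemption counter. */
        if (should_resched(0))
            schedule();
    }
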
diff --git a/debian/patches-rt/x86-fpu--Do-not-disable-BH-on-RT.patch b/debian/patches-rt/x86-fpu--Do-not-disable-BH-on-RT.patch
index c8cd9af07..8f4503b53 100644
--- a/debian/patches-rt/x86-fpu--Do-not-disable-BH-on-RT.patch
+++ b/debian/patches-rt/x86-fpu--Do-not-disable-BH-on-RT.patch
@@ -1,7 +1,7 @@
Subject: x86/fpu: Do not disable BH on RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 21 Sep 2020 20:15:50 +0200
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
diff --git a/debian/patches-rt/x86-highmem-add-a-already-used-pte-check.patch b/debian/patches-rt/x86-highmem-add-a-already-used-pte-check.patch
index 4a554572c..727c52d9a 100644
--- a/debian/patches-rt/x86-highmem-add-a-already-used-pte-check.patch
+++ b/debian/patches-rt/x86-highmem-add-a-already-used-pte-check.patch
@@ -1,7 +1,7 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 11 Mar 2013 17:09:55 +0100
Subject: x86/highmem: Add an "already used pte" check
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
This is a copy from kmap_atomic_prot().
diff --git a/debian/patches-rt/x86-kvm-require-const-tsc-for-rt.patch b/debian/patches-rt/x86-kvm-require-const-tsc-for-rt.patch
index 37647f087..15e31bebc 100644
--- a/debian/patches-rt/x86-kvm-require-const-tsc-for-rt.patch
+++ b/debian/patches-rt/x86-kvm-require-const-tsc-for-rt.patch
@@ -1,7 +1,7 @@
Subject: x86: kvm Require const tsc for RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 06 Nov 2011 12:26:18 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Non constant TSC is a nightmare on bare metal already, but with
virtualization it becomes a complete disaster because the workarounds
diff --git a/debian/patches-rt/x86-preempt-lazy.patch b/debian/patches-rt/x86-preempt-lazy.patch
index 577ffb22a..6236ec0f7 100644
--- a/debian/patches-rt/x86-preempt-lazy.patch
+++ b/debian/patches-rt/x86-preempt-lazy.patch
@@ -1,7 +1,7 @@
Subject: x86: Support for lazy preemption
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 01 Nov 2012 11:03:47 +0100
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
Implement the x86 pieces for lazy preempt.
diff --git a/debian/patches-rt/x86-stackprot-no-random-on-rt.patch b/debian/patches-rt/x86-stackprot-no-random-on-rt.patch
index 3eac47b02..d4ab0e64a 100644
--- a/debian/patches-rt/x86-stackprot-no-random-on-rt.patch
+++ b/debian/patches-rt/x86-stackprot-no-random-on-rt.patch
@@ -1,7 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 16 Dec 2010 14:25:18 +0100
Subject: x86: stackprotector: Avoid random pool on rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rt16.tar.xz
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9.1-rt20.tar.xz
CPU bringup calls into the random pool to initialize the stack
canary. During boot that works nicely even on RT as the might sleep