From fffa51ff5fa05cd5ad5dac0d7147b4ec1ffc9512 Mon Sep 17 00:00:00 2001
From: j1nx
Date: Mon, 5 Feb 2024 16:41:40 +0000
Subject: [PATCH] [All] (testing) Bump kernel to 6.6.14
---
buildroot | 2 +-
buildroot-external/configs/ova_64_defconfig | 2 +-
buildroot-external/configs/rpi3_64_defconfig | 2 +-
buildroot-external/configs/rpi4_64_defconfig | 2 +-
buildroot-external/configs/x86_64_defconfig | 2 +-
...Constrain-locks-in-sched_submit_work.patch | 54 +
...001-vduse-Remove-include-of-rwlock.h.patch | 32 -
...Avoid-unconditional-slowpath-for-DEB.patch | 86 +
...able-preemption-in-ptrace_stop-on-PR.patch | 65 -
...ask_struct-saved_state-in-wait_task_.patch | 151 --
.../0003-sched-Extract-__schedule_loop.patch | 63 +
...-rt_mutex-specific-scheduler-helpers.patch | 137 ++
...-obsolte-u64_stats_fetch_-_irq-users.patch | 39 -
...Use-rt_mutex-specific-scheduler-help.patch | 191 ++
...bsolte-u64_stats_fetch_-_irq-users-d.patch | 2150 -----------------
...Add-a-lockdep-assert-to-catch-potent.patch | 66 +
...bsolte-u64_stats_fetch_-_irq-users-n.patch | 392 ---
...-obsolte-u64_stats_fetch_-_irq-users.patch | 50 -
...-Fix-recursive-rt_mutex-waiter-state.patch | 204 ++
...r-comment-about-the-preempt-disable-.patch | 52 +
...move-the-obsolete-fetch_irq-variants.patch | 41 -
...able-preemption-in-ptrace_stop-on-PR.patch | 53 +
...Remove-migrate_en-dis-from-dc_fpu_be.patch | 91 +
...d-display-Simplify-the-per-CPU-usage.patch | 132 +
...Add-a-warning-if-the-FPU-is-used-out.patch | 31 +
...Move-the-memory-allocation-out-of-dc.patch | 96 +
...Move-the-memory-allocation-out-of-dc.patch | 130 +
... 0015-net-Avoid-the-IPI-to-free-the.patch} | 36 +-
...16-tpm_tis-fix-stall-after-iowrite-s.patch | 81 -
...atch => 0016-x86-Allow-to-enable-RT.patch} | 10 +-
...=> 0017-x86-Enable-RT-also-on-32bit.patch} | 12 +-
...ep-Remove-lockdep_init_map_crosslock.patch | 33 -
...n-t-try-push-tasks-if-there-are-none.patch | 63 +
.../0019-printk-Bring-back-the-RT-bits.patch | 1233 ----------
...-dedicated-thread-for-timer-wakeups.patch} | 22 +-
...dd-infrastucture-for-atomic-consoles.patch | 607 -----
...orce-sched-priority-to-timersd-on-b.patch} | 12 +-
...1-serial-8250-implement-write_atomic.patch | 937 -------
...storm-since-introduction-of-timersd.patch} | 12 +-
...avoid-preempt_disable-for-PREEMPT_RT.patch | 95 -
...Wake-ktimers-thread-also-in-softirq.patch} | 8 +-
...spinlocks-with-spinlock_t-for-PREEM.patch} | 39 +-
...mpt_enable-within-an-instrumentation.patch | 52 +
...de-a-method-to-check-if-a-task-is-PI.patch | 61 +
...function-to-preempt-serving-softirqs.patch | 67 +
...me-Allow-to-preempt-after-a-callback.patch | 52 +
...rial-core-Provide-port-lock-wrappers.patch | 131 +
.../0029-serial-core-Use-lock-wrappers.patch | 98 +
...-serial-21285-Use-port-lock-wrappers.patch | 80 +
..._aspeed_vuart-Use-port-lock-wrappers.patch | 66 +
...ched-Add-support-for-lazy-preemption.patch | 713 ------
...-8250_bcm7271-Use-port-lock-wrappers.patch | 156 ++
...3-serial-8250-Use-port-lock-wrappers.patch | 472 ++++
...ould_resched-in-idtentry_exit_cond_r.patch | 35 -
...rial-8250_dma-Use-port-lock-wrappers.patch | 85 +
...0034-x86-Support-for-lazy-preemption.patch | 157 --
...5-entry-Fix-the-preempt-lazy-fallout.patch | 48 -
...erial-8250_dw-Use-port-lock-wrappers.patch | 74 +
...-arm-Add-support-for-lazy-preemption.patch | 136 --
...ial-8250_exar-Use-port-lock-wrappers.patch | 57 +
...erpc-Add-support-for-lazy-preemption.patch | 117 -
...rial-8250_fsl-Use-port-lock-wrappers.patch | 68 +
...-arch-arm64-Add-lazy-preempt-support.patch | 145 --
...rial-8250_mtk-Use-port-lock-wrappers.patch | 82 +
...ial-8250_omap-Use-port-lock-wrappers.patch | 241 ++
...8250_pci1xxxx-Use-port-lock-wrappers.patch | 71 +
...tera_jtaguart-Use-port-lock-wrappers.patch | 138 ++
...l-altera_uart-Use-port-lock-wrappers.patch | 121 +
...al-pl011-Make-the-locking-work-on-RT.patch | 59 -
...al-amba-pl010-Use-port-lock-wrappers.patch | 117 +
...al-amba-pl011-Use-port-lock-wrappers.patch | 332 +++
...45-serial-apb-Use-port-lock-wrappers.patch | 81 +
...serial-ar933x-Use-port-lock-wrappers.patch | 149 ++
...rial-arc_uart-Use-port-lock-wrappers.patch | 102 +
...-serial-atmel-Use-port-lock-wrappers.patch | 124 +
...-bcm63xx-uart-Use-port-lock-wrappers.patch | 133 +
...rial-cpm_uart-Use-port-lock-wrappers.patch | 75 +
...ial-digicolor-Use-port-lock-wrappers.patch | 118 +
.../linux/0052-Linux-6.1.46-rt13-REBASE.patch | 20 -
...052-serial-dz-Use-port-lock-wrappers.patch | 166 ++
...-disable-preempt-on-RT-in-io_mapping.patch | 92 -
...l-linflexuart-Use-port-lock-wrappers.patch | 148 ++
...itigate-indefinite-writer-starvation.patch | 62 -
...al-fsl_lpuart-Use-port-lock-wrappers.patch | 394 +++
...ert-softirq-Let-ksoftirqd-do-its-job.patch | 107 -
...5-serial-icom-Use-port-lock-wrappers.patch | 156 ++
...king-Annotate-debug_object_fill_pool.patch | 176 --
...56-serial-imx-Use-port-lock-wrappers.patch | 359 +++
...lse-lockdep-splat-in-put_task_struct.patch | 51 -
...ial-ip22zilog-Use-port-lock-wrappers.patch | 190 ++
...e-write_seqlock_irqsave-instead-writ.patch | 96 -
...58-serial-jsm-Use-port-lock-wrappers.patch | 131 +
...f-Remove-in_atomic-from-bpf_link_put.patch | 119 -
...rial-liteuart-Use-port-lock-wrappers.patch | 115 +
...ure-timer-ID-search-loop-limit-is-va.patch | 114 -
...al-lpc32xx_hs-Use-port-lock-wrappers.patch | 153 ++
...serial-ma35d1-Use-port-lock-wrappers.patch | 122 +
...62-serial-mcf-Use-port-lock-wrappers.patch | 132 +
...men_z135_uart-Use-port-lock-wrappers.patch | 81 +
...-serial-meson-Use-port-lock-wrappers.patch | 173 ++
...milbeaut_usio-Use-port-lock-wrappers.patch | 106 +
...erial-mpc52xx-Use-port-lock-wrappers.patch | 94 +
...ial-mps2-uart-Use-port-lock-wrappers.patch | 108 +
...68-serial-msm-Use-port-lock-wrappers.patch | 190 ++
...al-mvebu-uart-Use-port-lock-wrappers.patch | 113 +
...0-serial-omap-Use-port-lock-wrappers.patch | 185 ++
...71-serial-owl-Use-port-lock-wrappers.patch | 152 ++
...72-serial-pch-Use-port-lock-wrappers.patch | 85 +
...-serial-pic32-Use-port-lock-wrappers.patch | 123 +
...al-pmac_zilog-Use-port-lock-wrappers.patch | 237 ++
...75-serial-pxa-Use-port-lock-wrappers.patch | 155 ++
...ial-qcom-geni-Use-port-lock-wrappers.patch | 76 +
...77-serial-rda-Use-port-lock-wrappers.patch | 182 ++
...78-serial-rp2-Use-port-lock-wrappers.patch | 119 +
...serial-sa1100-Use-port-lock-wrappers.patch | 122 +
...l-samsung_tty-Use-port-lock-wrappers.patch | 250 ++
...-sb1250-duart-Use-port-lock-wrappers.patch | 90 +
...ial-sc16is7xx-Use-port-lock-wrappers.patch | 186 ++
...-serial-tegra-Use-port-lock-wrappers.patch | 181 ++
...4-serial-core-Use-port-lock-wrappers.patch | 372 +++
...al-mctrl_gpio-Use-port-lock-wrappers.patch | 63 +
...6-serial-txx9-Use-port-lock-wrappers.patch | 139 ++
...serial-sh-sci-Use-port-lock-wrappers.patch | 307 +++
...serial-sifive-Use-port-lock-wrappers.patch | 107 +
...9-serial-sprd-Use-port-lock-wrappers.patch | 167 ++
...serial-st-asc-Use-port-lock-wrappers.patch | 115 +
...-serial-stm32-Use-port-lock-wrappers.patch | 189 ++
...-serial-sunhv-Use-port-lock-wrappers.patch | 154 ++
...-sunplus-uart-Use-port-lock-wrappers.patch | 151 ++
...serial-sunsab-Use-port-lock-wrappers.patch | 181 ++
...-serial-sunsu-Use-port-lock-wrappers.patch | 224 ++
...rial-sunzilog-Use-port-lock-wrappers.patch | 216 ++
...rial-timbuart-Use-port-lock-wrappers.patch | 76 +
...rial-uartlite-Use-port-lock-wrappers.patch | 110 +
...rial-ucc_uart-Use-port-lock-wrappers.patch | 64 +
...serial-vt8500-Use-port-lock-wrappers.patch | 81 +
...xilinx_uartps-Use-port-lock-wrappers.patch | 281 +++
...KL-nbcon-console-basic-infrastructur.patch | 274 +++
...intk-nbcon-Add-acquire-release-logic.patch | 712 ++++++
...ic-printk-buffers-available-to-nbcon.patch | 72 +
...5-printk-nbcon-Add-buffer-management.patch | 322 +++
...-nbcon-Add-ownership-state-functions.patch | 184 ++
...7-printk-nbcon-Add-sequence-handling.patch | 322 +++
...-emit-function-and-callback-function.patch | 272 +++
...ow-drivers-to-mark-unsafe-regions-an.patch | 143 ++
...legal-pbufs-access-for-CONFIG_PRINTK.patch | 139 ++
...-printk-Reduce-pr_flush-pooling-time.patch | 107 +
...intk-nbcon-Relocate-32bit-seq-macros.patch | 148 ++
...-Adjust-mapping-for-32bit-seq-macros.patch | 76 +
...irst_seq-as-base-for-32bit-seq-macro.patch | 78 +
...r-Do-not-skip-non-finalized-records-.patch | 312 +++
...ngbuffer-Clarify-special-lpos-values.patch | 99 +
...ess_panic_printk-check-for-other-CPU.patch | 39 +
.../0118-printk-Add-this_cpu_in_panic.patch | 95 +
...ingbuffer-Cleanup-reader-terminology.patch | 72 +
...r-all-reserved-records-with-pr_flush.patch | 179 ++
...r-Skip-non-finalized-records-in-pani.patch | 73 +
...r-Consider-committed-as-finalized-in.patch | 66 +
...assing-console-lock-owner-completely.patch | 112 +
...non-panic-CPUs-writing-to-ringbuffer.patch | 83 +
...c-Flush-kernel-log-buffer-at-the-end.patch | 43 +
...ider-nbcon-boot-consoles-on-seq-init.patch | 56 +
...rse-notation-to-console_srcu-locking.patch | 41 +
...ure-ownership-release-on-failed-emit.patch | 63 +
...ck-printk_deferred_enter-_exit-usage.patch | 65 +
...lement-processing-in-port-lock-wrapp.patch | 255 ++
...-driver_enter-driver_exit-console-ca.patch | 47 +
...console_is_usable-available-to-nbcon.patch | 110 +
...k-Let-console_is_usable-handle-nbcon.patch | 48 +
...flags-argument-for-console_is_usable.patch | 74 +
...vide-function-to-flush-using-write_a.patch | 195 ++
...rintk-Track-registered-boot-consoles.patch | 83 +
...-nbcon-consoles-in-console_flush_all.patch | 182 ++
...n-Assign-priority-based-on-CPU-state.patch | 120 +
...k-nbcon-Add-unsafe-flushing-on-panic.patch | 105 +
...sole_lock-dance-if-no-legacy-or-boot.patch | 218 ++
.../0141-printk-Track-nbcon-consoles.patch | 72 +
...-Coordinate-direct-printing-in-panic.patch | 143 ++
...k-nbcon-Implement-emergency-sections.patch | 243 ++
...panic-Mark-emergency-section-in-warn.patch | 43 +
...panic-Mark-emergency-section-in-oops.patch | 44 +
...Mark-emergency-section-in-rcu-stalls.patch | 50 +
...-emergency-section-in-lockdep-splats.patch | 50 +
...tk-nbcon-Introduce-printing-kthreads.patch | 452 ++++
...-print-in-printk-context-on-shutdown.patch | 46 +
...con-Add-context-to-console_is_usable.patch | 120 +
...ntk-nbcon-Add-printer-thread-wakeups.patch | 175 ++
...bcon-Stop-threads-on-shutdown-reboot.patch | 65 +
...-printk-nbcon-Start-printing-threads.patch | 144 ++
...-Add-nbcon-support-for-proc-consoles.patch | 58 +
...y-sysfs-Add-nbcon-support-for-active.patch | 38 +
...vide-function-to-reacquire-ownership.patch | 99 +
...ide-low-level-functions-to-port-lock.patch | 48 +
...-serial-8250-Switch-to-nbcon-console.patch | 344 +++
...-Add-kthread-for-all-legacy-consoles.patch | 431 ++++
...rt-drop-lockdep-annotation-from-seri.patch | 34 +
...se-positive-lockdep-report-for-legac.patch | 69 +
...mpt_disable-enable_rt-where-recomme.patch} | 18 +-
...sable-interrupts-on-PREEMPT_RT-duri.patch} | 20 +-
...eck-for-atomic-context-on-PREEMPT_R.patch} | 9 +-
...isable-tracing-points-on-PREEMPT_RT.patch} | 6 +-
..._I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch} | 8 +-
...ueue-and-wait-for-the-irq_work-item.patch} | 4 +-
...pin_lock_irq-instead-of-local_irq_d.patch} | 18 +-
...m-i915-Drop-the-irqs_disabled-check.patch} | 6 +-
...o-not-disable-preemption-for-resets.patch} | 39 +-
...Consider-also-RCU-depth-in-busy-loop.patch | 34 +
...evert-drm-i915-Depend-on-PREEMPT_RT.patch} | 6 +-
.../0173-sched-define-TIF_ALLOW_RESCHED.patch | 822 +++++++
...rm-Disable-jump-label-on-PREEMPT_RT.patch} | 10 +-
...-translation-section-permission-fau.patch} | 12 +-
...rial-omap-Make-the-locking-RT-aware.patch} | 22 +-
...al-pl011-Make-the-locking-work-on-RT.patch | 47 +
...vfp-Provide-vfp_lock-for-VFP-locking.patch | 80 +
...vfp-Use-vfp_lock-in-vfp_sync_hwstate.patch | 49 +
...fp-Use-vfp_lock-in-vfp_support_entry.patch | 53 +
...ding-signals-outside-of-vfp_lock-ed-.patch | 126 +
...atch => 0182-ARM-Allow-to-enable-RT.patch} | 12 +-
...ch => 0183-ARM64-Allow-to-enable-RT.patch} | 12 +-
...> 0184-powerpc-traps-Use-PREEMPT_RT.patch} | 6 +-
...ommu-Use-a-locallock-instead-local_.patch} | 22 +-
...-Select-the-generic-memory-allocator.patch | 32 +
...le-in-kernel-MPIC-emulation-for-PRE.patch} | 8 +-
...ector-work-around-stack-guard-init-.patch} | 31 +-
... => 0189-POWERPC-Allow-to-enable-RT.patch} | 12 +-
...-misaligned-access-speed-in-parallel.patch | 196 ++
.../0191-riscv-add-PREEMPT_AUTO-support.patch | 51 +
.../linux/0192-riscv-allow-to-enable-RT.patch | 28 +
...sysfs-Add-sys-kernel-realtime-entry.patch} | 10 +-
...194-Add-localversion-for-RT-release.patch} | 8 +-
...ch => 0195-Linux-6.6.14-rt21-REBASE.patch} | 12 +-
231 files changed, 23132 insertions(+), 8386 deletions(-)
create mode 100644 buildroot-external/patches/linux/0001-sched-Constrain-locks-in-sched_submit_work.patch
delete mode 100644 buildroot-external/patches/linux/0001-vduse-Remove-include-of-rwlock.h.patch
create mode 100644 buildroot-external/patches/linux/0002-locking-rtmutex-Avoid-unconditional-slowpath-for-DEB.patch
delete mode 100644 buildroot-external/patches/linux/0002-signal-Don-t-disable-preemption-in-ptrace_stop-on-PR.patch
delete mode 100644 buildroot-external/patches/linux/0003-sched-Consider-task_struct-saved_state-in-wait_task_.patch
create mode 100644 buildroot-external/patches/linux/0003-sched-Extract-__schedule_loop.patch
create mode 100644 buildroot-external/patches/linux/0004-sched-Provide-rt_mutex-specific-scheduler-helpers.patch
delete mode 100644 buildroot-external/patches/linux/0004-spi-Remove-the-obsolte-u64_stats_fetch_-_irq-users.patch
create mode 100644 buildroot-external/patches/linux/0005-locking-rtmutex-Use-rt_mutex-specific-scheduler-help.patch
delete mode 100644 buildroot-external/patches/linux/0005-net-Remove-the-obsolte-u64_stats_fetch_-_irq-users-d.patch
create mode 100644 buildroot-external/patches/linux/0006-locking-rtmutex-Add-a-lockdep-assert-to-catch-potent.patch
delete mode 100644 buildroot-external/patches/linux/0006-net-Remove-the-obsolte-u64_stats_fetch_-_irq-users-n.patch
delete mode 100644 buildroot-external/patches/linux/0007-bpf-Remove-the-obsolte-u64_stats_fetch_-_irq-users.patch
create mode 100644 buildroot-external/patches/linux/0007-futex-pi-Fix-recursive-rt_mutex-waiter-state.patch
create mode 100644 buildroot-external/patches/linux/0008-signal-Add-proper-comment-about-the-preempt-disable-.patch
delete mode 100644 buildroot-external/patches/linux/0008-u64_stat-Remove-the-obsolete-fetch_irq-variants.patch
create mode 100644 buildroot-external/patches/linux/0009-signal-Don-t-disable-preemption-in-ptrace_stop-on-PR.patch
create mode 100644 buildroot-external/patches/linux/0010-drm-amd-display-Remove-migrate_en-dis-from-dc_fpu_be.patch
create mode 100644 buildroot-external/patches/linux/0011-drm-amd-display-Simplify-the-per-CPU-usage.patch
create mode 100644 buildroot-external/patches/linux/0012-drm-amd-display-Add-a-warning-if-the-FPU-is-used-out.patch
create mode 100644 buildroot-external/patches/linux/0013-drm-amd-display-Move-the-memory-allocation-out-of-dc.patch
create mode 100644 buildroot-external/patches/linux/0014-drm-amd-display-Move-the-memory-allocation-out-of-dc.patch
rename buildroot-external/patches/linux/{0009-net-Avoid-the-IPI-to-free-the.patch => 0015-net-Avoid-the-IPI-to-free-the.patch} (76%)
delete mode 100644 buildroot-external/patches/linux/0016-tpm_tis-fix-stall-after-iowrite-s.patch
rename buildroot-external/patches/linux/{0010-x86-Allow-to-enable-RT.patch => 0016-x86-Allow-to-enable-RT.patch} (72%)
rename buildroot-external/patches/linux/{0011-x86-Enable-RT-also-on-32bit.patch => 0017-x86-Enable-RT-also-on-32bit.patch} (75%)
delete mode 100644 buildroot-external/patches/linux/0018-locking-lockdep-Remove-lockdep_init_map_crosslock.patch
create mode 100644 buildroot-external/patches/linux/0018-sched-rt-Don-t-try-push-tasks-if-there-are-none.patch
delete mode 100644 buildroot-external/patches/linux/0019-printk-Bring-back-the-RT-bits.patch
rename buildroot-external/patches/linux/{0012-softirq-Use-a-dedicated-thread-for-timer-wakeups.patch => 0019-softirq-Use-a-dedicated-thread-for-timer-wakeups.patch} (91%)
delete mode 100644 buildroot-external/patches/linux/0020-printk-add-infrastucture-for-atomic-consoles.patch
rename buildroot-external/patches/linux/{0013-rcutorture-Also-force-sched-priority-to-timersd-on-b.patch => 0020-rcutorture-Also-force-sched-priority-to-timersd-on-b.patch} (88%)
delete mode 100644 buildroot-external/patches/linux/0021-serial-8250-implement-write_atomic.patch
rename buildroot-external/patches/linux/{0014-tick-Fix-timer-storm-since-introduction-of-timersd.patch => 0021-tick-Fix-timer-storm-since-introduction-of-timersd.patch} (91%)
delete mode 100644 buildroot-external/patches/linux/0022-printk-avoid-preempt_disable-for-PREEMPT_RT.patch
rename buildroot-external/patches/linux/{0015-softirq-Wake-ktimers-thread-also-in-softirq.patch => 0022-softirq-Wake-ktimers-thread-also-in-softirq.patch} (86%)
rename buildroot-external/patches/linux/{0017-zram-Replace-bit-spinlocks-with-spinlock_t-for-PREEM.patch => 0023-zram-Replace-bit-spinlocks-with-spinlock_t-for-PREEM.patch} (66%)
create mode 100644 buildroot-external/patches/linux/0024-preempt-Put-preempt_enable-within-an-instrumentation.patch
create mode 100644 buildroot-external/patches/linux/0025-sched-core-Provide-a-method-to-check-if-a-task-is-PI.patch
create mode 100644 buildroot-external/patches/linux/0026-softirq-Add-function-to-preempt-serving-softirqs.patch
create mode 100644 buildroot-external/patches/linux/0027-time-Allow-to-preempt-after-a-callback.patch
create mode 100644 buildroot-external/patches/linux/0028-serial-core-Provide-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0029-serial-core-Use-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0030-serial-21285-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0031-serial-8250_aspeed_vuart-Use-port-lock-wrappers.patch
delete mode 100644 buildroot-external/patches/linux/0032-sched-Add-support-for-lazy-preemption.patch
create mode 100644 buildroot-external/patches/linux/0032-serial-8250_bcm7271-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0033-serial-8250-Use-port-lock-wrappers.patch
delete mode 100644 buildroot-external/patches/linux/0033-x86-entry-Use-should_resched-in-idtentry_exit_cond_r.patch
create mode 100644 buildroot-external/patches/linux/0034-serial-8250_dma-Use-port-lock-wrappers.patch
delete mode 100644 buildroot-external/patches/linux/0034-x86-Support-for-lazy-preemption.patch
delete mode 100644 buildroot-external/patches/linux/0035-entry-Fix-the-preempt-lazy-fallout.patch
create mode 100644 buildroot-external/patches/linux/0035-serial-8250_dw-Use-port-lock-wrappers.patch
delete mode 100644 buildroot-external/patches/linux/0036-arm-Add-support-for-lazy-preemption.patch
create mode 100644 buildroot-external/patches/linux/0036-serial-8250_exar-Use-port-lock-wrappers.patch
delete mode 100644 buildroot-external/patches/linux/0037-powerpc-Add-support-for-lazy-preemption.patch
create mode 100644 buildroot-external/patches/linux/0037-serial-8250_fsl-Use-port-lock-wrappers.patch
delete mode 100644 buildroot-external/patches/linux/0038-arch-arm64-Add-lazy-preempt-support.patch
create mode 100644 buildroot-external/patches/linux/0038-serial-8250_mtk-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0039-serial-8250_omap-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0040-serial-8250_pci1xxxx-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0041-serial-altera_jtaguart-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0042-serial-altera_uart-Use-port-lock-wrappers.patch
delete mode 100644 buildroot-external/patches/linux/0042-tty-serial-pl011-Make-the-locking-work-on-RT.patch
create mode 100644 buildroot-external/patches/linux/0043-serial-amba-pl010-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0044-serial-amba-pl011-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0045-serial-apb-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0046-serial-ar933x-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0047-serial-arc_uart-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0048-serial-atmel-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0049-serial-bcm63xx-uart-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0050-serial-cpm_uart-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0051-serial-digicolor-Use-port-lock-wrappers.patch
delete mode 100644 buildroot-external/patches/linux/0052-Linux-6.1.46-rt13-REBASE.patch
create mode 100644 buildroot-external/patches/linux/0052-serial-dz-Use-port-lock-wrappers.patch
delete mode 100644 buildroot-external/patches/linux/0053-io-mapping-don-t-disable-preempt-on-RT-in-io_mapping.patch
create mode 100644 buildroot-external/patches/linux/0053-serial-linflexuart-Use-port-lock-wrappers.patch
delete mode 100644 buildroot-external/patches/linux/0054-locking-rwbase-Mitigate-indefinite-writer-starvation.patch
create mode 100644 buildroot-external/patches/linux/0054-serial-fsl_lpuart-Use-port-lock-wrappers.patch
delete mode 100644 buildroot-external/patches/linux/0055-revert-softirq-Let-ksoftirqd-do-its-job.patch
create mode 100644 buildroot-external/patches/linux/0055-serial-icom-Use-port-lock-wrappers.patch
delete mode 100644 buildroot-external/patches/linux/0056-debugobjects-locking-Annotate-debug_object_fill_pool.patch
create mode 100644 buildroot-external/patches/linux/0056-serial-imx-Use-port-lock-wrappers.patch
delete mode 100644 buildroot-external/patches/linux/0057-sched-avoid-false-lockdep-splat-in-put_task_struct.patch
create mode 100644 buildroot-external/patches/linux/0057-serial-ip22zilog-Use-port-lock-wrappers.patch
delete mode 100644 buildroot-external/patches/linux/0058-mm-page_alloc-Use-write_seqlock_irqsave-instead-writ.patch
create mode 100644 buildroot-external/patches/linux/0058-serial-jsm-Use-port-lock-wrappers.patch
delete mode 100644 buildroot-external/patches/linux/0059-bpf-Remove-in_atomic-from-bpf_link_put.patch
create mode 100644 buildroot-external/patches/linux/0059-serial-liteuart-Use-port-lock-wrappers.patch
delete mode 100644 buildroot-external/patches/linux/0060-posix-timers-Ensure-timer-ID-search-loop-limit-is-va.patch
create mode 100644 buildroot-external/patches/linux/0060-serial-lpc32xx_hs-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0061-serial-ma35d1-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0062-serial-mcf-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0063-serial-men_z135_uart-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0064-serial-meson-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0065-serial-milbeaut_usio-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0066-serial-mpc52xx-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0067-serial-mps2-uart-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0068-serial-msm-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0069-serial-mvebu-uart-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0070-serial-omap-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0071-serial-owl-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0072-serial-pch-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0073-serial-pic32-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0074-serial-pmac_zilog-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0075-serial-pxa-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0076-serial-qcom-geni-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0077-serial-rda-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0078-serial-rp2-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0079-serial-sa1100-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0080-serial-samsung_tty-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0081-serial-sb1250-duart-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0082-serial-sc16is7xx-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0083-serial-tegra-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0084-serial-core-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0085-serial-mctrl_gpio-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0086-serial-txx9-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0087-serial-sh-sci-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0088-serial-sifive-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0089-serial-sprd-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0090-serial-st-asc-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0091-serial-stm32-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0092-serial-sunhv-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0093-serial-sunplus-uart-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0094-serial-sunsab-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0095-serial-sunsu-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0096-serial-sunzilog-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0097-serial-timbuart-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0098-serial-uartlite-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0099-serial-ucc_uart-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0100-serial-vt8500-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0101-serial-xilinx_uartps-Use-port-lock-wrappers.patch
create mode 100644 buildroot-external/patches/linux/0102-printk-Add-non-BKL-nbcon-console-basic-infrastructur.patch
create mode 100644 buildroot-external/patches/linux/0103-printk-nbcon-Add-acquire-release-logic.patch
create mode 100644 buildroot-external/patches/linux/0104-printk-Make-static-printk-buffers-available-to-nbcon.patch
create mode 100644 buildroot-external/patches/linux/0105-printk-nbcon-Add-buffer-management.patch
create mode 100644 buildroot-external/patches/linux/0106-printk-nbcon-Add-ownership-state-functions.patch
create mode 100644 buildroot-external/patches/linux/0107-printk-nbcon-Add-sequence-handling.patch
create mode 100644 buildroot-external/patches/linux/0108-printk-nbcon-Add-emit-function-and-callback-function.patch
create mode 100644 buildroot-external/patches/linux/0109-printk-nbcon-Allow-drivers-to-mark-unsafe-regions-an.patch
create mode 100644 buildroot-external/patches/linux/0110-printk-fix-illegal-pbufs-access-for-CONFIG_PRINTK.patch
create mode 100644 buildroot-external/patches/linux/0111-printk-Reduce-pr_flush-pooling-time.patch
create mode 100644 buildroot-external/patches/linux/0112-printk-nbcon-Relocate-32bit-seq-macros.patch
create mode 100644 buildroot-external/patches/linux/0113-printk-Adjust-mapping-for-32bit-seq-macros.patch
create mode 100644 buildroot-external/patches/linux/0114-printk-Use-prb_first_seq-as-base-for-32bit-seq-macro.patch
create mode 100644 buildroot-external/patches/linux/0115-printk-ringbuffer-Do-not-skip-non-finalized-records-.patch
create mode 100644 buildroot-external/patches/linux/0116-printk-ringbuffer-Clarify-special-lpos-values.patch
create mode 100644 buildroot-external/patches/linux/0117-printk-For-suppress_panic_printk-check-for-other-CPU.patch
create mode 100644 buildroot-external/patches/linux/0118-printk-Add-this_cpu_in_panic.patch
create mode 100644 buildroot-external/patches/linux/0119-printk-ringbuffer-Cleanup-reader-terminology.patch
create mode 100644 buildroot-external/patches/linux/0120-printk-Wait-for-all-reserved-records-with-pr_flush.patch
create mode 100644 buildroot-external/patches/linux/0121-printk-ringbuffer-Skip-non-finalized-records-in-pani.patch
create mode 100644 buildroot-external/patches/linux/0122-printk-ringbuffer-Consider-committed-as-finalized-in.patch
create mode 100644 buildroot-external/patches/linux/0123-printk-Disable-passing-console-lock-owner-completely.patch
create mode 100644 buildroot-external/patches/linux/0124-printk-Avoid-non-panic-CPUs-writing-to-ringbuffer.patch
create mode 100644 buildroot-external/patches/linux/0125-panic-Flush-kernel-log-buffer-at-the-end.patch
create mode 100644 buildroot-external/patches/linux/0126-printk-Consider-nbcon-boot-consoles-on-seq-init.patch
create mode 100644 buildroot-external/patches/linux/0127-printk-Add-sparse-notation-to-console_srcu-locking.patch
create mode 100644 buildroot-external/patches/linux/0128-printk-nbcon-Ensure-ownership-release-on-failed-emit.patch
create mode 100644 buildroot-external/patches/linux/0129-printk-Check-printk_deferred_enter-_exit-usage.patch
create mode 100644 buildroot-external/patches/linux/0130-printk-nbcon-Implement-processing-in-port-lock-wrapp.patch
create mode 100644 buildroot-external/patches/linux/0131-printk-nbcon-Add-driver_enter-driver_exit-console-ca.patch
create mode 100644 buildroot-external/patches/linux/0132-printk-Make-console_is_usable-available-to-nbcon.patch
create mode 100644 buildroot-external/patches/linux/0133-printk-Let-console_is_usable-handle-nbcon.patch
create mode 100644 buildroot-external/patches/linux/0134-printk-Add-flags-argument-for-console_is_usable.patch
create mode 100644 buildroot-external/patches/linux/0135-printk-nbcon-Provide-function-to-flush-using-write_a.patch
create mode 100644 buildroot-external/patches/linux/0136-printk-Track-registered-boot-consoles.patch
create mode 100644 buildroot-external/patches/linux/0137-printk-nbcon-Use-nbcon-consoles-in-console_flush_all.patch
create mode 100644 buildroot-external/patches/linux/0138-printk-nbcon-Assign-priority-based-on-CPU-state.patch
create mode 100644 buildroot-external/patches/linux/0139-printk-nbcon-Add-unsafe-flushing-on-panic.patch
create mode 100644 buildroot-external/patches/linux/0140-printk-Avoid-console_lock-dance-if-no-legacy-or-boot.patch
create mode 100644 buildroot-external/patches/linux/0141-printk-Track-nbcon-consoles.patch
create mode 100644 buildroot-external/patches/linux/0142-printk-Coordinate-direct-printing-in-panic.patch
create mode 100644 buildroot-external/patches/linux/0143-printk-nbcon-Implement-emergency-sections.patch
create mode 100644 buildroot-external/patches/linux/0144-panic-Mark-emergency-section-in-warn.patch
create mode 100644 buildroot-external/patches/linux/0145-panic-Mark-emergency-section-in-oops.patch
create mode 100644 buildroot-external/patches/linux/0146-rcu-Mark-emergency-section-in-rcu-stalls.patch
create mode 100644 buildroot-external/patches/linux/0147-lockdep-Mark-emergency-section-in-lockdep-splats.patch
create mode 100644 buildroot-external/patches/linux/0148-printk-nbcon-Introduce-printing-kthreads.patch
create mode 100644 buildroot-external/patches/linux/0149-printk-Atomic-print-in-printk-context-on-shutdown.patch
create mode 100644 buildroot-external/patches/linux/0150-printk-nbcon-Add-context-to-console_is_usable.patch
create mode 100644 buildroot-external/patches/linux/0151-printk-nbcon-Add-printer-thread-wakeups.patch
create mode 100644 buildroot-external/patches/linux/0152-printk-nbcon-Stop-threads-on-shutdown-reboot.patch
create mode 100644 buildroot-external/patches/linux/0153-printk-nbcon-Start-printing-threads.patch
create mode 100644 buildroot-external/patches/linux/0154-proc-Add-nbcon-support-for-proc-consoles.patch
create mode 100644 buildroot-external/patches/linux/0155-tty-sysfs-Add-nbcon-support-for-active.patch
create mode 100644 buildroot-external/patches/linux/0156-printk-nbcon-Provide-function-to-reacquire-ownership.patch
create mode 100644 buildroot-external/patches/linux/0157-serial-core-Provide-low-level-functions-to-port-lock.patch
create mode 100644 buildroot-external/patches/linux/0158-serial-8250-Switch-to-nbcon-console.patch
create mode 100644 buildroot-external/patches/linux/0159-printk-Add-kthread-for-all-legacy-consoles.patch
create mode 100644 buildroot-external/patches/linux/0160-serial-8250-revert-drop-lockdep-annotation-from-seri.patch
create mode 100644 buildroot-external/patches/linux/0161-printk-Avoid-false-positive-lockdep-report-for-legac.patch
rename buildroot-external/patches/linux/{0023-drm-i915-Use-preempt_disable-enable_rt-where-recomme.patch => 0162-drm-i915-Use-preempt_disable-enable_rt-where-recomme.patch} (74%)
rename buildroot-external/patches/linux/{0024-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch => 0163-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch} (82%)
rename buildroot-external/patches/linux/{0025-drm-i915-Don-t-check-for-atomic-context-on-PREEMPT_R.patch => 0164-drm-i915-Don-t-check-for-atomic-context-on-PREEMPT_R.patch} (84%)
rename buildroot-external/patches/linux/{0026-drm-i915-Disable-tracing-points-on-PREEMPT_RT.patch => 0165-drm-i915-Disable-tracing-points-on-PREEMPT_RT.patch} (90%)
rename buildroot-external/patches/linux/{0027-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch => 0166-drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch} (81%)
rename buildroot-external/patches/linux/{0028-drm-i915-gt-Queue-and-wait-for-the-irq_work-item.patch => 0167-drm-i915-gt-Queue-and-wait-for-the-irq_work-item.patch} (92%)
rename buildroot-external/patches/linux/{0029-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch => 0168-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch} (84%)
rename buildroot-external/patches/linux/{0030-drm-i915-Drop-the-irqs_disabled-check.patch => 0169-drm-i915-Drop-the-irqs_disabled-check.patch} (88%)
rename buildroot-external/patches/linux/{0061-drm-i915-Do-not-disable-preemption-for-resets.patch => 0170-drm-i915-Do-not-disable-preemption-for-resets.patch} (77%)
create mode 100644 buildroot-external/patches/linux/0171-drm-i915-guc-Consider-also-RCU-depth-in-busy-loop.patch
rename buildroot-external/patches/linux/{0031-Revert-drm-i915-Depend-on-PREEMPT_RT.patch => 0172-Revert-drm-i915-Depend-on-PREEMPT_RT.patch} (81%)
create mode 100644 buildroot-external/patches/linux/0173-sched-define-TIF_ALLOW_RESCHED.patch
rename buildroot-external/patches/linux/{0039-arm-Disable-jump-label-on-PREEMPT_RT.patch => 0174-arm-Disable-jump-label-on-PREEMPT_RT.patch} (86%)
rename buildroot-external/patches/linux/{0040-ARM-enable-irq-in-translation-section-permission-fau.patch => 0175-ARM-enable-irq-in-translation-section-permission-fau.patch} (90%)
rename buildroot-external/patches/linux/{0041-tty-serial-omap-Make-the-locking-RT-aware.patch => 0176-tty-serial-omap-Make-the-locking-RT-aware.patch} (62%)
create mode 100644 buildroot-external/patches/linux/0177-tty-serial-pl011-Make-the-locking-work-on-RT.patch
create mode 100644 buildroot-external/patches/linux/0178-ARM-vfp-Provide-vfp_lock-for-VFP-locking.patch
create mode 100644 buildroot-external/patches/linux/0179-ARM-vfp-Use-vfp_lock-in-vfp_sync_hwstate.patch
create mode 100644 buildroot-external/patches/linux/0180-ARM-vfp-Use-vfp_lock-in-vfp_support_entry.patch
create mode 100644 buildroot-external/patches/linux/0181-ARM-vfp-Move-sending-signals-outside-of-vfp_lock-ed-.patch
rename buildroot-external/patches/linux/{0043-ARM-Allow-to-enable-RT.patch => 0182-ARM-Allow-to-enable-RT.patch} (79%)
rename buildroot-external/patches/linux/{0044-ARM64-Allow-to-enable-RT.patch => 0183-ARM64-Allow-to-enable-RT.patch} (68%)
rename buildroot-external/patches/linux/{0045-powerpc-traps-Use-PREEMPT_RT.patch => 0184-powerpc-traps-Use-PREEMPT_RT.patch} (88%)
rename buildroot-external/patches/linux/{0046-powerpc-pseries-iommu-Use-a-locallock-instead-local_.patch => 0185-powerpc-pseries-iommu-Use-a-locallock-instead-local_.patch} (83%)
create mode 100644 buildroot-external/patches/linux/0186-powerpc-pseries-Select-the-generic-memory-allocator.patch
rename buildroot-external/patches/linux/{0047-powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch => 0187-powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch} (89%)
rename buildroot-external/patches/linux/{0048-powerpc-stackprotector-work-around-stack-guard-init-.patch => 0188-powerpc-stackprotector-work-around-stack-guard-init-.patch} (50%)
rename buildroot-external/patches/linux/{0049-POWERPC-Allow-to-enable-RT.patch => 0189-POWERPC-Allow-to-enable-RT.patch} (78%)
create mode 100644 buildroot-external/patches/linux/0190-RISC-V-Probe-misaligned-access-speed-in-parallel.patch
create mode 100644 buildroot-external/patches/linux/0191-riscv-add-PREEMPT_AUTO-support.patch
create mode 100644 buildroot-external/patches/linux/0192-riscv-allow-to-enable-RT.patch
rename buildroot-external/patches/linux/{0050-sysfs-Add-sys-kernel-realtime-entry.patch => 0193-sysfs-Add-sys-kernel-realtime-entry.patch} (81%)
rename buildroot-external/patches/linux/{0051-Add-localversion-for-RT-release.patch => 0194-Add-localversion-for-RT-release.patch} (69%)
rename buildroot-external/patches/linux/{0062-Linux-6.1.73-rt22-REBASE.patch => 0195-Linux-6.6.14-rt21-REBASE.patch} (59%)
diff --git a/buildroot b/buildroot
index 0d33775f..bdebc13b 160000
--- a/buildroot
+++ b/buildroot
@@ -1 +1 @@
-Subproject commit 0d33775f295ab64bc5eda1cabc2e7c91282dc150
+Subproject commit bdebc13b116e70b77333eee454f0683afcbf5d1a
diff --git a/buildroot-external/configs/ova_64_defconfig b/buildroot-external/configs/ova_64_defconfig
index 2f4f07c1..eb21972f 100644
--- a/buildroot-external/configs/ova_64_defconfig
+++ b/buildroot-external/configs/ova_64_defconfig
@@ -29,7 +29,7 @@ BR2_ROOTFS_POST_IMAGE_SCRIPT="$(BR2_EXTERNAL)/scripts/post-image.sh"
BR2_ROOTFS_POST_SCRIPT_ARGS="$(BR2_EXTERNAL)/board/ovos/ova"
BR2_LINUX_KERNEL=y
BR2_LINUX_KERNEL_CUSTOM_VERSION=y
-BR2_LINUX_KERNEL_CUSTOM_VERSION_VALUE="6.1.69"
+BR2_LINUX_KERNEL_CUSTOM_VERSION_VALUE="6.6.14"
BR2_LINUX_KERNEL_DEFCONFIG="x86_64"
BR2_LINUX_KERNEL_CONFIG_FRAGMENT_FILES="$(BR2_EXTERNAL)/kernel/ovos.config $(BR2_EXTERNAL)/kernel/device-drivers.config $(BR2_EXTERNAL)/kernel/docker.config $(BR2_EXTERNAL)/board/ovos/ova/kernel.config"
BR2_LINUX_KERNEL_LZ4=y
diff --git a/buildroot-external/configs/rpi3_64_defconfig b/buildroot-external/configs/rpi3_64_defconfig
index 3776a3da..51b97975 100644
--- a/buildroot-external/configs/rpi3_64_defconfig
+++ b/buildroot-external/configs/rpi3_64_defconfig
@@ -30,7 +30,7 @@ BR2_ROOTFS_POST_IMAGE_SCRIPT="$(BR2_EXTERNAL)/scripts/post-image.sh"
BR2_ROOTFS_POST_SCRIPT_ARGS="$(BR2_EXTERNAL)/board/ovos/raspberrypi/rpi3"
BR2_LINUX_KERNEL=y
BR2_LINUX_KERNEL_CUSTOM_TARBALL=y
-BR2_LINUX_KERNEL_CUSTOM_TARBALL_LOCATION="$(call github,raspberrypi,linux,3bb5880ab3dd31f75c07c3c33bf29c5d469b28f3)/linux-3bb5880ab3dd31f75c07c3c33bf29c5d469b28f3.tar.gz"
+BR2_LINUX_KERNEL_CUSTOM_TARBALL_LOCATION="$(call github,raspberrypi,linux,5e78d297b997dcc7a78ba747a62fb28d0b6a10d8)/linux-5e78d297b997dcc7a78ba747a62fb28d0b6a10d8.tar.gz"
BR2_LINUX_KERNEL_DEFCONFIG="bcmrpi3"
BR2_LINUX_KERNEL_CONFIG_FRAGMENT_FILES="$(BR2_EXTERNAL)/kernel/ovos.config $(BR2_EXTERNAL)/kernel/device-drivers.config $(BR2_EXTERNAL)/kernel/docker.config $(BR2_EXTERNAL)/board/ovos/raspberrypi/kernel.config"
BR2_LINUX_KERNEL_LZ4=y
diff --git a/buildroot-external/configs/rpi4_64_defconfig b/buildroot-external/configs/rpi4_64_defconfig
index 74f6b193..79fa0ca1 100644
--- a/buildroot-external/configs/rpi4_64_defconfig
+++ b/buildroot-external/configs/rpi4_64_defconfig
@@ -30,7 +30,7 @@ BR2_ROOTFS_POST_IMAGE_SCRIPT="$(BR2_EXTERNAL)/scripts/post-image.sh"
BR2_ROOTFS_POST_SCRIPT_ARGS="$(BR2_EXTERNAL)/board/ovos/raspberrypi/rpi4"
BR2_LINUX_KERNEL=y
BR2_LINUX_KERNEL_CUSTOM_TARBALL=y
-BR2_LINUX_KERNEL_CUSTOM_TARBALL_LOCATION="$(call github,raspberrypi,linux,3bb5880ab3dd31f75c07c3c33bf29c5d469b28f3)/linux-3bb5880ab3dd31f75c07c3c33bf29c5d469b28f3.tar.gz"
+BR2_LINUX_KERNEL_CUSTOM_TARBALL_LOCATION="$(call github,raspberrypi,linux,5e78d297b997dcc7a78ba747a62fb28d0b6a10d8)/linux-5e78d297b997dcc7a78ba747a62fb28d0b6a10d8.tar.gz"
BR2_LINUX_KERNEL_DEFCONFIG="bcm2711"
BR2_LINUX_KERNEL_CONFIG_FRAGMENT_FILES="$(BR2_EXTERNAL)/kernel/ovos.config $(BR2_EXTERNAL)/kernel/device-drivers.config $(BR2_EXTERNAL)/kernel/docker.config $(BR2_EXTERNAL)/board/ovos/raspberrypi/kernel.config"
BR2_LINUX_KERNEL_LZ4=y
diff --git a/buildroot-external/configs/x86_64_defconfig b/buildroot-external/configs/x86_64_defconfig
index c9b2c455..55efdd74 100644
--- a/buildroot-external/configs/x86_64_defconfig
+++ b/buildroot-external/configs/x86_64_defconfig
@@ -29,7 +29,7 @@ BR2_ROOTFS_POST_IMAGE_SCRIPT="$(BR2_EXTERNAL)/scripts/post-image.sh"
BR2_ROOTFS_POST_SCRIPT_ARGS="$(BR2_EXTERNAL)/board/ovos/pc"
BR2_LINUX_KERNEL=y
BR2_LINUX_KERNEL_CUSTOM_VERSION=y
-BR2_LINUX_KERNEL_CUSTOM_VERSION_VALUE="6.1.73"
+BR2_LINUX_KERNEL_CUSTOM_VERSION_VALUE="6.6.14"
BR2_LINUX_KERNEL_DEFCONFIG="x86_64"
BR2_LINUX_KERNEL_CONFIG_FRAGMENT_FILES="$(BR2_EXTERNAL)/kernel/ovos.config $(BR2_EXTERNAL)/kernel/device-drivers.config $(BR2_EXTERNAL)/kernel/docker.config $(BR2_EXTERNAL)/board/ovos/pc/kernel.config"
BR2_LINUX_KERNEL_LZ4=y
diff --git a/buildroot-external/patches/linux/0001-sched-Constrain-locks-in-sched_submit_work.patch b/buildroot-external/patches/linux/0001-sched-Constrain-locks-in-sched_submit_work.patch
new file mode 100644
index 00000000..52e60f36
--- /dev/null
+++ b/buildroot-external/patches/linux/0001-sched-Constrain-locks-in-sched_submit_work.patch
@@ -0,0 +1,54 @@
+From 71103fe2c85e989e7b5374cead80c0d75425f1de Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra
+Date: Fri, 8 Sep 2023 18:22:48 +0200
+Subject: [PATCH 001/195] sched: Constrain locks in sched_submit_work()
+
+Even though sched_submit_work() is ran from preemptible context,
+it is discouraged to have it use blocking locks due to the recursion
+potential.
+
+Enforce this.
+
+Signed-off-by: Peter Zijlstra (Intel)
+Signed-off-by: Sebastian Andrzej Siewior
+Signed-off-by: Peter Zijlstra (Intel)
+Link: https://lkml.kernel.org/r/20230908162254.999499-2-bigeasy@linutronix.de
+---
+ kernel/sched/core.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index a854b71836dd..a9bf40d18cec 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -6721,11 +6721,18 @@ void __noreturn do_task_dead(void)
+
+ static inline void sched_submit_work(struct task_struct *tsk)
+ {
++ static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG);
+ unsigned int task_flags;
+
+ if (task_is_running(tsk))
+ return;
+
++ /*
++ * Establish LD_WAIT_CONFIG context to ensure none of the code called
++ * will use a blocking primitive -- which would lead to recursion.
++ */
++ lock_map_acquire_try(&sched_map);
++
+ task_flags = tsk->flags;
+ /*
+ * If a worker goes to sleep, notify and ask workqueue whether it
+@@ -6750,6 +6757,8 @@ static inline void sched_submit_work(struct task_struct *tsk)
+ * make sure to submit it to avoid deadlocks.
+ */
+ blk_flush_plug(tsk->plug, true);
++
++ lock_map_release(&sched_map);
+ }
+
+ static void sched_update_worker(struct task_struct *tsk)
+--
+2.43.0
+
diff --git a/buildroot-external/patches/linux/0001-vduse-Remove-include-of-rwlock.h.patch b/buildroot-external/patches/linux/0001-vduse-Remove-include-of-rwlock.h.patch
deleted file mode 100644
index 3278ae66..00000000
--- a/buildroot-external/patches/linux/0001-vduse-Remove-include-of-rwlock.h.patch
+++ /dev/null
@@ -1,32 +0,0 @@
-From fad2ed9bfa2fce870133fadd15b4d12a26213096 Mon Sep 17 00:00:00 2001
-From: Sebastian Andrzej Siewior
-Date: Tue, 16 Aug 2022 09:45:22 +0200
-Subject: [PATCH 01/62] vduse: Remove include of rwlock.h
-
-rwlock.h should not be included directly. Instead linux/splinlock.h
-should be included. Including it directly will break the RT build.
-
-Remove the rwlock.h include.
-
-Signed-off-by: Sebastian Andrzej Siewior
-Acked-by: Michael S. Tsirkin
-Link: https://lkml.kernel.org/r/20221026134407.711768-1-bigeasy@linutronix.de
----
- drivers/vdpa/vdpa_user/iova_domain.h | 1 -
- 1 file changed, 1 deletion(-)
-
-diff --git a/drivers/vdpa/vdpa_user/iova_domain.h b/drivers/vdpa/vdpa_user/iova_domain.h
-index 4e0e50e7ac15..173e979b84a9 100644
---- a/drivers/vdpa/vdpa_user/iova_domain.h
-+++ b/drivers/vdpa/vdpa_user/iova_domain.h
-@@ -14,7 +14,6 @@
- #include
- #include
- #include
--#include
-
- #define IOVA_START_PFN 1
-
---
-2.43.0
-
diff --git a/buildroot-external/patches/linux/0002-locking-rtmutex-Avoid-unconditional-slowpath-for-DEB.patch b/buildroot-external/patches/linux/0002-locking-rtmutex-Avoid-unconditional-slowpath-for-DEB.patch
new file mode 100644
index 00000000..c38c8f34
--- /dev/null
+++ b/buildroot-external/patches/linux/0002-locking-rtmutex-Avoid-unconditional-slowpath-for-DEB.patch
@@ -0,0 +1,86 @@
+From 30c0fabb5665235a7e6dd945be69945e9c6dc069 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior
+Date: Fri, 8 Sep 2023 18:22:49 +0200
+Subject: [PATCH 002/195] locking/rtmutex: Avoid unconditional slowpath for
+ DEBUG_RT_MUTEXES
+
+With DEBUG_RT_MUTEXES enabled the fast-path rt_mutex_cmpxchg_acquire()
+always fails and all lock operations take the slow path.
+
+Provide a new helper inline rt_mutex_try_acquire() which maps to
+rt_mutex_cmpxchg_acquire() in the non-debug case. For the debug case
+it invokes rt_mutex_slowtrylock() which can acquire a non-contended
+rtmutex under full debug coverage.
+
+Signed-off-by: Sebastian Andrzej Siewior
+Signed-off-by: Thomas Gleixner
+Signed-off-by: Sebastian Andrzej Siewior
+Signed-off-by: Peter Zijlstra (Intel)
+Link: https://lkml.kernel.org/r/20230908162254.999499-3-bigeasy@linutronix.de
+---
+ kernel/locking/rtmutex.c | 21 ++++++++++++++++++++-
+ kernel/locking/ww_rt_mutex.c | 2 +-
+ 2 files changed, 21 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 21db0df0eb00..bcec0533a0cc 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -218,6 +218,11 @@ static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock,
+ return try_cmpxchg_acquire(&lock->owner, &old, new);
+ }
+
++static __always_inline bool rt_mutex_try_acquire(struct rt_mutex_base *lock)
++{
++ return rt_mutex_cmpxchg_acquire(lock, NULL, current);
++}
++
+ static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
+ struct task_struct *old,
+ struct task_struct *new)
+@@ -297,6 +302,20 @@ static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock,
+
+ }
+
++static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock);
++
++static __always_inline bool rt_mutex_try_acquire(struct rt_mutex_base *lock)
++{
++ /*
++ * With debug enabled rt_mutex_cmpxchg trylock() will always fail.
++ *
++ * Avoid unconditionally taking the slow path by using
++ * rt_mutex_slow_trylock() which is covered by the debug code and can
++ * acquire a non-contended rtmutex.
++ */
++ return rt_mutex_slowtrylock(lock);
++}
++
+ static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
+ struct task_struct *old,
+ struct task_struct *new)
+@@ -1755,7 +1774,7 @@ static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
+ static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
+ unsigned int state)
+ {
+- if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
++ if (likely(rt_mutex_try_acquire(lock)))
+ return 0;
+
+ return rt_mutex_slowlock(lock, NULL, state);
+diff --git a/kernel/locking/ww_rt_mutex.c b/kernel/locking/ww_rt_mutex.c
+index d1473c624105..c7196de838ed 100644
+--- a/kernel/locking/ww_rt_mutex.c
++++ b/kernel/locking/ww_rt_mutex.c
+@@ -62,7 +62,7 @@ __ww_rt_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx,
+ }
+ mutex_acquire_nest(&rtm->dep_map, 0, 0, nest_lock, ip);
+
+- if (likely(rt_mutex_cmpxchg_acquire(&rtm->rtmutex, NULL, current))) {
++ if (likely(rt_mutex_try_acquire(&rtm->rtmutex))) {
+ if (ww_ctx)
+ ww_mutex_set_context_fastpath(lock, ww_ctx);
+ return 0;
+--
+2.43.0
+
diff --git a/buildroot-external/patches/linux/0002-signal-Don-t-disable-preemption-in-ptrace_stop-on-PR.patch b/buildroot-external/patches/linux/0002-signal-Don-t-disable-preemption-in-ptrace_stop-on-PR.patch
deleted file mode 100644
index ef409373..00000000
--- a/buildroot-external/patches/linux/0002-signal-Don-t-disable-preemption-in-ptrace_stop-on-PR.patch
+++ /dev/null
@@ -1,65 +0,0 @@
-From 2defd6085e9803ba06f6b56f6f901309462761a6 Mon Sep 17 00:00:00 2001
-From: Sebastian Andrzej Siewior
-Date: Wed, 22 Jun 2022 11:36:17 +0200
-Subject: [PATCH 02/62] signal: Don't disable preemption in ptrace_stop() on
- PREEMPT_RT.
-
-Commit
- 53da1d9456fe7 ("fix ptrace slowness")
-
-is just band aid around the problem.
-The invocation of do_notify_parent_cldstop() wakes the parent and makes
-it runnable. The scheduler then wants to replace this still running task
-with the parent. With the read_lock() acquired this is not possible
-because preemption is disabled and so this is deferred until read_unlock().
-This scheduling point is undesired and is avoided by disabling preemption
-around the unlock operation enabled again before the schedule() invocation
-without a preemption point.
-This is only undesired because the parent sleeps a cycle in
-wait_task_inactive() until the traced task leaves the run-queue in
-schedule(). It is not a correctness issue, it is just band aid to avoid the
-visbile delay which sums up over multiple invocations.
-The task can still be preempted if an interrupt occurs between
-preempt_enable_no_resched() and freezable_schedule() because on the IRQ-exit
-path of the interrupt scheduling _will_ happen. This is ignored since it does
-not happen very often.
-
-On PREEMPT_RT keeping preemption disabled during the invocation of
-cgroup_enter_frozen() becomes a problem because the function acquires
-css_set_lock which is a sleeping lock on PREEMPT_RT and must not be
-acquired with disabled preemption.
-
-Don't disable preemption on PREEMPT_RT. Remove the TODO regarding adding
-read_unlock_no_resched() as there is no need for it and will cause harm.
-
-Signed-off-by: Sebastian Andrzej Siewior
-Link: https://lkml.kernel.org/r/20220720154435.232749-2-bigeasy@linutronix.de
----
- kernel/signal.c | 8 ++++----
- 1 file changed, 4 insertions(+), 4 deletions(-)
-
-diff --git a/kernel/signal.c b/kernel/signal.c
-index 5d45f5da2b36..58e919c7c936 100644
---- a/kernel/signal.c
-+++ b/kernel/signal.c
-@@ -2302,13 +2302,13 @@ static int ptrace_stop(int exit_code, int why, unsigned long message,
- /*
- * Don't want to allow preemption here, because
- * sys_ptrace() needs this task to be inactive.
-- *
-- * XXX: implement read_unlock_no_resched().
- */
-- preempt_disable();
-+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
-+ preempt_disable();
- read_unlock(&tasklist_lock);
- cgroup_enter_frozen();
-- preempt_enable_no_resched();
-+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
-+ preempt_enable_no_resched();
- schedule();
- cgroup_leave_frozen(true);
-
---
-2.43.0
-
diff --git a/buildroot-external/patches/linux/0003-sched-Consider-task_struct-saved_state-in-wait_task_.patch b/buildroot-external/patches/linux/0003-sched-Consider-task_struct-saved_state-in-wait_task_.patch
deleted file mode 100644
index 208bd5bf..00000000
--- a/buildroot-external/patches/linux/0003-sched-Consider-task_struct-saved_state-in-wait_task_.patch
+++ /dev/null
@@ -1,151 +0,0 @@
-From af0232b0cb6d5afed7dab020d7dca4759ad93757 Mon Sep 17 00:00:00 2001
-From: Sebastian Andrzej Siewior
-Date: Wed, 22 Jun 2022 12:27:05 +0200
-Subject: [PATCH 03/62] sched: Consider task_struct::saved_state in
- wait_task_inactive().
-
-Ptrace is using wait_task_inactive() to wait for the tracee to reach a
-certain task state. On PREEMPT_RT that state may be stored in
-task_struct::saved_state while the tracee blocks on a sleeping lock and
-task_struct::__state is set to TASK_RTLOCK_WAIT.
-It is not possible to check only for TASK_RTLOCK_WAIT to be sure that the task
-is blocked on a sleeping lock because during wake up (after the sleeping lock
-has been acquired) the task state is set TASK_RUNNING. After the task in on CPU
-and acquired the pi_lock it will reset the state accordingly but until then
-TASK_RUNNING will be observed (with the desired state saved in saved_state).
-
-Check also for task_struct::saved_state if the desired match was not found in
-task_struct::__state on PREEMPT_RT. If the state was found in saved_state, wait
-until the task is idle and state is visible in task_struct::__state.
-
-Signed-off-by: Sebastian Andrzej Siewior
-Reviewed-by: Valentin Schneider
-Link: https://lkml.kernel.org/r/Yt%2FpQAFQ1xKNK0RY@linutronix.de
----
- kernel/sched/core.c | 81 ++++++++++++++++++++++++++++++++++++++++++---
- 1 file changed, 76 insertions(+), 5 deletions(-)
-
-diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index 18a4f8f28a25..6bd06122850a 100644
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -3281,6 +3281,76 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p,
- }
- #endif /* CONFIG_NUMA_BALANCING */
-
-+#ifdef CONFIG_PREEMPT_RT
-+
-+/*
-+ * Consider:
-+ *
-+ * set_special_state(X);
-+ *
-+ * do_things()
-+ * // Somewhere in there is an rtlock that can be contended:
-+ * current_save_and_set_rtlock_wait_state();
-+ * [...]
-+ * schedule_rtlock(); (A)
-+ * [...]
-+ * current_restore_rtlock_saved_state();
-+ *
-+ * schedule(); (B)
-+ *
-+ * If p->saved_state is anything else than TASK_RUNNING, then p blocked on an
-+ * rtlock (A) *before* voluntarily calling into schedule() (B) after setting its
-+ * state to X. For things like ptrace (X=TASK_TRACED), the task could have more
-+ * work to do upon acquiring the lock in do_things() before whoever called
-+ * wait_task_inactive() should return. IOW, we have to wait for:
-+ *
-+ * p.saved_state = TASK_RUNNING
-+ * p.__state = X
-+ *
-+ * which implies the task isn't blocked on an RT lock and got to schedule() (B).
-+ *
-+ * Also see comments in ttwu_state_match().
-+ */
-+
-+static __always_inline bool state_mismatch(struct task_struct *p, unsigned int match_state)
-+{
-+ unsigned long flags;
-+ bool mismatch;
-+
-+ raw_spin_lock_irqsave(&p->pi_lock, flags);
-+ if (READ_ONCE(p->__state) & match_state)
-+ mismatch = false;
-+ else if (READ_ONCE(p->saved_state) & match_state)
-+ mismatch = false;
-+ else
-+ mismatch = true;
-+
-+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+ return mismatch;
-+}
-+static __always_inline bool state_match(struct task_struct *p, unsigned int match_state,
-+ bool *wait)
-+{
-+ if (READ_ONCE(p->__state) & match_state)
-+ return true;
-+ if (READ_ONCE(p->saved_state) & match_state) {
-+ *wait = true;
-+ return true;
-+ }
-+ return false;
-+}
-+#else
-+static __always_inline bool state_mismatch(struct task_struct *p, unsigned int match_state)
-+{
-+ return !(READ_ONCE(p->__state) & match_state);
-+}
-+static __always_inline bool state_match(struct task_struct *p, unsigned int match_state,
-+ bool *wait)
-+{
-+ return (READ_ONCE(p->__state) & match_state);
-+}
-+#endif
-+
- /*
- * wait_task_inactive - wait for a thread to unschedule.
- *
-@@ -3299,7 +3369,7 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p,
- */
- unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
- {
-- int running, queued;
-+ bool running, wait;
- struct rq_flags rf;
- unsigned long ncsw;
- struct rq *rq;
-@@ -3325,7 +3395,7 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state
- * is actually now running somewhere else!
- */
- while (task_on_cpu(rq, p)) {
-- if (!(READ_ONCE(p->__state) & match_state))
-+ if (state_mismatch(p, match_state))
- return 0;
- cpu_relax();
- }
-@@ -3338,9 +3408,10 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state
- rq = task_rq_lock(p, &rf);
- trace_sched_wait_task(p);
- running = task_on_cpu(rq, p);
-- queued = task_on_rq_queued(p);
-+ wait = task_on_rq_queued(p);
- ncsw = 0;
-- if (READ_ONCE(p->__state) & match_state)
-+
-+ if (state_match(p, match_state, &wait))
- ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
- task_rq_unlock(rq, p, &rf);
-
-@@ -3370,7 +3441,7 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state
- * running right now), it's preempted, and we should
- * yield - it could be a while.
- */
-- if (unlikely(queued)) {
-+ if (unlikely(wait)) {
- ktime_t to = NSEC_PER_SEC / HZ;
-
- set_current_state(TASK_UNINTERRUPTIBLE);
---
-2.43.0
-
diff --git a/buildroot-external/patches/linux/0003-sched-Extract-__schedule_loop.patch b/buildroot-external/patches/linux/0003-sched-Extract-__schedule_loop.patch
new file mode 100644
index 00000000..55185166
--- /dev/null
+++ b/buildroot-external/patches/linux/0003-sched-Extract-__schedule_loop.patch
@@ -0,0 +1,63 @@
+From 67e70cee63df0dcf79656f4902fb1a563a9bd28f Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner
+Date: Fri, 8 Sep 2023 18:22:50 +0200
+Subject: [PATCH 003/195] sched: Extract __schedule_loop()
+
+There are currently two implementations of this basic __schedule()
+loop, and there is soon to be a third.
+
+Signed-off-by: Thomas Gleixner
+Signed-off-by: Sebastian Andrzej Siewior
+Signed-off-by: Peter Zijlstra (Intel)
+Link: https://lkml.kernel.org/r/20230908162254.999499-4-bigeasy@linutronix.de
+---
+ kernel/sched/core.c | 21 +++++++++++----------
+ 1 file changed, 11 insertions(+), 10 deletions(-)
+
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index a9bf40d18cec..ed5f5e3f6239 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -6771,16 +6771,21 @@ static void sched_update_worker(struct task_struct *tsk)
+ }
+ }
+
+-asmlinkage __visible void __sched schedule(void)
++static __always_inline void __schedule_loop(unsigned int sched_mode)
+ {
+- struct task_struct *tsk = current;
+-
+- sched_submit_work(tsk);
+ do {
+ preempt_disable();
+- __schedule(SM_NONE);
++ __schedule(sched_mode);
+ sched_preempt_enable_no_resched();
+ } while (need_resched());
++}
++
++asmlinkage __visible void __sched schedule(void)
++{
++ struct task_struct *tsk = current;
++
++ sched_submit_work(tsk);
++ __schedule_loop(SM_NONE);
+ sched_update_worker(tsk);
+ }
+ EXPORT_SYMBOL(schedule);
+@@ -6844,11 +6849,7 @@ void __sched schedule_preempt_disabled(void)
+ #ifdef CONFIG_PREEMPT_RT
+ void __sched notrace schedule_rtlock(void)
+ {
+- do {
+- preempt_disable();
+- __schedule(SM_RTLOCK_WAIT);
+- sched_preempt_enable_no_resched();
+- } while (need_resched());
++ __schedule_loop(SM_RTLOCK_WAIT);
+ }
+ NOKPROBE_SYMBOL(schedule_rtlock);
+ #endif
+--
+2.43.0
+
diff --git a/buildroot-external/patches/linux/0004-sched-Provide-rt_mutex-specific-scheduler-helpers.patch b/buildroot-external/patches/linux/0004-sched-Provide-rt_mutex-specific-scheduler-helpers.patch
new file mode 100644
index 00000000..3ee2984a
--- /dev/null
+++ b/buildroot-external/patches/linux/0004-sched-Provide-rt_mutex-specific-scheduler-helpers.patch
@@ -0,0 +1,137 @@
+From 9e47eb505e7f3ed5408d3d66e91ebb6982768023 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra
+Date: Fri, 8 Sep 2023 18:22:51 +0200
+Subject: [PATCH 004/195] sched: Provide rt_mutex specific scheduler helpers
+
+With PREEMPT_RT there is a rt_mutex recursion problem where
+sched_submit_work() can use an rtlock (aka spinlock_t). More
+specifically what happens is:
+
+ mutex_lock() /* really rt_mutex */
+ ...
+ __rt_mutex_slowlock_locked()
+ task_blocks_on_rt_mutex()
+ // enqueue current task as waiter
+ // do PI chain walk
+ rt_mutex_slowlock_block()
+ schedule()
+ sched_submit_work()
+ ...
+ spin_lock() /* really rtlock */
+ ...
+ __rt_mutex_slowlock_locked()
+ task_blocks_on_rt_mutex()
+ // enqueue current task as waiter *AGAIN*
+ // *CONFUSION*
+
+Fix this by making rt_mutex do the sched_submit_work() early, before
+it enqueues itself as a waiter -- before it even knows *if* it will
+wait.
+
+[[ basically Thomas' patch but with different naming and a few asserts
+ added ]]
+
+Originally-by: Thomas Gleixner
+Signed-off-by: Peter Zijlstra (Intel)
+Signed-off-by: Sebastian Andrzej Siewior
+Signed-off-by: Peter Zijlstra (Intel)
+Link: https://lkml.kernel.org/r/20230908162254.999499-5-bigeasy@linutronix.de
+---
+ include/linux/sched.h | 3 +++
+ include/linux/sched/rt.h | 4 ++++
+ kernel/sched/core.c | 36 ++++++++++++++++++++++++++++++++----
+ 3 files changed, 39 insertions(+), 4 deletions(-)
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 77f01ac385f7..67623ffd4a8e 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -911,6 +911,9 @@ struct task_struct {
+ * ->sched_remote_wakeup gets used, so it can be in this word.
+ */
+ unsigned sched_remote_wakeup:1;
++#ifdef CONFIG_RT_MUTEXES
++ unsigned sched_rt_mutex:1;
++#endif
+
+ /* Bit to tell LSMs we're in execve(): */
+ unsigned in_execve:1;
+diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
+index 994c25640e15..b2b9e6eb9683 100644
+--- a/include/linux/sched/rt.h
++++ b/include/linux/sched/rt.h
+@@ -30,6 +30,10 @@ static inline bool task_is_realtime(struct task_struct *tsk)
+ }
+
+ #ifdef CONFIG_RT_MUTEXES
++extern void rt_mutex_pre_schedule(void);
++extern void rt_mutex_schedule(void);
++extern void rt_mutex_post_schedule(void);
++
+ /*
+ * Must hold either p->pi_lock or task_rq(p)->lock.
+ */
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index ed5f5e3f6239..90f9124ac027 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -6724,9 +6724,6 @@ static inline void sched_submit_work(struct task_struct *tsk)
+ static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG);
+ unsigned int task_flags;
+
+- if (task_is_running(tsk))
+- return;
+-
+ /*
+ * Establish LD_WAIT_CONFIG context to ensure none of the code called
+ * will use a blocking primitive -- which would lead to recursion.
+@@ -6784,7 +6781,12 @@ asmlinkage __visible void __sched schedule(void)
+ {
+ struct task_struct *tsk = current;
+
+- sched_submit_work(tsk);
++#ifdef CONFIG_RT_MUTEXES
++ lockdep_assert(!tsk->sched_rt_mutex);
++#endif
++
++ if (!task_is_running(tsk))
++ sched_submit_work(tsk);
+ __schedule_loop(SM_NONE);
+ sched_update_worker(tsk);
+ }
+@@ -7045,6 +7047,32 @@ static void __setscheduler_prio(struct task_struct *p, int prio)
+
+ #ifdef CONFIG_RT_MUTEXES
+
++/*
++ * Would be more useful with typeof()/auto_type but they don't mix with
++ * bit-fields. Since it's a local thing, use int. Keep the generic sounding
++ * name such that if someone were to implement this function we get to compare
++ * notes.
++ */
++#define fetch_and_set(x, v) ({ int _x = (x); (x) = (v); _x; })
++
++void rt_mutex_pre_schedule(void)
++{
++ lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1));
++ sched_submit_work(current);
++}
++
++void rt_mutex_schedule(void)
++{
++ lockdep_assert(current->sched_rt_mutex);
++ __schedule_loop(SM_NONE);
++}
++
++void rt_mutex_post_schedule(void)
++{
++ sched_update_worker(current);
++ lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0));
++}
++
+ static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
+ {
+ if (pi_task)
+--
+2.43.0
+
diff --git a/buildroot-external/patches/linux/0004-spi-Remove-the-obsolte-u64_stats_fetch_-_irq-users.patch b/buildroot-external/patches/linux/0004-spi-Remove-the-obsolte-u64_stats_fetch_-_irq-users.patch
deleted file mode 100644
index fa97e99e..00000000
--- a/buildroot-external/patches/linux/0004-spi-Remove-the-obsolte-u64_stats_fetch_-_irq-users.patch
+++ /dev/null
@@ -1,39 +0,0 @@
-From 501df2b62fce3d3cea107bfc7c8c28283a62dc01 Mon Sep 17 00:00:00 2001
-From: Thomas Gleixner
-Date: Thu, 25 Aug 2022 16:15:32 +0200
-Subject: [PATCH 04/62] spi: Remove the obsolte u64_stats_fetch_*_irq() users.
-
-Now that the 32bit UP oddity is gone and 32bit uses always a sequence
-count, there is no need for the fetch_irq() variants anymore.
-
-Convert to the regular interface.
-
-Cc: Mark Brown
-Cc: linux-spi@vger.kernel.org
-Signed-off-by: Thomas Gleixner
-Signed-off-by: Sebastian Andrzej Siewior
-Acked-by: Peter Zijlstra (Intel)
----
- drivers/spi/spi.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
-index 22d227878bc4..75cc8bbc2b14 100644
---- a/drivers/spi/spi.c
-+++ b/drivers/spi/spi.c
-@@ -127,10 +127,10 @@ do { \
- unsigned int start; \
- pcpu_stats = per_cpu_ptr(in, i); \
- do { \
-- start = u64_stats_fetch_begin_irq( \
-+ start = u64_stats_fetch_begin( \
- &pcpu_stats->syncp); \
- inc = u64_stats_read(&pcpu_stats->field); \
-- } while (u64_stats_fetch_retry_irq( \
-+ } while (u64_stats_fetch_retry( \
- &pcpu_stats->syncp, start)); \
- ret += inc; \
- } \
---
-2.43.0
-
diff --git a/buildroot-external/patches/linux/0005-locking-rtmutex-Use-rt_mutex-specific-scheduler-help.patch b/buildroot-external/patches/linux/0005-locking-rtmutex-Use-rt_mutex-specific-scheduler-help.patch
new file mode 100644
index 00000000..aba1d68e
--- /dev/null
+++ b/buildroot-external/patches/linux/0005-locking-rtmutex-Use-rt_mutex-specific-scheduler-help.patch
@@ -0,0 +1,191 @@
+From be4c4dd6d8b7daa71457f2d02eead010dc5b77b5 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior
+Date: Fri, 8 Sep 2023 18:22:52 +0200
+Subject: [PATCH 005/195] locking/rtmutex: Use rt_mutex specific scheduler
+ helpers
+
+Have rt_mutex use the rt_mutex specific scheduler helpers to avoid
+recursion vs rtlock on the PI state.
+
+[[ peterz: adapted to new names ]]
+
+Reported-by: Crystal Wood
+Signed-off-by: Sebastian Andrzej Siewior
+Signed-off-by: Peter Zijlstra (Intel)
+Link: https://lkml.kernel.org/r/20230908162254.999499-6-bigeasy@linutronix.de
+---
+ kernel/futex/pi.c | 11 +++++++++++
+ kernel/locking/rtmutex.c | 14 ++++++++++++--
+ kernel/locking/rwbase_rt.c | 6 ++++++
+ kernel/locking/rwsem.c | 8 +++++++-
+ kernel/locking/spinlock_rt.c | 4 ++++
+ 5 files changed, 40 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/futex/pi.c b/kernel/futex/pi.c
+index ce2889f12375..f8e65b27d9d6 100644
+--- a/kernel/futex/pi.c
++++ b/kernel/futex/pi.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0-or-later
+
+ #include
++#include
+ #include
+
+ #include "futex.h"
+@@ -1002,6 +1003,12 @@ int futex_lock_pi(u32 __user *uaddr, unsigned int flags, ktime_t *time, int tryl
+ goto no_block;
+ }
+
++ /*
++ * Must be done before we enqueue the waiter, here is unfortunately
++ * under the hb lock, but that *should* work because it does nothing.
++ */
++ rt_mutex_pre_schedule();
++
+ rt_mutex_init_waiter(&rt_waiter);
+
+ /*
+@@ -1052,6 +1059,10 @@ int futex_lock_pi(u32 __user *uaddr, unsigned int flags, ktime_t *time, int tryl
+ if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
+ ret = 0;
+
++ /*
++ * Waiter is unqueued.
++ */
++ rt_mutex_post_schedule();
+ no_block:
+ /*
+ * Fixup the pi_state owner and possibly acquire the lock if we
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index bcec0533a0cc..a3fe05dfd0d8 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1632,7 +1632,7 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
+ raw_spin_unlock_irq(&lock->wait_lock);
+
+ if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
+- schedule();
++ rt_mutex_schedule();
+
+ raw_spin_lock_irq(&lock->wait_lock);
+ set_current_state(state);
+@@ -1661,7 +1661,7 @@ static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock,
+ WARN(1, "rtmutex deadlock detected\n");
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+- schedule();
++ rt_mutex_schedule();
+ }
+ }
+
+@@ -1756,6 +1756,15 @@ static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
+ unsigned long flags;
+ int ret;
+
++ /*
++ * Do all pre-schedule work here, before we queue a waiter and invoke
++ * PI -- any such work that trips on rtlock (PREEMPT_RT spinlock) would
++ * otherwise recurse back into task_blocks_on_rt_mutex() through
++ * rtlock_slowlock() and will then enqueue a second waiter for this
++ * same task and things get really confusing real fast.
++ */
++ rt_mutex_pre_schedule();
++
+ /*
+ * Technically we could use raw_spin_[un]lock_irq() here, but this can
+ * be called in early boot if the cmpxchg() fast path is disabled
+@@ -1767,6 +1776,7 @@ static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
+ raw_spin_lock_irqsave(&lock->wait_lock, flags);
+ ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state);
+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
++ rt_mutex_post_schedule();
+
+ return ret;
+ }
+diff --git a/kernel/locking/rwbase_rt.c b/kernel/locking/rwbase_rt.c
+index 25ec0239477c..c7258cb32d91 100644
+--- a/kernel/locking/rwbase_rt.c
++++ b/kernel/locking/rwbase_rt.c
+@@ -71,6 +71,7 @@ static int __sched __rwbase_read_lock(struct rwbase_rt *rwb,
+ struct rt_mutex_base *rtm = &rwb->rtmutex;
+ int ret;
+
++ rwbase_pre_schedule();
+ raw_spin_lock_irq(&rtm->wait_lock);
+
+ /*
+@@ -125,6 +126,7 @@ static int __sched __rwbase_read_lock(struct rwbase_rt *rwb,
+ rwbase_rtmutex_unlock(rtm);
+
+ trace_contention_end(rwb, ret);
++ rwbase_post_schedule();
+ return ret;
+ }
+
+@@ -237,6 +239,8 @@ static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
+ /* Force readers into slow path */
+ atomic_sub(READER_BIAS, &rwb->readers);
+
++ rwbase_pre_schedule();
++
+ raw_spin_lock_irqsave(&rtm->wait_lock, flags);
+ if (__rwbase_write_trylock(rwb))
+ goto out_unlock;
+@@ -248,6 +252,7 @@ static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
+ if (rwbase_signal_pending_state(state, current)) {
+ rwbase_restore_current_state();
+ __rwbase_write_unlock(rwb, 0, flags);
++ rwbase_post_schedule();
+ trace_contention_end(rwb, -EINTR);
+ return -EINTR;
+ }
+@@ -266,6 +271,7 @@ static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
+
+ out_unlock:
+ raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
++ rwbase_post_schedule();
+ return 0;
+ }
+
+diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
+index 9eabd585ce7a..2340b6d90ec6 100644
+--- a/kernel/locking/rwsem.c
++++ b/kernel/locking/rwsem.c
+@@ -1427,8 +1427,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
+ #define rwbase_signal_pending_state(state, current) \
+ signal_pending_state(state, current)
+
++#define rwbase_pre_schedule() \
++ rt_mutex_pre_schedule()
++
+ #define rwbase_schedule() \
+- schedule()
++ rt_mutex_schedule()
++
++#define rwbase_post_schedule() \
++ rt_mutex_post_schedule()
+
+ #include "rwbase_rt.c"
+
+diff --git a/kernel/locking/spinlock_rt.c b/kernel/locking/spinlock_rt.c
+index 48a19ed8486d..842037b2ba54 100644
+--- a/kernel/locking/spinlock_rt.c
++++ b/kernel/locking/spinlock_rt.c
+@@ -184,9 +184,13 @@ static __always_inline int rwbase_rtmutex_trylock(struct rt_mutex_base *rtm)
+
+ #define rwbase_signal_pending_state(state, current) (0)
+
++#define rwbase_pre_schedule()
++
+ #define rwbase_schedule() \
+ schedule_rtlock()
+
++#define rwbase_post_schedule()
++
+ #include "rwbase_rt.c"
+ /*
+ * The common functions which get wrapped into the rwlock API.
+--
+2.43.0
+
diff --git a/buildroot-external/patches/linux/0005-net-Remove-the-obsolte-u64_stats_fetch_-_irq-users-d.patch b/buildroot-external/patches/linux/0005-net-Remove-the-obsolte-u64_stats_fetch_-_irq-users-d.patch
deleted file mode 100644
index 116c6f93..00000000
--- a/buildroot-external/patches/linux/0005-net-Remove-the-obsolte-u64_stats_fetch_-_irq-users-d.patch
+++ /dev/null
@@ -1,2150 +0,0 @@
-From 06b58043930deffafd35aacc0b3f2af41ea720b3 Mon Sep 17 00:00:00 2001
-From: Thomas Gleixner
-Date: Thu, 25 Aug 2022 16:15:44 +0200
-Subject: [PATCH 05/62] net: Remove the obsolte u64_stats_fetch_*_irq() users
- (drivers).
-
-Now that the 32bit UP oddity is gone and 32bit uses always a sequence
-count, there is no need for the fetch_irq() variants anymore.
-
-Convert to the regular interface.
-
-Signed-off-by: Thomas Gleixner
-Signed-off-by: Sebastian Andrzej Siewior
-Acked-by: Peter Zijlstra (Intel)
----
- drivers/net/ethernet/alacritech/slic.h | 12 +++----
- drivers/net/ethernet/amazon/ena/ena_ethtool.c | 4 +--
- drivers/net/ethernet/amazon/ena/ena_netdev.c | 12 +++----
- .../net/ethernet/aquantia/atlantic/aq_ring.c | 8 ++---
- drivers/net/ethernet/asix/ax88796c_main.c | 4 +--
- drivers/net/ethernet/broadcom/b44.c | 8 ++---
- drivers/net/ethernet/broadcom/bcmsysport.c | 12 +++----
- drivers/net/ethernet/cortina/gemini.c | 24 +++++++-------
- .../net/ethernet/emulex/benet/be_ethtool.c | 12 +++----
- drivers/net/ethernet/emulex/benet/be_main.c | 16 +++++-----
- .../ethernet/fungible/funeth/funeth_txrx.h | 4 +--
- drivers/net/ethernet/google/gve/gve_ethtool.c | 16 +++++-----
- drivers/net/ethernet/google/gve/gve_main.c | 12 +++----
- .../net/ethernet/hisilicon/hns3/hns3_enet.c | 4 +--
- drivers/net/ethernet/huawei/hinic/hinic_rx.c | 4 +--
- drivers/net/ethernet/huawei/hinic/hinic_tx.c | 4 +--
- .../net/ethernet/intel/fm10k/fm10k_netdev.c | 8 ++---
- .../net/ethernet/intel/i40e/i40e_ethtool.c | 8 ++---
- drivers/net/ethernet/intel/i40e/i40e_main.c | 20 ++++++------
- .../net/ethernet/intel/iavf/iavf_ethtool.c | 8 ++---
- drivers/net/ethernet/intel/ice/ice_main.c | 4 +--
- drivers/net/ethernet/intel/igb/igb_ethtool.c | 12 +++----
- drivers/net/ethernet/intel/igb/igb_main.c | 8 ++---
- drivers/net/ethernet/intel/igc/igc_ethtool.c | 12 +++----
- drivers/net/ethernet/intel/igc/igc_main.c | 8 ++---
- .../net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 8 ++---
- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 8 ++---
- drivers/net/ethernet/intel/ixgbevf/ethtool.c | 12 +++----
- .../net/ethernet/intel/ixgbevf/ixgbevf_main.c | 8 ++---
- drivers/net/ethernet/marvell/mvneta.c | 8 ++---
- .../net/ethernet/marvell/mvpp2/mvpp2_main.c | 8 ++---
- drivers/net/ethernet/marvell/sky2.c | 8 ++---
- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 8 ++---
- .../net/ethernet/mellanox/mlxsw/spectrum.c | 4 +--
- drivers/net/ethernet/microsoft/mana/mana_en.c | 8 ++---
- .../ethernet/microsoft/mana/mana_ethtool.c | 8 ++---
- .../ethernet/netronome/nfp/nfp_net_common.c | 8 ++---
- .../ethernet/netronome/nfp/nfp_net_ethtool.c | 8 ++---
- .../net/ethernet/netronome/nfp/nfp_net_repr.c | 4 +--
- drivers/net/ethernet/nvidia/forcedeth.c | 8 ++---
- .../net/ethernet/qualcomm/rmnet/rmnet_vnd.c | 4 +--
- drivers/net/ethernet/realtek/8139too.c | 8 ++---
- drivers/net/ethernet/socionext/sni_ave.c | 8 ++---
- drivers/net/ethernet/ti/am65-cpsw-nuss.c | 4 +--
- drivers/net/ethernet/ti/netcp_core.c | 8 ++---
- drivers/net/ethernet/via/via-rhine.c | 8 ++---
- .../net/ethernet/xilinx/xilinx_axienet_main.c | 8 ++---
- drivers/net/hyperv/netvsc_drv.c | 32 +++++++++----------
- drivers/net/ifb.c | 12 +++----
- drivers/net/ipvlan/ipvlan_main.c | 4 +--
- drivers/net/loopback.c | 4 +--
- drivers/net/macsec.c | 12 +++----
- drivers/net/macvlan.c | 4 +--
- drivers/net/mhi_net.c | 8 ++---
- drivers/net/netdevsim/netdev.c | 4 +--
- drivers/net/team/team.c | 4 +--
- drivers/net/team/team_mode_loadbalance.c | 4 +--
- drivers/net/veth.c | 12 +++----
- drivers/net/virtio_net.c | 16 +++++-----
- drivers/net/vrf.c | 4 +--
- drivers/net/vxlan/vxlan_vnifilter.c | 4 +--
- drivers/net/wwan/mhi_wwan_mbim.c | 8 ++---
- drivers/net/xen-netfront.c | 8 ++---
- 63 files changed, 274 insertions(+), 274 deletions(-)
-
-diff --git a/drivers/net/ethernet/alacritech/slic.h b/drivers/net/ethernet/alacritech/slic.h
-index 4eecbdfff3ff..82071d0e5f7f 100644
---- a/drivers/net/ethernet/alacritech/slic.h
-+++ b/drivers/net/ethernet/alacritech/slic.h
-@@ -288,13 +288,13 @@ do { \
- u64_stats_update_end(&(st)->syncp); \
- } while (0)
-
--#define SLIC_GET_STATS_COUNTER(newst, st, counter) \
--{ \
-- unsigned int start; \
-+#define SLIC_GET_STATS_COUNTER(newst, st, counter) \
-+{ \
-+ unsigned int start; \
- do { \
-- start = u64_stats_fetch_begin_irq(&(st)->syncp); \
-- newst = (st)->counter; \
-- } while (u64_stats_fetch_retry_irq(&(st)->syncp, start)); \
-+ start = u64_stats_fetch_begin(&(st)->syncp); \
-+ newst = (st)->counter; \
-+ } while (u64_stats_fetch_retry(&(st)->syncp, start)); \
- }
-
- struct slic_upr {
-diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
-index 444ccef76da2..8da79eedc057 100644
---- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
-+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
-@@ -118,9 +118,9 @@ static void ena_safe_update_stat(u64 *src, u64 *dst,
- unsigned int start;
-
- do {
-- start = u64_stats_fetch_begin_irq(syncp);
-+ start = u64_stats_fetch_begin(syncp);
- *(dst) = *src;
-- } while (u64_stats_fetch_retry_irq(syncp, start));
-+ } while (u64_stats_fetch_retry(syncp, start));
- }
-
- static void ena_queue_stats(struct ena_adapter *adapter, u64 **data)
-diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
-index 044b8afde69a..e296546f03cd 100644
---- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
-+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
-@@ -3312,10 +3312,10 @@ static void ena_get_stats64(struct net_device *netdev,
- tx_ring = &adapter->tx_ring[i];
-
- do {
-- start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
-+ start = u64_stats_fetch_begin(&tx_ring->syncp);
- packets = tx_ring->tx_stats.cnt;
- bytes = tx_ring->tx_stats.bytes;
-- } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
-+ } while (u64_stats_fetch_retry(&tx_ring->syncp, start));
-
- stats->tx_packets += packets;
- stats->tx_bytes += bytes;
-@@ -3323,20 +3323,20 @@ static void ena_get_stats64(struct net_device *netdev,
- rx_ring = &adapter->rx_ring[i];
-
- do {
-- start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
-+ start = u64_stats_fetch_begin(&rx_ring->syncp);
- packets = rx_ring->rx_stats.cnt;
- bytes = rx_ring->rx_stats.bytes;
-- } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
-+ } while (u64_stats_fetch_retry(&rx_ring->syncp, start));
-
- stats->rx_packets += packets;
- stats->rx_bytes += bytes;
- }
-
- do {
-- start = u64_stats_fetch_begin_irq(&adapter->syncp);
-+ start = u64_stats_fetch_begin(&adapter->syncp);
- rx_drops = adapter->dev_stats.rx_drops;
- tx_drops = adapter->dev_stats.tx_drops;
-- } while (u64_stats_fetch_retry_irq(&adapter->syncp, start));
-+ } while (u64_stats_fetch_retry(&adapter->syncp, start));
-
- stats->rx_dropped = rx_drops;
- stats->tx_dropped = tx_drops;
-diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
-index 4d9d7d1edb9b..697ce83eeae1 100644
---- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
-+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
-@@ -957,7 +957,7 @@ unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
- /* This data should mimic aq_ethtool_queue_rx_stat_names structure */
- do {
- count = 0;
-- start = u64_stats_fetch_begin_irq(&self->stats.rx.syncp);
-+ start = u64_stats_fetch_begin(&self->stats.rx.syncp);
- data[count] = self->stats.rx.packets;
- data[++count] = self->stats.rx.jumbo_packets;
- data[++count] = self->stats.rx.lro_packets;
-@@ -974,15 +974,15 @@ unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
- data[++count] = self->stats.rx.xdp_tx;
- data[++count] = self->stats.rx.xdp_invalid;
- data[++count] = self->stats.rx.xdp_redirect;
-- } while (u64_stats_fetch_retry_irq(&self->stats.rx.syncp, start));
-+ } while (u64_stats_fetch_retry(&self->stats.rx.syncp, start));
- } else {
- /* This data should mimic aq_ethtool_queue_tx_stat_names structure */
- do {
- count = 0;
-- start = u64_stats_fetch_begin_irq(&self->stats.tx.syncp);
-+ start = u64_stats_fetch_begin(&self->stats.tx.syncp);
- data[count] = self->stats.tx.packets;
- data[++count] = self->stats.tx.queue_restarts;
-- } while (u64_stats_fetch_retry_irq(&self->stats.tx.syncp, start));
-+ } while (u64_stats_fetch_retry(&self->stats.tx.syncp, start));
- }
-
- return ++count;
-diff --git a/drivers/net/ethernet/asix/ax88796c_main.c b/drivers/net/ethernet/asix/ax88796c_main.c
-index 8b7cdf015a16..21376c79f671 100644
---- a/drivers/net/ethernet/asix/ax88796c_main.c
-+++ b/drivers/net/ethernet/asix/ax88796c_main.c
-@@ -662,12 +662,12 @@ static void ax88796c_get_stats64(struct net_device *ndev,
- s = per_cpu_ptr(ax_local->stats, cpu);
-
- do {
-- start = u64_stats_fetch_begin_irq(&s->syncp);
-+ start = u64_stats_fetch_begin(&s->syncp);
- rx_packets = u64_stats_read(&s->rx_packets);
- rx_bytes = u64_stats_read(&s->rx_bytes);
- tx_packets = u64_stats_read(&s->tx_packets);
- tx_bytes = u64_stats_read(&s->tx_bytes);
-- } while (u64_stats_fetch_retry_irq(&s->syncp, start));
-+ } while (u64_stats_fetch_retry(&s->syncp, start));
-
- stats->rx_packets += rx_packets;
- stats->rx_bytes += rx_bytes;
-diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
-index 7f876721596c..b751dc8486dc 100644
---- a/drivers/net/ethernet/broadcom/b44.c
-+++ b/drivers/net/ethernet/broadcom/b44.c
-@@ -1680,7 +1680,7 @@ static void b44_get_stats64(struct net_device *dev,
- unsigned int start;
-
- do {
-- start = u64_stats_fetch_begin_irq(&hwstat->syncp);
-+ start = u64_stats_fetch_begin(&hwstat->syncp);
-
- /* Convert HW stats into rtnl_link_stats64 stats. */
- nstat->rx_packets = hwstat->rx_pkts;
-@@ -1714,7 +1714,7 @@ static void b44_get_stats64(struct net_device *dev,
- /* Carrier lost counter seems to be broken for some devices */
- nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
- #endif
-- } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
-+ } while (u64_stats_fetch_retry(&hwstat->syncp, start));
-
- }
-
-@@ -2082,12 +2082,12 @@ static void b44_get_ethtool_stats(struct net_device *dev,
- do {
- data_src = &hwstat->tx_good_octets;
- data_dst = data;
-- start = u64_stats_fetch_begin_irq(&hwstat->syncp);
-+ start = u64_stats_fetch_begin(&hwstat->syncp);
-
- for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
- *data_dst++ = *data_src++;
-
-- } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
-+ } while (u64_stats_fetch_retry(&hwstat->syncp, start));
- }
-
- static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
-diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
-index 425d6ccd5413..f8b1adc389b3 100644
---- a/drivers/net/ethernet/broadcom/bcmsysport.c
-+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
-@@ -457,10 +457,10 @@ static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
- for (q = 0; q < priv->netdev->num_tx_queues; q++) {
- ring = &priv->tx_rings[q];
- do {
-- start = u64_stats_fetch_begin_irq(&priv->syncp);
-+ start = u64_stats_fetch_begin(&priv->syncp);
- bytes = ring->bytes;
- packets = ring->packets;
-- } while (u64_stats_fetch_retry_irq(&priv->syncp, start));
-+ } while (u64_stats_fetch_retry(&priv->syncp, start));
-
- *tx_bytes += bytes;
- *tx_packets += packets;
-@@ -504,9 +504,9 @@ static void bcm_sysport_get_stats(struct net_device *dev,
- if (s->stat_sizeof == sizeof(u64) &&
- s->type == BCM_SYSPORT_STAT_NETDEV64) {
- do {
-- start = u64_stats_fetch_begin_irq(syncp);
-+ start = u64_stats_fetch_begin(syncp);
- data[i] = *(u64 *)p;
-- } while (u64_stats_fetch_retry_irq(syncp, start));
-+ } while (u64_stats_fetch_retry(syncp, start));
- } else
- data[i] = *(u32 *)p;
- j++;
-@@ -1878,10 +1878,10 @@ static void bcm_sysport_get_stats64(struct net_device *dev,
- &stats->tx_packets);
-
- do {
-- start = u64_stats_fetch_begin_irq(&priv->syncp);
-+ start = u64_stats_fetch_begin(&priv->syncp);
- stats->rx_packets = stats64->rx_packets;
- stats->rx_bytes = stats64->rx_bytes;
-- } while (u64_stats_fetch_retry_irq(&priv->syncp, start));
-+ } while (u64_stats_fetch_retry(&priv->syncp, start));
- }
-
- static void bcm_sysport_netif_start(struct net_device *dev)
-diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
-index 7c0b0bc033c9..e7137b468f5b 100644
---- a/drivers/net/ethernet/cortina/gemini.c
-+++ b/drivers/net/ethernet/cortina/gemini.c
-@@ -1941,7 +1941,7 @@ static void gmac_get_stats64(struct net_device *netdev,
-
- /* Racing with RX NAPI */
- do {
-- start = u64_stats_fetch_begin_irq(&port->rx_stats_syncp);
-+ start = u64_stats_fetch_begin(&port->rx_stats_syncp);
-
- stats->rx_packets = port->stats.rx_packets;
- stats->rx_bytes = port->stats.rx_bytes;
-@@ -1953,11 +1953,11 @@ static void gmac_get_stats64(struct net_device *netdev,
- stats->rx_crc_errors = port->stats.rx_crc_errors;
- stats->rx_frame_errors = port->stats.rx_frame_errors;
-
-- } while (u64_stats_fetch_retry_irq(&port->rx_stats_syncp, start));
-+ } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start));
-
- /* Racing with MIB and TX completion interrupts */
- do {
-- start = u64_stats_fetch_begin_irq(&port->ir_stats_syncp);
-+ start = u64_stats_fetch_begin(&port->ir_stats_syncp);
-
- stats->tx_errors = port->stats.tx_errors;
- stats->tx_packets = port->stats.tx_packets;
-@@ -1967,15 +1967,15 @@ static void gmac_get_stats64(struct net_device *netdev,
- stats->rx_missed_errors = port->stats.rx_missed_errors;
- stats->rx_fifo_errors = port->stats.rx_fifo_errors;
-
-- } while (u64_stats_fetch_retry_irq(&port->ir_stats_syncp, start));
-+ } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start));
-
- /* Racing with hard_start_xmit */
- do {
-- start = u64_stats_fetch_begin_irq(&port->tx_stats_syncp);
-+ start = u64_stats_fetch_begin(&port->tx_stats_syncp);
-
- stats->tx_dropped = port->stats.tx_dropped;
-
-- } while (u64_stats_fetch_retry_irq(&port->tx_stats_syncp, start));
-+ } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start));
-
- stats->rx_dropped += stats->rx_missed_errors;
- }
-@@ -2044,18 +2044,18 @@ static void gmac_get_ethtool_stats(struct net_device *netdev,
- /* Racing with MIB interrupt */
- do {
- p = values;
-- start = u64_stats_fetch_begin_irq(&port->ir_stats_syncp);
-+ start = u64_stats_fetch_begin(&port->ir_stats_syncp);
-
- for (i = 0; i < RX_STATS_NUM; i++)
- *p++ = port->hw_stats[i];
-
-- } while (u64_stats_fetch_retry_irq(&port->ir_stats_syncp, start));
-+ } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start));
- values = p;
-
- /* Racing with RX NAPI */
- do {
- p = values;
-- start = u64_stats_fetch_begin_irq(&port->rx_stats_syncp);
-+ start = u64_stats_fetch_begin(&port->rx_stats_syncp);
-
- for (i = 0; i < RX_STATUS_NUM; i++)
- *p++ = port->rx_stats[i];
-@@ -2063,13 +2063,13 @@ static void gmac_get_ethtool_stats(struct net_device *netdev,
- *p++ = port->rx_csum_stats[i];
- *p++ = port->rx_napi_exits;
-
-- } while (u64_stats_fetch_retry_irq(&port->rx_stats_syncp, start));
-+ } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start));
- values = p;
-
- /* Racing with TX start_xmit */
- do {
- p = values;
-- start = u64_stats_fetch_begin_irq(&port->tx_stats_syncp);
-+ start = u64_stats_fetch_begin(&port->tx_stats_syncp);
-
- for (i = 0; i < TX_MAX_FRAGS; i++) {
- *values++ = port->tx_frag_stats[i];
-@@ -2078,7 +2078,7 @@ static void gmac_get_ethtool_stats(struct net_device *netdev,
- *values++ = port->tx_frags_linearized;
- *values++ = port->tx_hw_csummed;
-
-- } while (u64_stats_fetch_retry_irq(&port->tx_stats_syncp, start));
-+ } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start));
- }
-
- static int gmac_get_ksettings(struct net_device *netdev,
-diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
-index 77edc3d9b505..a29de29bdf23 100644
---- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
-+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
-@@ -389,10 +389,10 @@ static void be_get_ethtool_stats(struct net_device *netdev,
- struct be_rx_stats *stats = rx_stats(rxo);
-
- do {
-- start = u64_stats_fetch_begin_irq(&stats->sync);
-+ start = u64_stats_fetch_begin(&stats->sync);
- data[base] = stats->rx_bytes;
- data[base + 1] = stats->rx_pkts;
-- } while (u64_stats_fetch_retry_irq(&stats->sync, start));
-+ } while (u64_stats_fetch_retry(&stats->sync, start));
-
- for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) {
- p = (u8 *)stats + et_rx_stats[i].offset;
-@@ -405,19 +405,19 @@ static void be_get_ethtool_stats(struct net_device *netdev,
- struct be_tx_stats *stats = tx_stats(txo);
-
- do {
-- start = u64_stats_fetch_begin_irq(&stats->sync_compl);
-+ start = u64_stats_fetch_begin(&stats->sync_compl);
- data[base] = stats->tx_compl;
-- } while (u64_stats_fetch_retry_irq(&stats->sync_compl, start));
-+ } while (u64_stats_fetch_retry(&stats->sync_compl, start));
-
- do {
-- start = u64_stats_fetch_begin_irq(&stats->sync);
-+ start = u64_stats_fetch_begin(&stats->sync);
- for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) {
- p = (u8 *)stats + et_tx_stats[i].offset;
- data[base + i] =
- (et_tx_stats[i].size == sizeof(u64)) ?
- *(u64 *)p : *(u32 *)p;
- }
-- } while (u64_stats_fetch_retry_irq(&stats->sync, start));
-+ } while (u64_stats_fetch_retry(&stats->sync, start));
- base += ETHTOOL_TXSTATS_NUM;
- }
- }
-diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
-index a9e4e6464a04..968fecfc03bd 100644
---- a/drivers/net/ethernet/emulex/benet/be_main.c
-+++ b/drivers/net/ethernet/emulex/benet/be_main.c
-@@ -665,10 +665,10 @@ static void be_get_stats64(struct net_device *netdev,
- const struct be_rx_stats *rx_stats = rx_stats(rxo);
-
- do {
-- start = u64_stats_fetch_begin_irq(&rx_stats->sync);
-+ start = u64_stats_fetch_begin(&rx_stats->sync);
- pkts = rx_stats(rxo)->rx_pkts;
- bytes = rx_stats(rxo)->rx_bytes;
-- } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
-+ } while (u64_stats_fetch_retry(&rx_stats->sync, start));
- stats->rx_packets += pkts;
- stats->rx_bytes += bytes;
- stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
-@@ -680,10 +680,10 @@ static void be_get_stats64(struct net_device *netdev,
- const struct be_tx_stats *tx_stats = tx_stats(txo);
-
- do {
-- start = u64_stats_fetch_begin_irq(&tx_stats->sync);
-+ start = u64_stats_fetch_begin(&tx_stats->sync);
- pkts = tx_stats(txo)->tx_pkts;
- bytes = tx_stats(txo)->tx_bytes;
-- } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
-+ } while (u64_stats_fetch_retry(&tx_stats->sync, start));
- stats->tx_packets += pkts;
- stats->tx_bytes += bytes;
- }
-@@ -2156,16 +2156,16 @@ static int be_get_new_eqd(struct be_eq_obj *eqo)
-
- for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
- do {
-- start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
-+ start = u64_stats_fetch_begin(&rxo->stats.sync);
- rx_pkts += rxo->stats.rx_pkts;
-- } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
-+ } while (u64_stats_fetch_retry(&rxo->stats.sync, start));
- }
-
- for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
- do {
-- start = u64_stats_fetch_begin_irq(&txo->stats.sync);
-+ start = u64_stats_fetch_begin(&txo->stats.sync);
- tx_pkts += txo->stats.tx_reqs;
-- } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
-+ } while (u64_stats_fetch_retry(&txo->stats.sync, start));
- }
-
- /* Skip, if wrapped around or first calculation */
-diff --git a/drivers/net/ethernet/fungible/funeth/funeth_txrx.h b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
-index 671f51135c26..53b7e95213a8 100644
---- a/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
-+++ b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
-@@ -206,9 +206,9 @@ struct funeth_rxq {
-
- #define FUN_QSTAT_READ(q, seq, stats_copy) \
- do { \
-- seq = u64_stats_fetch_begin_irq(&(q)->syncp); \
-+ seq = u64_stats_fetch_begin(&(q)->syncp); \
- stats_copy = (q)->stats; \
-- } while (u64_stats_fetch_retry_irq(&(q)->syncp, (seq)))
-+ } while (u64_stats_fetch_retry(&(q)->syncp, (seq)))
-
- #define FUN_INT_NAME_LEN (IFNAMSIZ + 16)
-
-diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
-index 033f17cb96be..0a5953089a24 100644
---- a/drivers/net/ethernet/google/gve/gve_ethtool.c
-+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
-@@ -177,14 +177,14 @@ gve_get_ethtool_stats(struct net_device *netdev,
- struct gve_rx_ring *rx = &priv->rx[ring];
-
- start =
-- u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
-+ u64_stats_fetch_begin(&priv->rx[ring].statss);
- tmp_rx_pkts = rx->rpackets;
- tmp_rx_bytes = rx->rbytes;
- tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
- tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
- tmp_rx_desc_err_dropped_pkt =
- rx->rx_desc_err_dropped_pkt;
-- } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
-+ } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
- start));
- rx_pkts += tmp_rx_pkts;
- rx_bytes += tmp_rx_bytes;
-@@ -198,10 +198,10 @@ gve_get_ethtool_stats(struct net_device *netdev,
- if (priv->tx) {
- do {
- start =
-- u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
-+ u64_stats_fetch_begin(&priv->tx[ring].statss);
- tmp_tx_pkts = priv->tx[ring].pkt_done;
- tmp_tx_bytes = priv->tx[ring].bytes_done;
-- } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
-+ } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
- start));
- tx_pkts += tmp_tx_pkts;
- tx_bytes += tmp_tx_bytes;
-@@ -259,13 +259,13 @@ gve_get_ethtool_stats(struct net_device *netdev,
- data[i++] = rx->fill_cnt - rx->cnt;
- do {
- start =
-- u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
-+ u64_stats_fetch_begin(&priv->rx[ring].statss);
- tmp_rx_bytes = rx->rbytes;
- tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
- tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
- tmp_rx_desc_err_dropped_pkt =
- rx->rx_desc_err_dropped_pkt;
-- } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
-+ } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
- start));
- data[i++] = tmp_rx_bytes;
- data[i++] = rx->rx_cont_packet_cnt;
-@@ -331,9 +331,9 @@ gve_get_ethtool_stats(struct net_device *netdev,
- }
- do {
- start =
-- u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
-+ u64_stats_fetch_begin(&priv->tx[ring].statss);
- tmp_tx_bytes = tx->bytes_done;
-- } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
-+ } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
- start));
- data[i++] = tmp_tx_bytes;
- data[i++] = tx->wake_queue;
-diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
-index d3f6ad586ba1..1c2cd3ee1956 100644
---- a/drivers/net/ethernet/google/gve/gve_main.c
-+++ b/drivers/net/ethernet/google/gve/gve_main.c
-@@ -51,10 +51,10 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
- for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
- do {
- start =
-- u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
-+ u64_stats_fetch_begin(&priv->rx[ring].statss);
- packets = priv->rx[ring].rpackets;
- bytes = priv->rx[ring].rbytes;
-- } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
-+ } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
- start));
- s->rx_packets += packets;
- s->rx_bytes += bytes;
-@@ -64,10 +64,10 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
- for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
- do {
- start =
-- u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
-+ u64_stats_fetch_begin(&priv->tx[ring].statss);
- packets = priv->tx[ring].pkt_done;
- bytes = priv->tx[ring].bytes_done;
-- } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
-+ } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
- start));
- s->tx_packets += packets;
- s->tx_bytes += bytes;
-@@ -1260,9 +1260,9 @@ void gve_handle_report_stats(struct gve_priv *priv)
- }
-
- do {
-- start = u64_stats_fetch_begin_irq(&priv->tx[idx].statss);
-+ start = u64_stats_fetch_begin(&priv->tx[idx].statss);
- tx_bytes = priv->tx[idx].bytes_done;
-- } while (u64_stats_fetch_retry_irq(&priv->tx[idx].statss, start));
-+ } while (u64_stats_fetch_retry(&priv->tx[idx].statss, start));
- stats[stats_idx++] = (struct stats) {
- .stat_name = cpu_to_be32(TX_WAKE_CNT),
- .value = cpu_to_be64(priv->tx[idx].wake_queue),
-diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
-index 78d6752fe051..5bf81dca14fa 100644
---- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
-+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
-@@ -2496,7 +2496,7 @@ static void hns3_fetch_stats(struct rtnl_link_stats64 *stats,
- unsigned int start;
-
- do {
-- start = u64_stats_fetch_begin_irq(&ring->syncp);
-+ start = u64_stats_fetch_begin(&ring->syncp);
- if (is_tx) {
- stats->tx_bytes += ring->stats.tx_bytes;
- stats->tx_packets += ring->stats.tx_pkts;
-@@ -2530,7 +2530,7 @@ static void hns3_fetch_stats(struct rtnl_link_stats64 *stats,
- stats->multicast += ring->stats.rx_multicast;
- stats->rx_length_errors += ring->stats.err_pkt_len;
- }
-- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
-+ } while (u64_stats_fetch_retry(&ring->syncp, start));
- }
-
- static void hns3_nic_get_stats64(struct net_device *netdev,
-diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
-index d649c6e323c8..ceec8be2a73b 100644
---- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
-+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
-@@ -74,14 +74,14 @@ void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
- unsigned int start;
-
- do {
-- start = u64_stats_fetch_begin_irq(&rxq_stats->syncp);
-+ start = u64_stats_fetch_begin(&rxq_stats->syncp);
- stats->pkts = rxq_stats->pkts;
- stats->bytes = rxq_stats->bytes;
- stats->errors = rxq_stats->csum_errors +
- rxq_stats->other_errors;
- stats->csum_errors = rxq_stats->csum_errors;
- stats->other_errors = rxq_stats->other_errors;
-- } while (u64_stats_fetch_retry_irq(&rxq_stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
- }
-
- /**
-diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
-index e91476c8ff8b..ad47ac51a139 100644
---- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
-+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
-@@ -99,14 +99,14 @@ void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
- unsigned int start;
-
- do {
-- start = u64_stats_fetch_begin_irq(&txq_stats->syncp);
-+ start = u64_stats_fetch_begin(&txq_stats->syncp);
- stats->pkts = txq_stats->pkts;
- stats->bytes = txq_stats->bytes;
- stats->tx_busy = txq_stats->tx_busy;
- stats->tx_wake = txq_stats->tx_wake;
- stats->tx_dropped = txq_stats->tx_dropped;
- stats->big_frags_pkts = txq_stats->big_frags_pkts;
-- } while (u64_stats_fetch_retry_irq(&txq_stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
- }
-
- /**
-diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
-index 2cca9e84e31e..34ab5ff9823b 100644
---- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
-+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
-@@ -1229,10 +1229,10 @@ static void fm10k_get_stats64(struct net_device *netdev,
- continue;
-
- do {
-- start = u64_stats_fetch_begin_irq(&ring->syncp);
-+ start = u64_stats_fetch_begin(&ring->syncp);
- packets = ring->stats.packets;
- bytes = ring->stats.bytes;
-- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
-+ } while (u64_stats_fetch_retry(&ring->syncp, start));
-
- stats->rx_packets += packets;
- stats->rx_bytes += bytes;
-@@ -1245,10 +1245,10 @@ static void fm10k_get_stats64(struct net_device *netdev,
- continue;
-
- do {
-- start = u64_stats_fetch_begin_irq(&ring->syncp);
-+ start = u64_stats_fetch_begin(&ring->syncp);
- packets = ring->stats.packets;
- bytes = ring->stats.bytes;
-- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
-+ } while (u64_stats_fetch_retry(&ring->syncp, start));
-
- stats->tx_packets += packets;
- stats->tx_bytes += bytes;
-diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
-index 107bcca7db8c..8f36fe90180f 100644
---- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
-+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
-@@ -154,7 +154,7 @@ __i40e_add_ethtool_stats(u64 **data, void *pointer,
- * @ring: the ring to copy
- *
- * Queue statistics must be copied while protected by
-- * u64_stats_fetch_begin_irq, so we can't directly use i40e_add_ethtool_stats.
-+ * u64_stats_fetch_begin, so we can't directly use i40e_add_ethtool_stats.
- * Assumes that queue stats are defined in i40e_gstrings_queue_stats. If the
- * ring pointer is null, zero out the queue stat values and update the data
- * pointer. Otherwise safely copy the stats from the ring into the supplied
-@@ -172,16 +172,16 @@ i40e_add_queue_stats(u64 **data, struct i40e_ring *ring)
-
- /* To avoid invalid statistics values, ensure that we keep retrying
- * the copy until we get a consistent value according to
-- * u64_stats_fetch_retry_irq. But first, make sure our ring is
-+ * u64_stats_fetch_retry. But first, make sure our ring is
- * non-null before attempting to access its syncp.
- */
- do {
-- start = !ring ? 0 : u64_stats_fetch_begin_irq(&ring->syncp);
-+ start = !ring ? 0 : u64_stats_fetch_begin(&ring->syncp);
- for (i = 0; i < size; i++) {
- i40e_add_one_ethtool_stat(&(*data)[i], ring,
- &stats[i]);
- }
-- } while (ring && u64_stats_fetch_retry_irq(&ring->syncp, start));
-+ } while (ring && u64_stats_fetch_retry(&ring->syncp, start));
-
- /* Once we successfully copy the stats in, update the data pointer */
- *data += size;
-diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
-index 63d43ef86f9b..8003cf091eee 100644
---- a/drivers/net/ethernet/intel/i40e/i40e_main.c
-+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
-@@ -425,10 +425,10 @@ static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
- unsigned int start;
-
- do {
-- start = u64_stats_fetch_begin_irq(&ring->syncp);
-+ start = u64_stats_fetch_begin(&ring->syncp);
- packets = ring->stats.packets;
- bytes = ring->stats.bytes;
-- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
-+ } while (u64_stats_fetch_retry(&ring->syncp, start));
-
- stats->tx_packets += packets;
- stats->tx_bytes += bytes;
-@@ -478,10 +478,10 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
- if (!ring)
- continue;
- do {
-- start = u64_stats_fetch_begin_irq(&ring->syncp);
-+ start = u64_stats_fetch_begin(&ring->syncp);
- packets = ring->stats.packets;
- bytes = ring->stats.bytes;
-- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
-+ } while (u64_stats_fetch_retry(&ring->syncp, start));
-
- stats->rx_packets += packets;
- stats->rx_bytes += bytes;
-@@ -903,10 +903,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
- continue;
-
- do {
-- start = u64_stats_fetch_begin_irq(&p->syncp);
-+ start = u64_stats_fetch_begin(&p->syncp);
- packets = p->stats.packets;
- bytes = p->stats.bytes;
-- } while (u64_stats_fetch_retry_irq(&p->syncp, start));
-+ } while (u64_stats_fetch_retry(&p->syncp, start));
- tx_b += bytes;
- tx_p += packets;
- tx_restart += p->tx_stats.restart_queue;
-@@ -921,10 +921,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
- continue;
-
- do {
-- start = u64_stats_fetch_begin_irq(&p->syncp);
-+ start = u64_stats_fetch_begin(&p->syncp);
- packets = p->stats.packets;
- bytes = p->stats.bytes;
-- } while (u64_stats_fetch_retry_irq(&p->syncp, start));
-+ } while (u64_stats_fetch_retry(&p->syncp, start));
- rx_b += bytes;
- rx_p += packets;
- rx_buf += p->rx_stats.alloc_buff_failed;
-@@ -941,10 +941,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
- continue;
-
- do {
-- start = u64_stats_fetch_begin_irq(&p->syncp);
-+ start = u64_stats_fetch_begin(&p->syncp);
- packets = p->stats.packets;
- bytes = p->stats.bytes;
-- } while (u64_stats_fetch_retry_irq(&p->syncp, start));
-+ } while (u64_stats_fetch_retry(&p->syncp, start));
- tx_b += bytes;
- tx_p += packets;
- tx_restart += p->tx_stats.restart_queue;
-diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
-index f4ac2b164b3e..892c6a4f03bb 100644
---- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
-+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
-@@ -147,7 +147,7 @@ __iavf_add_ethtool_stats(u64 **data, void *pointer,
- * @ring: the ring to copy
- *
- * Queue statistics must be copied while protected by
-- * u64_stats_fetch_begin_irq, so we can't directly use iavf_add_ethtool_stats.
-+ * u64_stats_fetch_begin, so we can't directly use iavf_add_ethtool_stats.
- * Assumes that queue stats are defined in iavf_gstrings_queue_stats. If the
- * ring pointer is null, zero out the queue stat values and update the data
- * pointer. Otherwise safely copy the stats from the ring into the supplied
-@@ -165,14 +165,14 @@ iavf_add_queue_stats(u64 **data, struct iavf_ring *ring)
-
- /* To avoid invalid statistics values, ensure that we keep retrying
- * the copy until we get a consistent value according to
-- * u64_stats_fetch_retry_irq. But first, make sure our ring is
-+ * u64_stats_fetch_retry. But first, make sure our ring is
- * non-null before attempting to access its syncp.
- */
- do {
-- start = !ring ? 0 : u64_stats_fetch_begin_irq(&ring->syncp);
-+ start = !ring ? 0 : u64_stats_fetch_begin(&ring->syncp);
- for (i = 0; i < size; i++)
- iavf_add_one_ethtool_stat(&(*data)[i], ring, &stats[i]);
-- } while (ring && u64_stats_fetch_retry_irq(&ring->syncp, start));
-+ } while (ring && u64_stats_fetch_retry(&ring->syncp, start));
-
- /* Once we successfully copy the stats in, update the data pointer */
- *data += size;
-diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
-index ab46cfca4028..aa74ce9afdf2 100644
---- a/drivers/net/ethernet/intel/ice/ice_main.c
-+++ b/drivers/net/ethernet/intel/ice/ice_main.c
-@@ -6419,10 +6419,10 @@ ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
- unsigned int start;
-
- do {
-- start = u64_stats_fetch_begin_irq(syncp);
-+ start = u64_stats_fetch_begin(syncp);
- *pkts = stats.pkts;
- *bytes = stats.bytes;
-- } while (u64_stats_fetch_retry_irq(syncp, start));
-+ } while (u64_stats_fetch_retry(syncp, start));
- }
-
- /**
-diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
-index ceff537d9d22..4ee849985e2b 100644
---- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
-+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
-@@ -2316,15 +2316,15 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
-
- ring = adapter->tx_ring[j];
- do {
-- start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
-+ start = u64_stats_fetch_begin(&ring->tx_syncp);
- data[i] = ring->tx_stats.packets;
- data[i+1] = ring->tx_stats.bytes;
- data[i+2] = ring->tx_stats.restart_queue;
-- } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
-+ } while (u64_stats_fetch_retry(&ring->tx_syncp, start));
- do {
-- start = u64_stats_fetch_begin_irq(&ring->tx_syncp2);
-+ start = u64_stats_fetch_begin(&ring->tx_syncp2);
- restart2 = ring->tx_stats.restart_queue2;
-- } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start));
-+ } while (u64_stats_fetch_retry(&ring->tx_syncp2, start));
- data[i+2] += restart2;
-
- i += IGB_TX_QUEUE_STATS_LEN;
-@@ -2332,13 +2332,13 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
- for (j = 0; j < adapter->num_rx_queues; j++) {
- ring = adapter->rx_ring[j];
- do {
-- start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
-+ start = u64_stats_fetch_begin(&ring->rx_syncp);
- data[i] = ring->rx_stats.packets;
- data[i+1] = ring->rx_stats.bytes;
- data[i+2] = ring->rx_stats.drops;
- data[i+3] = ring->rx_stats.csum_err;
- data[i+4] = ring->rx_stats.alloc_failed;
-- } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
-+ } while (u64_stats_fetch_retry(&ring->rx_syncp, start));
- i += IGB_RX_QUEUE_STATS_LEN;
- }
- spin_unlock(&adapter->stats64_lock);
-diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
-index 45ce4ed16146..9824f7cfaca4 100644
---- a/drivers/net/ethernet/intel/igb/igb_main.c
-+++ b/drivers/net/ethernet/intel/igb/igb_main.c
-@@ -6660,10 +6660,10 @@ void igb_update_stats(struct igb_adapter *adapter)
- }
-
- do {
-- start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
-+ start = u64_stats_fetch_begin(&ring->rx_syncp);
- _bytes = ring->rx_stats.bytes;
- _packets = ring->rx_stats.packets;
-- } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
-+ } while (u64_stats_fetch_retry(&ring->rx_syncp, start));
- bytes += _bytes;
- packets += _packets;
- }
-@@ -6676,10 +6676,10 @@ void igb_update_stats(struct igb_adapter *adapter)
- for (i = 0; i < adapter->num_tx_queues; i++) {
- struct igb_ring *ring = adapter->tx_ring[i];
- do {
-- start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
-+ start = u64_stats_fetch_begin(&ring->tx_syncp);
- _bytes = ring->tx_stats.bytes;
- _packets = ring->tx_stats.packets;
-- } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
-+ } while (u64_stats_fetch_retry(&ring->tx_syncp, start));
- bytes += _bytes;
- packets += _packets;
- }
-diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
-index 2bee9cace598..f7284fa4324a 100644
---- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
-+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
-@@ -840,15 +840,15 @@ static void igc_ethtool_get_stats(struct net_device *netdev,
-
- ring = adapter->tx_ring[j];
- do {
-- start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
-+ start = u64_stats_fetch_begin(&ring->tx_syncp);
- data[i] = ring->tx_stats.packets;
- data[i + 1] = ring->tx_stats.bytes;
- data[i + 2] = ring->tx_stats.restart_queue;
-- } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
-+ } while (u64_stats_fetch_retry(&ring->tx_syncp, start));
- do {
-- start = u64_stats_fetch_begin_irq(&ring->tx_syncp2);
-+ start = u64_stats_fetch_begin(&ring->tx_syncp2);
- restart2 = ring->tx_stats.restart_queue2;
-- } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start));
-+ } while (u64_stats_fetch_retry(&ring->tx_syncp2, start));
- data[i + 2] += restart2;
-
- i += IGC_TX_QUEUE_STATS_LEN;
-@@ -856,13 +856,13 @@ static void igc_ethtool_get_stats(struct net_device *netdev,
- for (j = 0; j < adapter->num_rx_queues; j++) {
- ring = adapter->rx_ring[j];
- do {
-- start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
-+ start = u64_stats_fetch_begin(&ring->rx_syncp);
- data[i] = ring->rx_stats.packets;
- data[i + 1] = ring->rx_stats.bytes;
- data[i + 2] = ring->rx_stats.drops;
- data[i + 3] = ring->rx_stats.csum_err;
- data[i + 4] = ring->rx_stats.alloc_failed;
-- } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
-+ } while (u64_stats_fetch_retry(&ring->rx_syncp, start));
- i += IGC_RX_QUEUE_STATS_LEN;
- }
- spin_unlock(&adapter->stats64_lock);
-diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
-index 4b6f882b380d..c8c5c9d96ba2 100644
---- a/drivers/net/ethernet/intel/igc/igc_main.c
-+++ b/drivers/net/ethernet/intel/igc/igc_main.c
-@@ -4868,10 +4868,10 @@ void igc_update_stats(struct igc_adapter *adapter)
- }
-
- do {
-- start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
-+ start = u64_stats_fetch_begin(&ring->rx_syncp);
- _bytes = ring->rx_stats.bytes;
- _packets = ring->rx_stats.packets;
-- } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
-+ } while (u64_stats_fetch_retry(&ring->rx_syncp, start));
- bytes += _bytes;
- packets += _packets;
- }
-@@ -4885,10 +4885,10 @@ void igc_update_stats(struct igc_adapter *adapter)
- struct igc_ring *ring = adapter->tx_ring[i];
-
- do {
-- start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
-+ start = u64_stats_fetch_begin(&ring->tx_syncp);
- _bytes = ring->tx_stats.bytes;
- _packets = ring->tx_stats.packets;
-- } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
-+ } while (u64_stats_fetch_retry(&ring->tx_syncp, start));
- bytes += _bytes;
- packets += _packets;
- }
-diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
-index 0051aa676e19..1c22ff2dba9b 100644
---- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
-+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
-@@ -1335,10 +1335,10 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
- }
-
- do {
-- start = u64_stats_fetch_begin_irq(&ring->syncp);
-+ start = u64_stats_fetch_begin(&ring->syncp);
- data[i] = ring->stats.packets;
- data[i+1] = ring->stats.bytes;
-- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
-+ } while (u64_stats_fetch_retry(&ring->syncp, start));
- i += 2;
- }
- for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
-@@ -1351,10 +1351,10 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
- }
-
- do {
-- start = u64_stats_fetch_begin_irq(&ring->syncp);
-+ start = u64_stats_fetch_begin(&ring->syncp);
- data[i] = ring->stats.packets;
- data[i+1] = ring->stats.bytes;
-- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
-+ } while (u64_stats_fetch_retry(&ring->syncp, start));
- i += 2;
- }
-
-diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
-index 9e0e13638c46..ec86b61a8db8 100644
---- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
-+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
-@@ -9051,10 +9051,10 @@ static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats,
-
- if (ring) {
- do {
-- start = u64_stats_fetch_begin_irq(&ring->syncp);
-+ start = u64_stats_fetch_begin(&ring->syncp);
- packets = ring->stats.packets;
- bytes = ring->stats.bytes;
-- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
-+ } while (u64_stats_fetch_retry(&ring->syncp, start));
- stats->tx_packets += packets;
- stats->tx_bytes += bytes;
- }
-@@ -9074,10 +9074,10 @@ static void ixgbe_get_stats64(struct net_device *netdev,
-
- if (ring) {
- do {
-- start = u64_stats_fetch_begin_irq(&ring->syncp);
-+ start = u64_stats_fetch_begin(&ring->syncp);
- packets = ring->stats.packets;
- bytes = ring->stats.bytes;
-- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
-+ } while (u64_stats_fetch_retry(&ring->syncp, start));
- stats->rx_packets += packets;
- stats->rx_bytes += bytes;
- }
-diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
-index ccfa6b91aac6..296915414a7c 100644
---- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
-+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
-@@ -458,10 +458,10 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
- }
-
- do {
-- start = u64_stats_fetch_begin_irq(&ring->syncp);
-+ start = u64_stats_fetch_begin(&ring->syncp);
- data[i] = ring->stats.packets;
- data[i + 1] = ring->stats.bytes;
-- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
-+ } while (u64_stats_fetch_retry(&ring->syncp, start));
- i += 2;
- }
-
-@@ -475,10 +475,10 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
- }
-
- do {
-- start = u64_stats_fetch_begin_irq(&ring->syncp);
-+ start = u64_stats_fetch_begin(&ring->syncp);
- data[i] = ring->stats.packets;
- data[i + 1] = ring->stats.bytes;
-- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
-+ } while (u64_stats_fetch_retry(&ring->syncp, start));
- i += 2;
- }
-
-@@ -492,10 +492,10 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
- }
-
- do {
-- start = u64_stats_fetch_begin_irq(&ring->syncp);
-+ start = u64_stats_fetch_begin(&ring->syncp);
- data[i] = ring->stats.packets;
- data[i + 1] = ring->stats.bytes;
-- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
-+ } while (u64_stats_fetch_retry(&ring->syncp, start));
- i += 2;
- }
- }
-diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
-index e338fa572793..a9479ddf68eb 100644
---- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
-+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
-@@ -4350,10 +4350,10 @@ static void ixgbevf_get_tx_ring_stats(struct rtnl_link_stats64 *stats,
-
- if (ring) {
- do {
-- start = u64_stats_fetch_begin_irq(&ring->syncp);
-+ start = u64_stats_fetch_begin(&ring->syncp);
- bytes = ring->stats.bytes;
- packets = ring->stats.packets;
-- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
-+ } while (u64_stats_fetch_retry(&ring->syncp, start));
- stats->tx_bytes += bytes;
- stats->tx_packets += packets;
- }
-@@ -4376,10 +4376,10 @@ static void ixgbevf_get_stats(struct net_device *netdev,
- for (i = 0; i < adapter->num_rx_queues; i++) {
- ring = adapter->rx_ring[i];
- do {
-- start = u64_stats_fetch_begin_irq(&ring->syncp);
-+ start = u64_stats_fetch_begin(&ring->syncp);
- bytes = ring->stats.bytes;
- packets = ring->stats.packets;
-- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
-+ } while (u64_stats_fetch_retry(&ring->syncp, start));
- stats->rx_bytes += bytes;
- stats->rx_packets += packets;
- }
-diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
-index eb4ebaa1c92f..327f03f80836 100644
---- a/drivers/net/ethernet/marvell/mvneta.c
-+++ b/drivers/net/ethernet/marvell/mvneta.c
-@@ -813,14 +813,14 @@ mvneta_get_stats64(struct net_device *dev,
-
- cpu_stats = per_cpu_ptr(pp->stats, cpu);
- do {
-- start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
-+ start = u64_stats_fetch_begin(&cpu_stats->syncp);
- rx_packets = cpu_stats->es.ps.rx_packets;
- rx_bytes = cpu_stats->es.ps.rx_bytes;
- rx_dropped = cpu_stats->rx_dropped;
- rx_errors = cpu_stats->rx_errors;
- tx_packets = cpu_stats->es.ps.tx_packets;
- tx_bytes = cpu_stats->es.ps.tx_bytes;
-- } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
-
- stats->rx_packets += rx_packets;
- stats->rx_bytes += rx_bytes;
-@@ -4765,7 +4765,7 @@ mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp,
-
- stats = per_cpu_ptr(pp->stats, cpu);
- do {
-- start = u64_stats_fetch_begin_irq(&stats->syncp);
-+ start = u64_stats_fetch_begin(&stats->syncp);
- skb_alloc_error = stats->es.skb_alloc_error;
- refill_error = stats->es.refill_error;
- xdp_redirect = stats->es.ps.xdp_redirect;
-@@ -4775,7 +4775,7 @@ mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp,
- xdp_xmit_err = stats->es.ps.xdp_xmit_err;
- xdp_tx = stats->es.ps.xdp_tx;
- xdp_tx_err = stats->es.ps.xdp_tx_err;
-- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&stats->syncp, start));
-
- es->skb_alloc_error += skb_alloc_error;
- es->refill_error += refill_error;
-diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
-index f936640cca4e..8c7470ab4985 100644
---- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
-+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
-@@ -2008,7 +2008,7 @@ mvpp2_get_xdp_stats(struct mvpp2_port *port, struct mvpp2_pcpu_stats *xdp_stats)
-
- cpu_stats = per_cpu_ptr(port->stats, cpu);
- do {
-- start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
-+ start = u64_stats_fetch_begin(&cpu_stats->syncp);
- xdp_redirect = cpu_stats->xdp_redirect;
- xdp_pass = cpu_stats->xdp_pass;
- xdp_drop = cpu_stats->xdp_drop;
-@@ -2016,7 +2016,7 @@ mvpp2_get_xdp_stats(struct mvpp2_port *port, struct mvpp2_pcpu_stats *xdp_stats)
- xdp_xmit_err = cpu_stats->xdp_xmit_err;
- xdp_tx = cpu_stats->xdp_tx;
- xdp_tx_err = cpu_stats->xdp_tx_err;
-- } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
-
- xdp_stats->xdp_redirect += xdp_redirect;
- xdp_stats->xdp_pass += xdp_pass;
-@@ -5115,12 +5115,12 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
-
- cpu_stats = per_cpu_ptr(port->stats, cpu);
- do {
-- start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
-+ start = u64_stats_fetch_begin(&cpu_stats->syncp);
- rx_packets = cpu_stats->rx_packets;
- rx_bytes = cpu_stats->rx_bytes;
- tx_packets = cpu_stats->tx_packets;
- tx_bytes = cpu_stats->tx_bytes;
-- } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
-
- stats->rx_packets += rx_packets;
- stats->rx_bytes += rx_bytes;
-diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
-index ab33ba1c3023..ff97b140886a 100644
---- a/drivers/net/ethernet/marvell/sky2.c
-+++ b/drivers/net/ethernet/marvell/sky2.c
-@@ -3894,19 +3894,19 @@ static void sky2_get_stats(struct net_device *dev,
- u64 _bytes, _packets;
-
- do {
-- start = u64_stats_fetch_begin_irq(&sky2->rx_stats.syncp);
-+ start = u64_stats_fetch_begin(&sky2->rx_stats.syncp);
- _bytes = sky2->rx_stats.bytes;
- _packets = sky2->rx_stats.packets;
-- } while (u64_stats_fetch_retry_irq(&sky2->rx_stats.syncp, start));
-+ } while (u64_stats_fetch_retry(&sky2->rx_stats.syncp, start));
-
- stats->rx_packets = _packets;
- stats->rx_bytes = _bytes;
-
- do {
-- start = u64_stats_fetch_begin_irq(&sky2->tx_stats.syncp);
-+ start = u64_stats_fetch_begin(&sky2->tx_stats.syncp);
- _bytes = sky2->tx_stats.bytes;
- _packets = sky2->tx_stats.packets;
-- } while (u64_stats_fetch_retry_irq(&sky2->tx_stats.syncp, start));
-+ } while (u64_stats_fetch_retry(&sky2->tx_stats.syncp, start));
-
- stats->tx_packets = _packets;
- stats->tx_bytes = _bytes;
-diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-index 17e6ac4445af..f737e2b9a29e 100644
---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -866,7 +866,7 @@ static void mtk_get_stats64(struct net_device *dev,
- }
-
- do {
-- start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
-+ start = u64_stats_fetch_begin(&hw_stats->syncp);
- storage->rx_packets = hw_stats->rx_packets;
- storage->tx_packets = hw_stats->tx_packets;
- storage->rx_bytes = hw_stats->rx_bytes;
-@@ -878,7 +878,7 @@ static void mtk_get_stats64(struct net_device *dev,
- storage->rx_crc_errors = hw_stats->rx_fcs_errors;
- storage->rx_errors = hw_stats->rx_checksum_errors;
- storage->tx_aborted_errors = hw_stats->tx_skip;
-- } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&hw_stats->syncp, start));
-
- storage->tx_errors = dev->stats.tx_errors;
- storage->rx_dropped = dev->stats.rx_dropped;
-@@ -3694,13 +3694,13 @@ static void mtk_get_ethtool_stats(struct net_device *dev,
-
- do {
- data_dst = data;
-- start = u64_stats_fetch_begin_irq(&hwstats->syncp);
-+ start = u64_stats_fetch_begin(&hwstats->syncp);
-
- for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
- *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
- if (mtk_page_pool_enabled(mac->hw))
- mtk_ethtool_pp_stats(mac->hw, data_dst);
-- } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
-+ } while (u64_stats_fetch_retry(&hwstats->syncp, start));
- }
-
- static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
-diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
-index 67ecdb9e708f..8345499563a4 100644
---- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
-+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
-@@ -827,12 +827,12 @@ mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
- for_each_possible_cpu(i) {
- p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
- do {
-- start = u64_stats_fetch_begin_irq(&p->syncp);
-+ start = u64_stats_fetch_begin(&p->syncp);
- rx_packets = p->rx_packets;
- rx_bytes = p->rx_bytes;
- tx_packets = p->tx_packets;
- tx_bytes = p->tx_bytes;
-- } while (u64_stats_fetch_retry_irq(&p->syncp, start));
-+ } while (u64_stats_fetch_retry(&p->syncp, start));
-
- stats->rx_packets += rx_packets;
- stats->rx_bytes += rx_bytes;
-diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
-index b751b03eddfb..14d17c6f90ff 100644
---- a/drivers/net/ethernet/microsoft/mana/mana_en.c
-+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
-@@ -316,10 +316,10 @@ static void mana_get_stats64(struct net_device *ndev,
- rx_stats = &apc->rxqs[q]->stats;
-
- do {
-- start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
-+ start = u64_stats_fetch_begin(&rx_stats->syncp);
- packets = rx_stats->packets;
- bytes = rx_stats->bytes;
-- } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&rx_stats->syncp, start));
-
- st->rx_packets += packets;
- st->rx_bytes += bytes;
-@@ -329,10 +329,10 @@ static void mana_get_stats64(struct net_device *ndev,
- tx_stats = &apc->tx_qp[q].txq.stats;
-
- do {
-- start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
-+ start = u64_stats_fetch_begin(&tx_stats->syncp);
- packets = tx_stats->packets;
- bytes = tx_stats->bytes;
-- } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&tx_stats->syncp, start));
-
- st->tx_packets += packets;
- st->tx_bytes += bytes;
-diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
-index c530db76880f..96d55c91c969 100644
---- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
-+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
-@@ -90,13 +90,13 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
- rx_stats = &apc->rxqs[q]->stats;
-
- do {
-- start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
-+ start = u64_stats_fetch_begin(&rx_stats->syncp);
- packets = rx_stats->packets;
- bytes = rx_stats->bytes;
- xdp_drop = rx_stats->xdp_drop;
- xdp_tx = rx_stats->xdp_tx;
- xdp_redirect = rx_stats->xdp_redirect;
-- } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&rx_stats->syncp, start));
-
- data[i++] = packets;
- data[i++] = bytes;
-@@ -109,11 +109,11 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
- tx_stats = &apc->tx_qp[q].txq.stats;
-
- do {
-- start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
-+ start = u64_stats_fetch_begin(&tx_stats->syncp);
- packets = tx_stats->packets;
- bytes = tx_stats->bytes;
- xdp_xmit = tx_stats->xdp_xmit;
-- } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&tx_stats->syncp, start));
-
- data[i++] = packets;
- data[i++] = bytes;
-diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
-index 27f4786ace4f..a5ca5c4a7896 100644
---- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
-+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
-@@ -1631,21 +1631,21 @@ static void nfp_net_stat64(struct net_device *netdev,
- unsigned int start;
-
- do {
-- start = u64_stats_fetch_begin_irq(&r_vec->rx_sync);
-+ start = u64_stats_fetch_begin(&r_vec->rx_sync);
- data[0] = r_vec->rx_pkts;
- data[1] = r_vec->rx_bytes;
- data[2] = r_vec->rx_drops;
-- } while (u64_stats_fetch_retry_irq(&r_vec->rx_sync, start));
-+ } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
- stats->rx_packets += data[0];
- stats->rx_bytes += data[1];
- stats->rx_dropped += data[2];
-
- do {
-- start = u64_stats_fetch_begin_irq(&r_vec->tx_sync);
-+ start = u64_stats_fetch_begin(&r_vec->tx_sync);
- data[0] = r_vec->tx_pkts;
- data[1] = r_vec->tx_bytes;
- data[2] = r_vec->tx_errors;
-- } while (u64_stats_fetch_retry_irq(&r_vec->tx_sync, start));
-+ } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
- stats->tx_packets += data[0];
- stats->tx_bytes += data[1];
- stats->tx_errors += data[2];
-diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
-index af376b900067..cc97b3d00414 100644
---- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
-+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
-@@ -881,7 +881,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
- unsigned int start;
-
- do {
-- start = u64_stats_fetch_begin_irq(&nn->r_vecs[i].rx_sync);
-+ start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync);
- data[0] = nn->r_vecs[i].rx_pkts;
- tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
- tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
-@@ -889,10 +889,10 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
- tmp[3] = nn->r_vecs[i].hw_csum_rx_error;
- tmp[4] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
- tmp[5] = nn->r_vecs[i].hw_tls_rx;
-- } while (u64_stats_fetch_retry_irq(&nn->r_vecs[i].rx_sync, start));
-+ } while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start));
-
- do {
-- start = u64_stats_fetch_begin_irq(&nn->r_vecs[i].tx_sync);
-+ start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
- data[1] = nn->r_vecs[i].tx_pkts;
- data[2] = nn->r_vecs[i].tx_busy;
- tmp[6] = nn->r_vecs[i].hw_csum_tx;
-@@ -902,7 +902,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
- tmp[10] = nn->r_vecs[i].hw_tls_tx;
- tmp[11] = nn->r_vecs[i].tls_tx_fallback;
- tmp[12] = nn->r_vecs[i].tls_tx_no_fallback;
-- } while (u64_stats_fetch_retry_irq(&nn->r_vecs[i].tx_sync, start));
-+ } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));
-
- data += NN_RVEC_PER_Q_STATS;
-
-diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
-index 8b77582bdfa0..a6b6ca1fd55e 100644
---- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
-+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
-@@ -134,13 +134,13 @@ nfp_repr_get_host_stats64(const struct net_device *netdev,
-
- repr_stats = per_cpu_ptr(repr->stats, i);
- do {
-- start = u64_stats_fetch_begin_irq(&repr_stats->syncp);
-+ start = u64_stats_fetch_begin(&repr_stats->syncp);
- tbytes = repr_stats->tx_bytes;
- tpkts = repr_stats->tx_packets;
- tdrops = repr_stats->tx_drops;
- rbytes = repr_stats->rx_bytes;
- rpkts = repr_stats->rx_packets;
-- } while (u64_stats_fetch_retry_irq(&repr_stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&repr_stats->syncp, start));
-
- stats->tx_bytes += tbytes;
- stats->tx_packets += tpkts;
-diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
-index 486cbc8ab224..7a549b834e97 100644
---- a/drivers/net/ethernet/nvidia/forcedeth.c
-+++ b/drivers/net/ethernet/nvidia/forcedeth.c
-@@ -1734,12 +1734,12 @@ static void nv_get_stats(int cpu, struct fe_priv *np,
- u64 tx_packets, tx_bytes, tx_dropped;
-
- do {
-- syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
-+ syncp_start = u64_stats_fetch_begin(&np->swstats_rx_syncp);
- rx_packets = src->stat_rx_packets;
- rx_bytes = src->stat_rx_bytes;
- rx_dropped = src->stat_rx_dropped;
- rx_missed_errors = src->stat_rx_missed_errors;
-- } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
-+ } while (u64_stats_fetch_retry(&np->swstats_rx_syncp, syncp_start));
-
- storage->rx_packets += rx_packets;
- storage->rx_bytes += rx_bytes;
-@@ -1747,11 +1747,11 @@ static void nv_get_stats(int cpu, struct fe_priv *np,
- storage->rx_missed_errors += rx_missed_errors;
-
- do {
-- syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
-+ syncp_start = u64_stats_fetch_begin(&np->swstats_tx_syncp);
- tx_packets = src->stat_tx_packets;
- tx_bytes = src->stat_tx_bytes;
- tx_dropped = src->stat_tx_dropped;
-- } while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
-+ } while (u64_stats_fetch_retry(&np->swstats_tx_syncp, syncp_start));
-
- storage->tx_packets += tx_packets;
- storage->tx_bytes += tx_bytes;
-diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
-index 1b2119b1d48a..3f5e6572d20e 100644
---- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
-+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
-@@ -135,9 +135,9 @@ static void rmnet_get_stats64(struct net_device *dev,
- pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);
-
- do {
-- start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
-+ start = u64_stats_fetch_begin(&pcpu_ptr->syncp);
- snapshot = pcpu_ptr->stats; /* struct assignment */
-- } while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));
-+ } while (u64_stats_fetch_retry(&pcpu_ptr->syncp, start));
-
- total_stats.rx_pkts += snapshot.rx_pkts;
- total_stats.rx_bytes += snapshot.rx_bytes;
-diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
-index 469e2e229c6e..9ce0e8a64ba8 100644
---- a/drivers/net/ethernet/realtek/8139too.c
-+++ b/drivers/net/ethernet/realtek/8139too.c
-@@ -2532,16 +2532,16 @@ rtl8139_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
- netdev_stats_to_stats64(stats, &dev->stats);
-
- do {
-- start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp);
-+ start = u64_stats_fetch_begin(&tp->rx_stats.syncp);
- stats->rx_packets = tp->rx_stats.packets;
- stats->rx_bytes = tp->rx_stats.bytes;
-- } while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start));
-+ } while (u64_stats_fetch_retry(&tp->rx_stats.syncp, start));
-
- do {
-- start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp);
-+ start = u64_stats_fetch_begin(&tp->tx_stats.syncp);
- stats->tx_packets = tp->tx_stats.packets;
- stats->tx_bytes = tp->tx_stats.bytes;
-- } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start));
-+ } while (u64_stats_fetch_retry(&tp->tx_stats.syncp, start));
- }
-
- /* Set or clear the multicast filter for this adaptor.
-diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
-index d2c6a5dfdc0e..b7e24ae92525 100644
---- a/drivers/net/ethernet/socionext/sni_ave.c
-+++ b/drivers/net/ethernet/socionext/sni_ave.c
-@@ -1508,16 +1508,16 @@ static void ave_get_stats64(struct net_device *ndev,
- unsigned int start;
-
- do {
-- start = u64_stats_fetch_begin_irq(&priv->stats_rx.syncp);
-+ start = u64_stats_fetch_begin(&priv->stats_rx.syncp);
- stats->rx_packets = priv->stats_rx.packets;
- stats->rx_bytes = priv->stats_rx.bytes;
-- } while (u64_stats_fetch_retry_irq(&priv->stats_rx.syncp, start));
-+ } while (u64_stats_fetch_retry(&priv->stats_rx.syncp, start));
-
- do {
-- start = u64_stats_fetch_begin_irq(&priv->stats_tx.syncp);
-+ start = u64_stats_fetch_begin(&priv->stats_tx.syncp);
- stats->tx_packets = priv->stats_tx.packets;
- stats->tx_bytes = priv->stats_tx.bytes;
-- } while (u64_stats_fetch_retry_irq(&priv->stats_tx.syncp, start));
-+ } while (u64_stats_fetch_retry(&priv->stats_tx.syncp, start));
-
- stats->rx_errors = priv->stats_rx.errors;
- stats->tx_errors = priv->stats_tx.errors;
-diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
-index 9f2553799895..1085b0642c28 100644
---- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
-+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
-@@ -1376,12 +1376,12 @@ static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev,
-
- cpu_stats = per_cpu_ptr(ndev_priv->stats, cpu);
- do {
-- start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
-+ start = u64_stats_fetch_begin(&cpu_stats->syncp);
- rx_packets = cpu_stats->rx_packets;
- rx_bytes = cpu_stats->rx_bytes;
- tx_packets = cpu_stats->tx_packets;
- tx_bytes = cpu_stats->tx_bytes;
-- } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
-
- stats->rx_packets += rx_packets;
- stats->rx_bytes += rx_bytes;
-diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
-index 9eb9eaff4dc9..1bb596a9d8a2 100644
---- a/drivers/net/ethernet/ti/netcp_core.c
-+++ b/drivers/net/ethernet/ti/netcp_core.c
-@@ -1916,16 +1916,16 @@ netcp_get_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats)
- unsigned int start;
-
- do {
-- start = u64_stats_fetch_begin_irq(&p->syncp_rx);
-+ start = u64_stats_fetch_begin(&p->syncp_rx);
- rxpackets = p->rx_packets;
- rxbytes = p->rx_bytes;
-- } while (u64_stats_fetch_retry_irq(&p->syncp_rx, start));
-+ } while (u64_stats_fetch_retry(&p->syncp_rx, start));
-
- do {
-- start = u64_stats_fetch_begin_irq(&p->syncp_tx);
-+ start = u64_stats_fetch_begin(&p->syncp_tx);
- txpackets = p->tx_packets;
- txbytes = p->tx_bytes;
-- } while (u64_stats_fetch_retry_irq(&p->syncp_tx, start));
-+ } while (u64_stats_fetch_retry(&p->syncp_tx, start));
-
- stats->rx_packets = rxpackets;
- stats->rx_bytes = rxbytes;
-diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
-index 0fb15a17b547..d716e6fe26e1 100644
---- a/drivers/net/ethernet/via/via-rhine.c
-+++ b/drivers/net/ethernet/via/via-rhine.c
-@@ -2217,16 +2217,16 @@ rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
- netdev_stats_to_stats64(stats, &dev->stats);
-
- do {
-- start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp);
-+ start = u64_stats_fetch_begin(&rp->rx_stats.syncp);
- stats->rx_packets = rp->rx_stats.packets;
- stats->rx_bytes = rp->rx_stats.bytes;
-- } while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start));
-+ } while (u64_stats_fetch_retry(&rp->rx_stats.syncp, start));
-
- do {
-- start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp);
-+ start = u64_stats_fetch_begin(&rp->tx_stats.syncp);
- stats->tx_packets = rp->tx_stats.packets;
- stats->tx_bytes = rp->tx_stats.bytes;
-- } while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start));
-+ } while (u64_stats_fetch_retry(&rp->tx_stats.syncp, start));
- }
-
- static void rhine_set_rx_mode(struct net_device *dev)
-diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
-index 5ea9dc251dd9..c678876a7826 100644
---- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
-+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
-@@ -1305,16 +1305,16 @@ axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
- netdev_stats_to_stats64(stats, &dev->stats);
-
- do {
-- start = u64_stats_fetch_begin_irq(&lp->rx_stat_sync);
-+ start = u64_stats_fetch_begin(&lp->rx_stat_sync);
- stats->rx_packets = u64_stats_read(&lp->rx_packets);
- stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
-- } while (u64_stats_fetch_retry_irq(&lp->rx_stat_sync, start));
-+ } while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));
-
- do {
-- start = u64_stats_fetch_begin_irq(&lp->tx_stat_sync);
-+ start = u64_stats_fetch_begin(&lp->tx_stat_sync);
- stats->tx_packets = u64_stats_read(&lp->tx_packets);
- stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
-- } while (u64_stats_fetch_retry_irq(&lp->tx_stat_sync, start));
-+ } while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
- }
-
- static const struct net_device_ops axienet_netdev_ops = {
-diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
-index 0285894c892a..cf87d7ed3779 100644
---- a/drivers/net/hyperv/netvsc_drv.c
-+++ b/drivers/net/hyperv/netvsc_drv.c
-@@ -1264,12 +1264,12 @@ static void netvsc_get_vf_stats(struct net_device *net,
- unsigned int start;
-
- do {
-- start = u64_stats_fetch_begin_irq(&stats->syncp);
-+ start = u64_stats_fetch_begin(&stats->syncp);
- rx_packets = stats->rx_packets;
- tx_packets = stats->tx_packets;
- rx_bytes = stats->rx_bytes;
- tx_bytes = stats->tx_bytes;
-- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&stats->syncp, start));
-
- tot->rx_packets += rx_packets;
- tot->tx_packets += tx_packets;
-@@ -1294,12 +1294,12 @@ static void netvsc_get_pcpu_stats(struct net_device *net,
- unsigned int start;
-
- do {
-- start = u64_stats_fetch_begin_irq(&stats->syncp);
-+ start = u64_stats_fetch_begin(&stats->syncp);
- this_tot->vf_rx_packets = stats->rx_packets;
- this_tot->vf_tx_packets = stats->tx_packets;
- this_tot->vf_rx_bytes = stats->rx_bytes;
- this_tot->vf_tx_bytes = stats->tx_bytes;
-- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&stats->syncp, start));
- this_tot->rx_packets = this_tot->vf_rx_packets;
- this_tot->tx_packets = this_tot->vf_tx_packets;
- this_tot->rx_bytes = this_tot->vf_rx_bytes;
-@@ -1318,20 +1318,20 @@ static void netvsc_get_pcpu_stats(struct net_device *net,
-
- tx_stats = &nvchan->tx_stats;
- do {
-- start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
-+ start = u64_stats_fetch_begin(&tx_stats->syncp);
- packets = tx_stats->packets;
- bytes = tx_stats->bytes;
-- } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&tx_stats->syncp, start));
-
- this_tot->tx_bytes += bytes;
- this_tot->tx_packets += packets;
-
- rx_stats = &nvchan->rx_stats;
- do {
-- start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
-+ start = u64_stats_fetch_begin(&rx_stats->syncp);
- packets = rx_stats->packets;
- bytes = rx_stats->bytes;
-- } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&rx_stats->syncp, start));
-
- this_tot->rx_bytes += bytes;
- this_tot->rx_packets += packets;
-@@ -1370,21 +1370,21 @@ static void netvsc_get_stats64(struct net_device *net,
-
- tx_stats = &nvchan->tx_stats;
- do {
-- start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
-+ start = u64_stats_fetch_begin(&tx_stats->syncp);
- packets = tx_stats->packets;
- bytes = tx_stats->bytes;
-- } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&tx_stats->syncp, start));
-
- t->tx_bytes += bytes;
- t->tx_packets += packets;
-
- rx_stats = &nvchan->rx_stats;
- do {
-- start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
-+ start = u64_stats_fetch_begin(&rx_stats->syncp);
- packets = rx_stats->packets;
- bytes = rx_stats->bytes;
- multicast = rx_stats->multicast + rx_stats->broadcast;
-- } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&rx_stats->syncp, start));
-
- t->rx_bytes += bytes;
- t->rx_packets += packets;
-@@ -1527,24 +1527,24 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
- tx_stats = &nvdev->chan_table[j].tx_stats;
-
- do {
-- start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
-+ start = u64_stats_fetch_begin(&tx_stats->syncp);
- packets = tx_stats->packets;
- bytes = tx_stats->bytes;
- xdp_xmit = tx_stats->xdp_xmit;
-- } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&tx_stats->syncp, start));
- data[i++] = packets;
- data[i++] = bytes;
- data[i++] = xdp_xmit;
-
- rx_stats = &nvdev->chan_table[j].rx_stats;
- do {
-- start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
-+ start = u64_stats_fetch_begin(&rx_stats->syncp);
- packets = rx_stats->packets;
- bytes = rx_stats->bytes;
- xdp_drop = rx_stats->xdp_drop;
- xdp_redirect = rx_stats->xdp_redirect;
- xdp_tx = rx_stats->xdp_tx;
-- } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&rx_stats->syncp, start));
- data[i++] = packets;
- data[i++] = bytes;
- data[i++] = xdp_drop;
-diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
-index 1c64d5347b8e..78253ad57b2e 100644
---- a/drivers/net/ifb.c
-+++ b/drivers/net/ifb.c
-@@ -162,18 +162,18 @@ static void ifb_stats64(struct net_device *dev,
-
- for (i = 0; i < dev->num_tx_queues; i++,txp++) {
- do {
-- start = u64_stats_fetch_begin_irq(&txp->rx_stats.sync);
-+ start = u64_stats_fetch_begin(&txp->rx_stats.sync);
- packets = txp->rx_stats.packets;
- bytes = txp->rx_stats.bytes;
-- } while (u64_stats_fetch_retry_irq(&txp->rx_stats.sync, start));
-+ } while (u64_stats_fetch_retry(&txp->rx_stats.sync, start));
- stats->rx_packets += packets;
- stats->rx_bytes += bytes;
-
- do {
-- start = u64_stats_fetch_begin_irq(&txp->tx_stats.sync);
-+ start = u64_stats_fetch_begin(&txp->tx_stats.sync);
- packets = txp->tx_stats.packets;
- bytes = txp->tx_stats.bytes;
-- } while (u64_stats_fetch_retry_irq(&txp->tx_stats.sync, start));
-+ } while (u64_stats_fetch_retry(&txp->tx_stats.sync, start));
- stats->tx_packets += packets;
- stats->tx_bytes += bytes;
- }
-@@ -245,12 +245,12 @@ static void ifb_fill_stats_data(u64 **data,
- int j;
-
- do {
-- start = u64_stats_fetch_begin_irq(&q_stats->sync);
-+ start = u64_stats_fetch_begin(&q_stats->sync);
- for (j = 0; j < IFB_Q_STATS_LEN; j++) {
- offset = ifb_q_stats_desc[j].offset;
- (*data)[j] = *(u64 *)(stats_base + offset);
- }
-- } while (u64_stats_fetch_retry_irq(&q_stats->sync, start));
-+ } while (u64_stats_fetch_retry(&q_stats->sync, start));
-
- *data += IFB_Q_STATS_LEN;
- }
-diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
-index fbf2d5b67aaf..57c79f5f2991 100644
---- a/drivers/net/ipvlan/ipvlan_main.c
-+++ b/drivers/net/ipvlan/ipvlan_main.c
-@@ -301,13 +301,13 @@ static void ipvlan_get_stats64(struct net_device *dev,
- for_each_possible_cpu(idx) {
- pcptr = per_cpu_ptr(ipvlan->pcpu_stats, idx);
- do {
-- strt= u64_stats_fetch_begin_irq(&pcptr->syncp);
-+ strt = u64_stats_fetch_begin(&pcptr->syncp);
- rx_pkts = u64_stats_read(&pcptr->rx_pkts);
- rx_bytes = u64_stats_read(&pcptr->rx_bytes);
- rx_mcast = u64_stats_read(&pcptr->rx_mcast);
- tx_pkts = u64_stats_read(&pcptr->tx_pkts);
- tx_bytes = u64_stats_read(&pcptr->tx_bytes);
-- } while (u64_stats_fetch_retry_irq(&pcptr->syncp,
-+ } while (u64_stats_fetch_retry(&pcptr->syncp,
- strt));
-
- s->rx_packets += rx_pkts;
-diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
-index 2e9742952c4e..f6d53e63ef4e 100644
---- a/drivers/net/loopback.c
-+++ b/drivers/net/loopback.c
-@@ -106,10 +106,10 @@ void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes)
-
- lb_stats = per_cpu_ptr(dev->lstats, i);
- do {
-- start = u64_stats_fetch_begin_irq(&lb_stats->syncp);
-+ start = u64_stats_fetch_begin(&lb_stats->syncp);
- tpackets = u64_stats_read(&lb_stats->packets);
- tbytes = u64_stats_read(&lb_stats->bytes);
-- } while (u64_stats_fetch_retry_irq(&lb_stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&lb_stats->syncp, start));
- *bytes += tbytes;
- *packets += tpackets;
- }
-diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
-index 209ee9f35275..f9f25b5f1745 100644
---- a/drivers/net/macsec.c
-+++ b/drivers/net/macsec.c
-@@ -2802,9 +2802,9 @@ static void get_rx_sc_stats(struct net_device *dev,
-
- stats = per_cpu_ptr(rx_sc->stats, cpu);
- do {
-- start = u64_stats_fetch_begin_irq(&stats->syncp);
-+ start = u64_stats_fetch_begin(&stats->syncp);
- memcpy(&tmp, &stats->stats, sizeof(tmp));
-- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&stats->syncp, start));
-
- sum->InOctetsValidated += tmp.InOctetsValidated;
- sum->InOctetsDecrypted += tmp.InOctetsDecrypted;
-@@ -2883,9 +2883,9 @@ static void get_tx_sc_stats(struct net_device *dev,
-
- stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu);
- do {
-- start = u64_stats_fetch_begin_irq(&stats->syncp);
-+ start = u64_stats_fetch_begin(&stats->syncp);
- memcpy(&tmp, &stats->stats, sizeof(tmp));
-- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&stats->syncp, start));
-
- sum->OutPktsProtected += tmp.OutPktsProtected;
- sum->OutPktsEncrypted += tmp.OutPktsEncrypted;
-@@ -2939,9 +2939,9 @@ static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum)
-
- stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu);
- do {
-- start = u64_stats_fetch_begin_irq(&stats->syncp);
-+ start = u64_stats_fetch_begin(&stats->syncp);
- memcpy(&tmp, &stats->stats, sizeof(tmp));
-- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&stats->syncp, start));
-
- sum->OutPktsUntagged += tmp.OutPktsUntagged;
- sum->InPktsUntagged += tmp.InPktsUntagged;
-diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
-index 012830d12fde..9bea3f1b0a8a 100644
---- a/drivers/net/macvlan.c
-+++ b/drivers/net/macvlan.c
-@@ -948,13 +948,13 @@ static void macvlan_dev_get_stats64(struct net_device *dev,
- for_each_possible_cpu(i) {
- p = per_cpu_ptr(vlan->pcpu_stats, i);
- do {
-- start = u64_stats_fetch_begin_irq(&p->syncp);
-+ start = u64_stats_fetch_begin(&p->syncp);
- rx_packets = u64_stats_read(&p->rx_packets);
- rx_bytes = u64_stats_read(&p->rx_bytes);
- rx_multicast = u64_stats_read(&p->rx_multicast);
- tx_packets = u64_stats_read(&p->tx_packets);
- tx_bytes = u64_stats_read(&p->tx_bytes);
-- } while (u64_stats_fetch_retry_irq(&p->syncp, start));
-+ } while (u64_stats_fetch_retry(&p->syncp, start));
-
- stats->rx_packets += rx_packets;
- stats->rx_bytes += rx_bytes;
-diff --git a/drivers/net/mhi_net.c b/drivers/net/mhi_net.c
-index 0b9d37979133..3d322ac4f6a5 100644
---- a/drivers/net/mhi_net.c
-+++ b/drivers/net/mhi_net.c
-@@ -104,19 +104,19 @@ static void mhi_ndo_get_stats64(struct net_device *ndev,
- unsigned int start;
-
- do {
-- start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.rx_syncp);
-+ start = u64_stats_fetch_begin(&mhi_netdev->stats.rx_syncp);
- stats->rx_packets = u64_stats_read(&mhi_netdev->stats.rx_packets);
- stats->rx_bytes = u64_stats_read(&mhi_netdev->stats.rx_bytes);
- stats->rx_errors = u64_stats_read(&mhi_netdev->stats.rx_errors);
-- } while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.rx_syncp, start));
-+ } while (u64_stats_fetch_retry(&mhi_netdev->stats.rx_syncp, start));
-
- do {
-- start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.tx_syncp);
-+ start = u64_stats_fetch_begin(&mhi_netdev->stats.tx_syncp);
- stats->tx_packets = u64_stats_read(&mhi_netdev->stats.tx_packets);
- stats->tx_bytes = u64_stats_read(&mhi_netdev->stats.tx_bytes);
- stats->tx_errors = u64_stats_read(&mhi_netdev->stats.tx_errors);
- stats->tx_dropped = u64_stats_read(&mhi_netdev->stats.tx_dropped);
-- } while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.tx_syncp, start));
-+ } while (u64_stats_fetch_retry(&mhi_netdev->stats.tx_syncp, start));
- }
-
- static const struct net_device_ops mhi_netdev_ops = {
-diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
-index 9a1a5b203624..e470e3398abc 100644
---- a/drivers/net/netdevsim/netdev.c
-+++ b/drivers/net/netdevsim/netdev.c
-@@ -67,10 +67,10 @@ nsim_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
- unsigned int start;
-
- do {
-- start = u64_stats_fetch_begin_irq(&ns->syncp);
-+ start = u64_stats_fetch_begin(&ns->syncp);
- stats->tx_bytes = ns->tx_bytes;
- stats->tx_packets = ns->tx_packets;
-- } while (u64_stats_fetch_retry_irq(&ns->syncp, start));
-+ } while (u64_stats_fetch_retry(&ns->syncp, start));
- }
-
- static int
-diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
-index 293eaf6b3ec9..eccf9df0c88c 100644
---- a/drivers/net/team/team.c
-+++ b/drivers/net/team/team.c
-@@ -1868,13 +1868,13 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
- for_each_possible_cpu(i) {
- p = per_cpu_ptr(team->pcpu_stats, i);
- do {
-- start = u64_stats_fetch_begin_irq(&p->syncp);
-+ start = u64_stats_fetch_begin(&p->syncp);
- rx_packets = u64_stats_read(&p->rx_packets);
- rx_bytes = u64_stats_read(&p->rx_bytes);
- rx_multicast = u64_stats_read(&p->rx_multicast);
- tx_packets = u64_stats_read(&p->tx_packets);
- tx_bytes = u64_stats_read(&p->tx_bytes);
-- } while (u64_stats_fetch_retry_irq(&p->syncp, start));
-+ } while (u64_stats_fetch_retry(&p->syncp, start));
-
- stats->rx_packets += rx_packets;
- stats->rx_bytes += rx_bytes;
-diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
-index b095a4b4957b..18d99fda997c 100644
---- a/drivers/net/team/team_mode_loadbalance.c
-+++ b/drivers/net/team/team_mode_loadbalance.c
-@@ -466,9 +466,9 @@ static void __lb_one_cpu_stats_add(struct lb_stats *acc_stats,
- struct lb_stats tmp;
-
- do {
-- start = u64_stats_fetch_begin_irq(syncp);
-+ start = u64_stats_fetch_begin(syncp);
- tmp.tx_bytes = cpu_stats->tx_bytes;
-- } while (u64_stats_fetch_retry_irq(syncp, start));
-+ } while (u64_stats_fetch_retry(syncp, start));
- acc_stats->tx_bytes += tmp.tx_bytes;
- }
-
-diff --git a/drivers/net/veth.c b/drivers/net/veth.c
-index 36c5a41f84e4..605f511a886c 100644
---- a/drivers/net/veth.c
-+++ b/drivers/net/veth.c
-@@ -182,12 +182,12 @@ static void veth_get_ethtool_stats(struct net_device *dev,
- size_t offset;
-
- do {
-- start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
-+ start = u64_stats_fetch_begin(&rq_stats->syncp);
- for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
- offset = veth_rq_stats_desc[j].offset;
- data[idx + j] = *(u64 *)(stats_base + offset);
- }
-- } while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&rq_stats->syncp, start));
- idx += VETH_RQ_STATS_LEN;
- }
-
-@@ -203,12 +203,12 @@ static void veth_get_ethtool_stats(struct net_device *dev,
-
- tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN;
- do {
-- start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
-+ start = u64_stats_fetch_begin(&rq_stats->syncp);
- for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
- offset = veth_tq_stats_desc[j].offset;
- data[tx_idx + j] += *(u64 *)(base + offset);
- }
-- } while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&rq_stats->syncp, start));
- }
- }
-
-@@ -381,13 +381,13 @@ static void veth_stats_rx(struct veth_stats *result, struct net_device *dev)
- unsigned int start;
-
- do {
-- start = u64_stats_fetch_begin_irq(&stats->syncp);
-+ start = u64_stats_fetch_begin(&stats->syncp);
- peer_tq_xdp_xmit_err = stats->vs.peer_tq_xdp_xmit_err;
- xdp_tx_err = stats->vs.xdp_tx_err;
- packets = stats->vs.xdp_packets;
- bytes = stats->vs.xdp_bytes;
- drops = stats->vs.rx_drops;
-- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&stats->syncp, start));
- result->peer_tq_xdp_xmit_err += peer_tq_xdp_xmit_err;
- result->xdp_tx_err += xdp_tx_err;
- result->xdp_packets += packets;
-diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
-index 21d3461fb5d1..666622ae4b9d 100644
---- a/drivers/net/virtio_net.c
-+++ b/drivers/net/virtio_net.c
-@@ -2107,18 +2107,18 @@ static void virtnet_stats(struct net_device *dev,
- struct send_queue *sq = &vi->sq[i];
-
- do {
-- start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
-+ start = u64_stats_fetch_begin(&sq->stats.syncp);
- tpackets = sq->stats.packets;
- tbytes = sq->stats.bytes;
- terrors = sq->stats.tx_timeouts;
-- } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));
-+ } while (u64_stats_fetch_retry(&sq->stats.syncp, start));
-
- do {
-- start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
-+ start = u64_stats_fetch_begin(&rq->stats.syncp);
- rpackets = rq->stats.packets;
- rbytes = rq->stats.bytes;
- rdrops = rq->stats.drops;
-- } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
-+ } while (u64_stats_fetch_retry(&rq->stats.syncp, start));
-
- tot->rx_packets += rpackets;
- tot->tx_packets += tpackets;
-@@ -2726,12 +2726,12 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
-
- stats_base = (u8 *)&rq->stats;
- do {
-- start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
-+ start = u64_stats_fetch_begin(&rq->stats.syncp);
- for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
- offset = virtnet_rq_stats_desc[j].offset;
- data[idx + j] = *(u64 *)(stats_base + offset);
- }
-- } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
-+ } while (u64_stats_fetch_retry(&rq->stats.syncp, start));
- idx += VIRTNET_RQ_STATS_LEN;
- }
-
-@@ -2740,12 +2740,12 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
-
- stats_base = (u8 *)&sq->stats;
- do {
-- start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
-+ start = u64_stats_fetch_begin(&sq->stats.syncp);
- for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
- offset = virtnet_sq_stats_desc[j].offset;
- data[idx + j] = *(u64 *)(stats_base + offset);
- }
-- } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));
-+ } while (u64_stats_fetch_retry(&sq->stats.syncp, start));
- idx += VIRTNET_SQ_STATS_LEN;
- }
- }
-diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
-index 208df4d41939..6043e63b42f9 100644
---- a/drivers/net/vrf.c
-+++ b/drivers/net/vrf.c
-@@ -159,13 +159,13 @@ static void vrf_get_stats64(struct net_device *dev,
-
- dstats = per_cpu_ptr(dev->dstats, i);
- do {
-- start = u64_stats_fetch_begin_irq(&dstats->syncp);
-+ start = u64_stats_fetch_begin(&dstats->syncp);
- tbytes = dstats->tx_bytes;
- tpkts = dstats->tx_pkts;
- tdrops = dstats->tx_drps;
- rbytes = dstats->rx_bytes;
- rpkts = dstats->rx_pkts;
-- } while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
-+ } while (u64_stats_fetch_retry(&dstats->syncp, start));
- stats->tx_bytes += tbytes;
- stats->tx_packets += tpkts;
- stats->tx_dropped += tdrops;
-diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c
-index c5cf55030158..c3ff30ab782e 100644
---- a/drivers/net/vxlan/vxlan_vnifilter.c
-+++ b/drivers/net/vxlan/vxlan_vnifilter.c
-@@ -129,9 +129,9 @@ static void vxlan_vnifilter_stats_get(const struct vxlan_vni_node *vninode,
-
- pstats = per_cpu_ptr(vninode->stats, i);
- do {
-- start = u64_stats_fetch_begin_irq(&pstats->syncp);
-+ start = u64_stats_fetch_begin(&pstats->syncp);
- memcpy(&temp, &pstats->stats, sizeof(temp));
-- } while (u64_stats_fetch_retry_irq(&pstats->syncp, start));
-+ } while (u64_stats_fetch_retry(&pstats->syncp, start));
-
- dest->rx_packets += temp.rx_packets;
- dest->rx_bytes += temp.rx_bytes;
-diff --git a/drivers/net/wwan/mhi_wwan_mbim.c b/drivers/net/wwan/mhi_wwan_mbim.c
-index ef70bb7c88ad..3f72ae943b29 100644
---- a/drivers/net/wwan/mhi_wwan_mbim.c
-+++ b/drivers/net/wwan/mhi_wwan_mbim.c
-@@ -456,19 +456,19 @@ static void mhi_mbim_ndo_get_stats64(struct net_device *ndev,
- unsigned int start;
-
- do {
-- start = u64_stats_fetch_begin_irq(&link->rx_syncp);
-+ start = u64_stats_fetch_begin(&link->rx_syncp);
- stats->rx_packets = u64_stats_read(&link->rx_packets);
- stats->rx_bytes = u64_stats_read(&link->rx_bytes);
- stats->rx_errors = u64_stats_read(&link->rx_errors);
-- } while (u64_stats_fetch_retry_irq(&link->rx_syncp, start));
-+ } while (u64_stats_fetch_retry(&link->rx_syncp, start));
-
- do {
-- start = u64_stats_fetch_begin_irq(&link->tx_syncp);
-+ start = u64_stats_fetch_begin(&link->tx_syncp);
- stats->tx_packets = u64_stats_read(&link->tx_packets);
- stats->tx_bytes = u64_stats_read(&link->tx_bytes);
- stats->tx_errors = u64_stats_read(&link->tx_errors);
- stats->tx_dropped = u64_stats_read(&link->tx_dropped);
-- } while (u64_stats_fetch_retry_irq(&link->tx_syncp, start));
-+ } while (u64_stats_fetch_retry(&link->tx_syncp, start));
- }
-
- static void mhi_mbim_ul_callback(struct mhi_device *mhi_dev,
-diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
-index dc404e05970c..14aec417fa06 100644
---- a/drivers/net/xen-netfront.c
-+++ b/drivers/net/xen-netfront.c
-@@ -1392,16 +1392,16 @@ static void xennet_get_stats64(struct net_device *dev,
- unsigned int start;
-
- do {
-- start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
-+ start = u64_stats_fetch_begin(&tx_stats->syncp);
- tx_packets = tx_stats->packets;
- tx_bytes = tx_stats->bytes;
-- } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&tx_stats->syncp, start));
-
- do {
-- start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
-+ start = u64_stats_fetch_begin(&rx_stats->syncp);
- rx_packets = rx_stats->packets;
- rx_bytes = rx_stats->bytes;
-- } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&rx_stats->syncp, start));
-
- tot->rx_packets += rx_packets;
- tot->tx_packets += tx_packets;
---
-2.43.0
-
diff --git a/buildroot-external/patches/linux/0006-locking-rtmutex-Add-a-lockdep-assert-to-catch-potent.patch b/buildroot-external/patches/linux/0006-locking-rtmutex-Add-a-lockdep-assert-to-catch-potent.patch
new file mode 100644
index 00000000..5db59d4f
--- /dev/null
+++ b/buildroot-external/patches/linux/0006-locking-rtmutex-Add-a-lockdep-assert-to-catch-potent.patch
@@ -0,0 +1,66 @@
+From beef31a7b4f8b038bfd4490f654df84668b806dc Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner
+Date: Fri, 8 Sep 2023 18:22:53 +0200
+Subject: [PATCH 006/195] locking/rtmutex: Add a lockdep assert to catch
+ potential nested blocking
+
+There used to be a BUG_ON(current->pi_blocked_on) in the lock acquisition
+functions, but that vanished in one of the rtmutex overhauls.
+
+Bring it back in form of a lockdep assert to catch code paths which take
+rtmutex based locks with current::pi_blocked_on != NULL.
+
+Reported-by: Crystal Wood
+Signed-off-by: Thomas Gleixner
+Signed-off-by: "Peter Zijlstra (Intel)"
+Signed-off-by: Sebastian Andrzej Siewior
+Signed-off-by: Peter Zijlstra (Intel)
+Link: https://lkml.kernel.org/r/20230908162254.999499-7-bigeasy@linutronix.de
+---
+ kernel/locking/rtmutex.c | 2 ++
+ kernel/locking/rwbase_rt.c | 2 ++
+ kernel/locking/spinlock_rt.c | 2 ++
+ 3 files changed, 6 insertions(+)
+
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index a3fe05dfd0d8..4a10e8c16fd2 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1784,6 +1784,8 @@ static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
+ static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
+ unsigned int state)
+ {
++ lockdep_assert(!current->pi_blocked_on);
++
+ if (likely(rt_mutex_try_acquire(lock)))
+ return 0;
+
+diff --git a/kernel/locking/rwbase_rt.c b/kernel/locking/rwbase_rt.c
+index c7258cb32d91..34a59569db6b 100644
+--- a/kernel/locking/rwbase_rt.c
++++ b/kernel/locking/rwbase_rt.c
+@@ -133,6 +133,8 @@ static int __sched __rwbase_read_lock(struct rwbase_rt *rwb,
+ static __always_inline int rwbase_read_lock(struct rwbase_rt *rwb,
+ unsigned int state)
+ {
++ lockdep_assert(!current->pi_blocked_on);
++
+ if (rwbase_read_trylock(rwb))
+ return 0;
+
+diff --git a/kernel/locking/spinlock_rt.c b/kernel/locking/spinlock_rt.c
+index 842037b2ba54..38e292454fcc 100644
+--- a/kernel/locking/spinlock_rt.c
++++ b/kernel/locking/spinlock_rt.c
+@@ -37,6 +37,8 @@
+
+ static __always_inline void rtlock_lock(struct rt_mutex_base *rtm)
+ {
++ lockdep_assert(!current->pi_blocked_on);
++
+ if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
+ rtlock_slowlock(rtm);
+ }
+--
+2.43.0
+
diff --git a/buildroot-external/patches/linux/0006-net-Remove-the-obsolte-u64_stats_fetch_-_irq-users-n.patch b/buildroot-external/patches/linux/0006-net-Remove-the-obsolte-u64_stats_fetch_-_irq-users-n.patch
deleted file mode 100644
index e0e4cbc3..00000000
--- a/buildroot-external/patches/linux/0006-net-Remove-the-obsolte-u64_stats_fetch_-_irq-users-n.patch
+++ /dev/null
@@ -1,392 +0,0 @@
-From 62269129168ade34b47e64d0c82c16ea665e2bfe Mon Sep 17 00:00:00 2001
-From: Thomas Gleixner
-Date: Thu, 25 Aug 2022 16:17:37 +0200
-Subject: [PATCH 06/62] net: Remove the obsolte u64_stats_fetch_*_irq() users
- (net).
-
-Now that the 32bit UP oddity is gone and 32bit uses always a sequence
-count, there is no need for the fetch_irq() variants anymore.
-
-Convert to the regular interface.
-
-Signed-off-by: Thomas Gleixner
-Signed-off-by: Sebastian Andrzej Siewior
-Acked-by: Peter Zijlstra (Intel)
----
- net/8021q/vlan_dev.c | 4 ++--
- net/bridge/br_multicast.c | 4 ++--
- net/bridge/br_vlan.c | 4 ++--
- net/core/dev.c | 4 ++--
- net/core/drop_monitor.c | 8 ++++----
- net/core/gen_stats.c | 16 ++++++++--------
- net/devlink/leftover.c | 4 ++--
- net/dsa/slave.c | 4 ++--
- net/ipv4/af_inet.c | 4 ++--
- net/ipv6/seg6_local.c | 4 ++--
- net/mac80211/sta_info.c | 8 ++++----
- net/mpls/af_mpls.c | 4 ++--
- net/netfilter/ipvs/ip_vs_ctl.c | 4 ++--
- net/netfilter/nf_tables_api.c | 4 ++--
- net/openvswitch/datapath.c | 4 ++--
- net/openvswitch/flow_table.c | 9 ++++-----
- 16 files changed, 44 insertions(+), 45 deletions(-)
-
-diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
-index d3e511e1eba8..0fa52bcc296b 100644
---- a/net/8021q/vlan_dev.c
-+++ b/net/8021q/vlan_dev.c
-@@ -712,13 +712,13 @@ static void vlan_dev_get_stats64(struct net_device *dev,
-
- p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
- do {
-- start = u64_stats_fetch_begin_irq(&p->syncp);
-+ start = u64_stats_fetch_begin(&p->syncp);
- rxpackets = u64_stats_read(&p->rx_packets);
- rxbytes = u64_stats_read(&p->rx_bytes);
- rxmulticast = u64_stats_read(&p->rx_multicast);
- txpackets = u64_stats_read(&p->tx_packets);
- txbytes = u64_stats_read(&p->tx_bytes);
-- } while (u64_stats_fetch_retry_irq(&p->syncp, start));
-+ } while (u64_stats_fetch_retry(&p->syncp, start));
-
- stats->rx_packets += rxpackets;
- stats->rx_bytes += rxbytes;
-diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
-index db4f2641d1cd..7e2a9fb5786c 100644
---- a/net/bridge/br_multicast.c
-+++ b/net/bridge/br_multicast.c
-@@ -4899,9 +4899,9 @@ void br_multicast_get_stats(const struct net_bridge *br,
- unsigned int start;
-
- do {
-- start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
-+ start = u64_stats_fetch_begin(&cpu_stats->syncp);
- memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
-- } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
-
- mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
- mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
-diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
-index 9ffd40b8270c..bc75fa1e4666 100644
---- a/net/bridge/br_vlan.c
-+++ b/net/bridge/br_vlan.c
-@@ -1389,12 +1389,12 @@ void br_vlan_get_stats(const struct net_bridge_vlan *v,
-
- cpu_stats = per_cpu_ptr(v->stats, i);
- do {
-- start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
-+ start = u64_stats_fetch_begin(&cpu_stats->syncp);
- rxpackets = u64_stats_read(&cpu_stats->rx_packets);
- rxbytes = u64_stats_read(&cpu_stats->rx_bytes);
- txbytes = u64_stats_read(&cpu_stats->tx_bytes);
- txpackets = u64_stats_read(&cpu_stats->tx_packets);
-- } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
-
- u64_stats_add(&stats->rx_packets, rxpackets);
- u64_stats_add(&stats->rx_bytes, rxbytes);
-diff --git a/net/core/dev.c b/net/core/dev.c
-index 0a5566b6f8a2..29ae6265a408 100644
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -10508,12 +10508,12 @@ void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
-
- stats = per_cpu_ptr(netstats, cpu);
- do {
-- start = u64_stats_fetch_begin_irq(&stats->syncp);
-+ start = u64_stats_fetch_begin(&stats->syncp);
- rx_packets = u64_stats_read(&stats->rx_packets);
- rx_bytes = u64_stats_read(&stats->rx_bytes);
- tx_packets = u64_stats_read(&stats->tx_packets);
- tx_bytes = u64_stats_read(&stats->tx_bytes);
-- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&stats->syncp, start));
-
- s->rx_packets += rx_packets;
- s->rx_bytes += rx_bytes;
-diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
-index 8e0a90b45df2..4d5e8b317c47 100644
---- a/net/core/drop_monitor.c
-+++ b/net/core/drop_monitor.c
-@@ -1432,9 +1432,9 @@ static void net_dm_stats_read(struct net_dm_stats *stats)
- u64 dropped;
-
- do {
-- start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
-+ start = u64_stats_fetch_begin(&cpu_stats->syncp);
- dropped = u64_stats_read(&cpu_stats->dropped);
-- } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
-
- u64_stats_add(&stats->dropped, dropped);
- }
-@@ -1476,9 +1476,9 @@ static void net_dm_hw_stats_read(struct net_dm_stats *stats)
- u64 dropped;
-
- do {
-- start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
-+ start = u64_stats_fetch_begin(&cpu_stats->syncp);
- dropped = u64_stats_read(&cpu_stats->dropped);
-- } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
-
- u64_stats_add(&stats->dropped, dropped);
- }
-diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
-index c8d137ef5980..b71ccaec0991 100644
---- a/net/core/gen_stats.c
-+++ b/net/core/gen_stats.c
-@@ -135,10 +135,10 @@ static void gnet_stats_add_basic_cpu(struct gnet_stats_basic_sync *bstats,
- u64 bytes, packets;
-
- do {
-- start = u64_stats_fetch_begin_irq(&bcpu->syncp);
-+ start = u64_stats_fetch_begin(&bcpu->syncp);
- bytes = u64_stats_read(&bcpu->bytes);
- packets = u64_stats_read(&bcpu->packets);
-- } while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));
-+ } while (u64_stats_fetch_retry(&bcpu->syncp, start));
-
- t_bytes += bytes;
- t_packets += packets;
-@@ -162,10 +162,10 @@ void gnet_stats_add_basic(struct gnet_stats_basic_sync *bstats,
- }
- do {
- if (running)
-- start = u64_stats_fetch_begin_irq(&b->syncp);
-+ start = u64_stats_fetch_begin(&b->syncp);
- bytes = u64_stats_read(&b->bytes);
- packets = u64_stats_read(&b->packets);
-- } while (running && u64_stats_fetch_retry_irq(&b->syncp, start));
-+ } while (running && u64_stats_fetch_retry(&b->syncp, start));
-
- _bstats_update(bstats, bytes, packets);
- }
-@@ -187,10 +187,10 @@ static void gnet_stats_read_basic(u64 *ret_bytes, u64 *ret_packets,
- u64 bytes, packets;
-
- do {
-- start = u64_stats_fetch_begin_irq(&bcpu->syncp);
-+ start = u64_stats_fetch_begin(&bcpu->syncp);
- bytes = u64_stats_read(&bcpu->bytes);
- packets = u64_stats_read(&bcpu->packets);
-- } while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));
-+ } while (u64_stats_fetch_retry(&bcpu->syncp, start));
-
- t_bytes += bytes;
- t_packets += packets;
-@@ -201,10 +201,10 @@ static void gnet_stats_read_basic(u64 *ret_bytes, u64 *ret_packets,
- }
- do {
- if (running)
-- start = u64_stats_fetch_begin_irq(&b->syncp);
-+ start = u64_stats_fetch_begin(&b->syncp);
- *ret_bytes = u64_stats_read(&b->bytes);
- *ret_packets = u64_stats_read(&b->packets);
-- } while (running && u64_stats_fetch_retry_irq(&b->syncp, start));
-+ } while (running && u64_stats_fetch_retry(&b->syncp, start));
- }
-
- static int
-diff --git a/net/devlink/leftover.c b/net/devlink/leftover.c
-index 032c7af065cd..94e8cc3de330 100644
---- a/net/devlink/leftover.c
-+++ b/net/devlink/leftover.c
-@@ -8307,10 +8307,10 @@ static void devlink_trap_stats_read(struct devlink_stats __percpu *trap_stats,
-
- cpu_stats = per_cpu_ptr(trap_stats, i);
- do {
-- start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
-+ start = u64_stats_fetch_begin(&cpu_stats->syncp);
- rx_packets = u64_stats_read(&cpu_stats->rx_packets);
- rx_bytes = u64_stats_read(&cpu_stats->rx_bytes);
-- } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
-
- u64_stats_add(&stats->rx_packets, rx_packets);
- u64_stats_add(&stats->rx_bytes, rx_bytes);
-diff --git a/net/dsa/slave.c b/net/dsa/slave.c
-index 5fe075bf479e..28ee63ec1d1d 100644
---- a/net/dsa/slave.c
-+++ b/net/dsa/slave.c
-@@ -976,12 +976,12 @@ static void dsa_slave_get_ethtool_stats(struct net_device *dev,
-
- s = per_cpu_ptr(dev->tstats, i);
- do {
-- start = u64_stats_fetch_begin_irq(&s->syncp);
-+ start = u64_stats_fetch_begin(&s->syncp);
- tx_packets = u64_stats_read(&s->tx_packets);
- tx_bytes = u64_stats_read(&s->tx_bytes);
- rx_packets = u64_stats_read(&s->rx_packets);
- rx_bytes = u64_stats_read(&s->rx_bytes);
-- } while (u64_stats_fetch_retry_irq(&s->syncp, start));
-+ } while (u64_stats_fetch_retry(&s->syncp, start));
- data[0] += tx_packets;
- data[1] += tx_bytes;
- data[2] += rx_packets;
-diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
-index 347c3768df6e..bc3d36463e32 100644
---- a/net/ipv4/af_inet.c
-+++ b/net/ipv4/af_inet.c
-@@ -1726,9 +1726,9 @@ u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offt,
- bhptr = per_cpu_ptr(mib, cpu);
- syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
- do {
-- start = u64_stats_fetch_begin_irq(syncp);
-+ start = u64_stats_fetch_begin(syncp);
- v = *(((u64 *)bhptr) + offt);
-- } while (u64_stats_fetch_retry_irq(syncp, start));
-+ } while (u64_stats_fetch_retry(syncp, start));
-
- return v;
- }
-diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
-index 8370726ae7bf..487f8e98deaa 100644
---- a/net/ipv6/seg6_local.c
-+++ b/net/ipv6/seg6_local.c
-@@ -1644,13 +1644,13 @@ static int put_nla_counters(struct sk_buff *skb, struct seg6_local_lwt *slwt)
-
- pcounters = per_cpu_ptr(slwt->pcpu_counters, i);
- do {
-- start = u64_stats_fetch_begin_irq(&pcounters->syncp);
-+ start = u64_stats_fetch_begin(&pcounters->syncp);
-
- packets = u64_stats_read(&pcounters->packets);
- bytes = u64_stats_read(&pcounters->bytes);
- errors = u64_stats_read(&pcounters->errors);
-
-- } while (u64_stats_fetch_retry_irq(&pcounters->syncp, start));
-+ } while (u64_stats_fetch_retry(&pcounters->syncp, start));
-
- counters.packets += packets;
- counters.bytes += bytes;
-diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
-index 49b71453dec3..c462e20ccc8d 100644
---- a/net/mac80211/sta_info.c
-+++ b/net/mac80211/sta_info.c
-@@ -2397,9 +2397,9 @@ static inline u64 sta_get_tidstats_msdu(struct ieee80211_sta_rx_stats *rxstats,
- u64 value;
-
- do {
-- start = u64_stats_fetch_begin_irq(&rxstats->syncp);
-+ start = u64_stats_fetch_begin(&rxstats->syncp);
- value = rxstats->msdu[tid];
-- } while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));
-+ } while (u64_stats_fetch_retry(&rxstats->syncp, start));
-
- return value;
- }
-@@ -2465,9 +2465,9 @@ static inline u64 sta_get_stats_bytes(struct ieee80211_sta_rx_stats *rxstats)
- u64 value;
-
- do {
-- start = u64_stats_fetch_begin_irq(&rxstats->syncp);
-+ start = u64_stats_fetch_begin(&rxstats->syncp);
- value = rxstats->bytes;
-- } while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));
-+ } while (u64_stats_fetch_retry(&rxstats->syncp, start));
-
- return value;
- }
-diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
-index f1f43894efb8..dc5165d3eec4 100644
---- a/net/mpls/af_mpls.c
-+++ b/net/mpls/af_mpls.c
-@@ -1079,9 +1079,9 @@ static void mpls_get_stats(struct mpls_dev *mdev,
-
- p = per_cpu_ptr(mdev->stats, i);
- do {
-- start = u64_stats_fetch_begin_irq(&p->syncp);
-+ start = u64_stats_fetch_begin(&p->syncp);
- local = p->stats;
-- } while (u64_stats_fetch_retry_irq(&p->syncp, start));
-+ } while (u64_stats_fetch_retry(&p->syncp, start));
-
- stats->rx_packets += local.rx_packets;
- stats->rx_bytes += local.rx_bytes;
-diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
-index 17a1b731a76b..2be696513629 100644
---- a/net/netfilter/ipvs/ip_vs_ctl.c
-+++ b/net/netfilter/ipvs/ip_vs_ctl.c
-@@ -2299,13 +2299,13 @@ static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v)
- u64 conns, inpkts, outpkts, inbytes, outbytes;
-
- do {
-- start = u64_stats_fetch_begin_irq(&u->syncp);
-+ start = u64_stats_fetch_begin(&u->syncp);
- conns = u64_stats_read(&u->cnt.conns);
- inpkts = u64_stats_read(&u->cnt.inpkts);
- outpkts = u64_stats_read(&u->cnt.outpkts);
- inbytes = u64_stats_read(&u->cnt.inbytes);
- outbytes = u64_stats_read(&u->cnt.outbytes);
-- } while (u64_stats_fetch_retry_irq(&u->syncp, start));
-+ } while (u64_stats_fetch_retry(&u->syncp, start));
-
- seq_printf(seq, "%3X %8LX %8LX %8LX %16LX %16LX\n",
- i, (u64)conns, (u64)inpkts,
-diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
-index 3d6ebb9877a4..0e237f0356ae 100644
---- a/net/netfilter/nf_tables_api.c
-+++ b/net/netfilter/nf_tables_api.c
-@@ -1692,10 +1692,10 @@ static int nft_dump_stats(struct sk_buff *skb, struct nft_stats __percpu *stats)
- for_each_possible_cpu(cpu) {
- cpu_stats = per_cpu_ptr(stats, cpu);
- do {
-- seq = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
-+ seq = u64_stats_fetch_begin(&cpu_stats->syncp);
- pkts = cpu_stats->pkts;
- bytes = cpu_stats->bytes;
-- } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq));
-+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, seq));
- total.pkts += pkts;
- total.bytes += bytes;
- }
-diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
-index 3c7b24535409..0953f531f984 100644
---- a/net/openvswitch/datapath.c
-+++ b/net/openvswitch/datapath.c
-@@ -716,9 +716,9 @@ static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
- percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
-
- do {
-- start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
-+ start = u64_stats_fetch_begin(&percpu_stats->syncp);
- local_stats = *percpu_stats;
-- } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&percpu_stats->syncp, start));
-
- stats->n_hit += local_stats.n_hit;
- stats->n_missed += local_stats.n_missed;
-diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
-index d4a2db0b2299..0a0e4c283f02 100644
---- a/net/openvswitch/flow_table.c
-+++ b/net/openvswitch/flow_table.c
-@@ -205,9 +205,9 @@ static void tbl_mask_array_reset_counters(struct mask_array *ma)
-
- stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
- do {
-- start = u64_stats_fetch_begin_irq(&stats->syncp);
-+ start = u64_stats_fetch_begin(&stats->syncp);
- counter = stats->usage_cntrs[i];
-- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
-+ } while (u64_stats_fetch_retry(&stats->syncp, start));
-
- ma->masks_usage_zero_cntr[i] += counter;
- }
-@@ -1136,10 +1136,9 @@ void ovs_flow_masks_rebalance(struct flow_table *table)
-
- stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
- do {
-- start = u64_stats_fetch_begin_irq(&stats->syncp);
-+ start = u64_stats_fetch_begin(&stats->syncp);
- counter = stats->usage_cntrs[i];
-- } while (u64_stats_fetch_retry_irq(&stats->syncp,
-- start));
-+ } while (u64_stats_fetch_retry(&stats->syncp, start));
-
- masks_and_count[i].counter += counter;
- }
---
-2.43.0
-
diff --git a/buildroot-external/patches/linux/0007-bpf-Remove-the-obsolte-u64_stats_fetch_-_irq-users.patch b/buildroot-external/patches/linux/0007-bpf-Remove-the-obsolte-u64_stats_fetch_-_irq-users.patch
deleted file mode 100644
index 4b980926..00000000
--- a/buildroot-external/patches/linux/0007-bpf-Remove-the-obsolte-u64_stats_fetch_-_irq-users.patch
+++ /dev/null
@@ -1,50 +0,0 @@
-From 9350167d72e9692f289b2569b6f24da0b894aac6 Mon Sep 17 00:00:00 2001
-From: Thomas Gleixner
-Date: Thu, 25 Aug 2022 16:17:57 +0200
-Subject: [PATCH 07/62] bpf: Remove the obsolte u64_stats_fetch_*_irq() users.
-
-Now that the 32bit UP oddity is gone and 32bit uses always a sequence
-count, there is no need for the fetch_irq() variants anymore.
-
-Convert to the regular interface.
-
-Cc: Alexei Starovoitov
-Cc: Andrii Nakryiko
-Cc: Daniel Borkmann
-Cc: Hao Luo
-Cc: Jiri Olsa
-Cc: John Fastabend
-Cc: KP Singh
-Cc: Martin KaFai Lau
-Cc: Song Liu
-Cc: Stanislav Fomichev
-Cc: Yonghong Song
-Cc: bpf@vger.kernel.org
-Signed-off-by: Thomas Gleixner
-Signed-off-by: Sebastian Andrzej Siewior
-Acked-by: Peter Zijlstra (Intel)
----
- kernel/bpf/syscall.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
-index 0c8b7733573e..c0915e2424f1 100644
---- a/kernel/bpf/syscall.c
-+++ b/kernel/bpf/syscall.c
-@@ -2115,11 +2115,11 @@ static void bpf_prog_get_stats(const struct bpf_prog *prog,
-
- st = per_cpu_ptr(prog->stats, cpu);
- do {
-- start = u64_stats_fetch_begin_irq(&st->syncp);
-+ start = u64_stats_fetch_begin(&st->syncp);
- tnsecs = u64_stats_read(&st->nsecs);
- tcnt = u64_stats_read(&st->cnt);
- tmisses = u64_stats_read(&st->misses);
-- } while (u64_stats_fetch_retry_irq(&st->syncp, start));
-+ } while (u64_stats_fetch_retry(&st->syncp, start));
- nsecs += tnsecs;
- cnt += tcnt;
- misses += tmisses;
---
-2.43.0
-
diff --git a/buildroot-external/patches/linux/0007-futex-pi-Fix-recursive-rt_mutex-waiter-state.patch b/buildroot-external/patches/linux/0007-futex-pi-Fix-recursive-rt_mutex-waiter-state.patch
new file mode 100644
index 00000000..4c553af5
--- /dev/null
+++ b/buildroot-external/patches/linux/0007-futex-pi-Fix-recursive-rt_mutex-waiter-state.patch
@@ -0,0 +1,204 @@
+From 067017a07e06c6ab7a99d0a9da097d3cdbc07d74 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra
+Date: Fri, 15 Sep 2023 17:19:44 +0200
+Subject: [PATCH 007/195] futex/pi: Fix recursive rt_mutex waiter state
+
+Some new assertions pointed out that the existing code has nested rt_mutex wait
+state in the futex code.
+
+Specifically, the futex_lock_pi() cancel case uses spin_lock() while there
+still is a rt_waiter enqueued for this task, resulting in a state where there
+are two waiters for the same task (and task_struct::pi_blocked_on gets
+scrambled).
+
+The reason to take hb->lock at this point is to avoid the wake_futex_pi()
+EAGAIN case.
+
+This happens when futex_top_waiter() and rt_mutex_top_waiter() state becomes
+inconsistent. The current rules are such that this inconsistency will not be
+observed.
+
+Notably the case that needs to be avoided is where futex_lock_pi() and
+futex_unlock_pi() interleave such that unlock will fail to observe a new
+waiter.
+
+*However* the case at hand is where a waiter is leaving, in this case the race
+means a waiter that is going away is not observed -- which is harmless,
+provided this race is explicitly handled.
+
+This is a somewhat dangerous proposition because the converse race is not
+observing a new waiter, which must absolutely not happen. But since the race is
+valid this cannot be asserted.
+
+Signed-off-by: Peter Zijlstra (Intel)
+Reviewed-by: Thomas Gleixner
+Reviewed-by: Sebastian Andrzej Siewior
+Tested-by: Sebastian Andrzej Siewior
+Link: https://lkml.kernel.org/r/20230915151943.GD6743@noisy.programming.kicks-ass.net
+---
+ kernel/futex/pi.c | 76 ++++++++++++++++++++++++++----------------
+ kernel/futex/requeue.c | 6 ++--
+ 2 files changed, 52 insertions(+), 30 deletions(-)
+
+diff --git a/kernel/futex/pi.c b/kernel/futex/pi.c
+index f8e65b27d9d6..d636a1bbd7d0 100644
+--- a/kernel/futex/pi.c
++++ b/kernel/futex/pi.c
+@@ -611,29 +611,16 @@ int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
+ /*
+ * Caller must hold a reference on @pi_state.
+ */
+-static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state)
++static int wake_futex_pi(u32 __user *uaddr, u32 uval,
++ struct futex_pi_state *pi_state,
++ struct rt_mutex_waiter *top_waiter)
+ {
+- struct rt_mutex_waiter *top_waiter;
+ struct task_struct *new_owner;
+ bool postunlock = false;
+ DEFINE_RT_WAKE_Q(wqh);
+ u32 curval, newval;
+ int ret = 0;
+
+- top_waiter = rt_mutex_top_waiter(&pi_state->pi_mutex);
+- if (WARN_ON_ONCE(!top_waiter)) {
+- /*
+- * As per the comment in futex_unlock_pi() this should not happen.
+- *
+- * When this happens, give up our locks and try again, giving
+- * the futex_lock_pi() instance time to complete, either by
+- * waiting on the rtmutex or removing itself from the futex
+- * queue.
+- */
+- ret = -EAGAIN;
+- goto out_unlock;
+- }
+-
+ new_owner = top_waiter->task;
+
+ /*
+@@ -1046,19 +1033,33 @@ int futex_lock_pi(u32 __user *uaddr, unsigned int flags, ktime_t *time, int tryl
+ ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
+
+ cleanup:
+- spin_lock(q.lock_ptr);
+ /*
+ * If we failed to acquire the lock (deadlock/signal/timeout), we must
+- * first acquire the hb->lock before removing the lock from the
+- * rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait
+- * lists consistent.
++ * must unwind the above, however we canont lock hb->lock because
++ * rt_mutex already has a waiter enqueued and hb->lock can itself try
++ * and enqueue an rt_waiter through rtlock.
++ *
++ * Doing the cleanup without holding hb->lock can cause inconsistent
++ * state between hb and pi_state, but only in the direction of not
++ * seeing a waiter that is leaving.
++ *
++ * See futex_unlock_pi(), it deals with this inconsistency.
+ *
+- * In particular; it is important that futex_unlock_pi() can not
+- * observe this inconsistency.
++ * There be dragons here, since we must deal with the inconsistency on
++ * the way out (here), it is impossible to detect/warn about the race
++ * the other way around (missing an incoming waiter).
++ *
++ * What could possibly go wrong...
+ */
+ if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
+ ret = 0;
+
++ /*
++ * Now that the rt_waiter has been dequeued, it is safe to use
++ * spinlock/rtlock (which might enqueue its own rt_waiter) and fix up
++ * the
++ */
++ spin_lock(q.lock_ptr);
+ /*
+ * Waiter is unqueued.
+ */
+@@ -1143,6 +1144,7 @@ int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
+ top_waiter = futex_top_waiter(hb, &key);
+ if (top_waiter) {
+ struct futex_pi_state *pi_state = top_waiter->pi_state;
++ struct rt_mutex_waiter *rt_waiter;
+
+ ret = -EINVAL;
+ if (!pi_state)
+@@ -1155,22 +1157,39 @@ int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
+ if (pi_state->owner != current)
+ goto out_unlock;
+
+- get_pi_state(pi_state);
+ /*
+ * By taking wait_lock while still holding hb->lock, we ensure
+- * there is no point where we hold neither; and therefore
+- * wake_futex_p() must observe a state consistent with what we
+- * observed.
++ * there is no point where we hold neither; and thereby
++ * wake_futex_pi() must observe any new waiters.
++ *
++ * Since the cleanup: case in futex_lock_pi() removes the
++ * rt_waiter without holding hb->lock, it is possible for
++ * wake_futex_pi() to not find a waiter while the above does,
++ * in this case the waiter is on the way out and it can be
++ * ignored.
+ *
+ * In particular; this forces __rt_mutex_start_proxy() to
+ * complete such that we're guaranteed to observe the
+- * rt_waiter. Also see the WARN in wake_futex_pi().
++ * rt_waiter.
+ */
+ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
++
++ /*
++ * Futex vs rt_mutex waiter state -- if there are no rt_mutex
++ * waiters even though futex thinks there are, then the waiter
++ * is leaving and the uncontended path is safe to take.
++ */
++ rt_waiter = rt_mutex_top_waiter(&pi_state->pi_mutex);
++ if (!rt_waiter) {
++ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
++ goto do_uncontended;
++ }
++
++ get_pi_state(pi_state);
+ spin_unlock(&hb->lock);
+
+ /* drops pi_state->pi_mutex.wait_lock */
+- ret = wake_futex_pi(uaddr, uval, pi_state);
++ ret = wake_futex_pi(uaddr, uval, pi_state, rt_waiter);
+
+ put_pi_state(pi_state);
+
+@@ -1198,6 +1217,7 @@ int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
+ return ret;
+ }
+
++do_uncontended:
+ /*
+ * We have no kernel internal state, i.e. no waiters in the
+ * kernel. Waiters which are about to queue themselves are stuck
+diff --git a/kernel/futex/requeue.c b/kernel/futex/requeue.c
+index cba8b1a6a4cc..4c73e0b81acc 100644
+--- a/kernel/futex/requeue.c
++++ b/kernel/futex/requeue.c
+@@ -850,11 +850,13 @@ int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ pi_mutex = &q.pi_state->pi_mutex;
+ ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
+
+- /* Current is not longer pi_blocked_on */
+- spin_lock(q.lock_ptr);
++ /*
++ * See futex_unlock_pi()'s cleanup: comment.
++ */
+ if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
+ ret = 0;
+
++ spin_lock(q.lock_ptr);
+ debug_rt_mutex_free_waiter(&rt_waiter);
+ /*
+ * Fixup the pi_state owner and possibly acquire the lock if we
+--
+2.43.0
+
diff --git a/buildroot-external/patches/linux/0008-signal-Add-proper-comment-about-the-preempt-disable-.patch b/buildroot-external/patches/linux/0008-signal-Add-proper-comment-about-the-preempt-disable-.patch
new file mode 100644
index 00000000..00627f3f
--- /dev/null
+++ b/buildroot-external/patches/linux/0008-signal-Add-proper-comment-about-the-preempt-disable-.patch
@@ -0,0 +1,52 @@
+From 854cc4a6294ebc631fceda57138d1419977e9c87 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior
+Date: Thu, 3 Aug 2023 12:09:31 +0200
+Subject: [PATCH 008/195] signal: Add proper comment about the preempt-disable
+ in ptrace_stop().
+
+Commit 53da1d9456fe7 ("fix ptrace slowness") added a preempt-disable section
+between read_unlock() and the following schedule() invocation without
+explaining why it is needed.
+
+Replace the comment with an explanation why this is needed. Clarify that
+it is needed for correctness but for performance reasons.
+
+Acked-by: Oleg Nesterov
+Signed-off-by: Sebastian Andrzej Siewior
+Link: https://lore.kernel.org/r/20230803100932.325870-2-bigeasy@linutronix.de
+---
+ kernel/signal.c | 17 ++++++++++++++---
+ 1 file changed, 14 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 09019017d669..051ed8114cd4 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -2329,10 +2329,21 @@ static int ptrace_stop(int exit_code, int why, unsigned long message,
+ do_notify_parent_cldstop(current, false, why);
+
+ /*
+- * Don't want to allow preemption here, because
+- * sys_ptrace() needs this task to be inactive.
++ * The previous do_notify_parent_cldstop() invocation woke ptracer.
++ * One a PREEMPTION kernel this can result in preemption requirement
++ * which will be fulfilled after read_unlock() and the ptracer will be
++ * put on the CPU.
++ * The ptracer is in wait_task_inactive(, __TASK_TRACED) waiting for
++ * this task wait in schedule(). If this task gets preempted then it
++ * remains enqueued on the runqueue. The ptracer will observe this and
++ * then sleep for a delay of one HZ tick. In the meantime this task
++ * gets scheduled, enters schedule() and will wait for the ptracer.
+ *
+- * XXX: implement read_unlock_no_resched().
++ * This preemption point is not bad from correctness point of view but
++ * extends the runtime by one HZ tick time due to the ptracer's sleep.
++ * The preempt-disable section ensures that there will be no preemption
++ * between unlock and schedule() and so improving the performance since
++ * the ptracer has no reason to sleep.
+ */
+ preempt_disable();
+ read_unlock(&tasklist_lock);
+--
+2.43.0
+
diff --git a/buildroot-external/patches/linux/0008-u64_stat-Remove-the-obsolete-fetch_irq-variants.patch b/buildroot-external/patches/linux/0008-u64_stat-Remove-the-obsolete-fetch_irq-variants.patch
deleted file mode 100644
index 854c3f66..00000000
--- a/buildroot-external/patches/linux/0008-u64_stat-Remove-the-obsolete-fetch_irq-variants.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-From cf6d7ad80beb2d90140c212f8ccd32197e0eccdb Mon Sep 17 00:00:00 2001
-From: Thomas Gleixner
-Date: Thu, 25 Aug 2022 16:43:46 +0200
-Subject: [PATCH 08/62] u64_stat: Remove the obsolete fetch_irq() variants.
-
-Now that the 32bit UP oddity is gone and 32bit uses always a sequence
-count, there is no need for the fetch_irq() variants anymore.
-
-Delete the obsolete interfaces.
-
-Signed-off-by: Thomas Gleixner
-Signed-off-by: Sebastian Andrzej Siewior
-Acked-by: Peter Zijlstra (Intel)
----
- include/linux/u64_stats_sync.h | 12 ------------
- 1 file changed, 12 deletions(-)
-
-diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
-index 46040d66334a..ffe48e69b3f3 100644
---- a/include/linux/u64_stats_sync.h
-+++ b/include/linux/u64_stats_sync.h
-@@ -213,16 +213,4 @@ static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
- return __u64_stats_fetch_retry(syncp, start);
- }
-
--/* Obsolete interfaces */
--static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
--{
-- return u64_stats_fetch_begin(syncp);
--}
--
--static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
-- unsigned int start)
--{
-- return u64_stats_fetch_retry(syncp, start);
--}
--
- #endif /* _LINUX_U64_STATS_SYNC_H */
---
-2.43.0
-
diff --git a/buildroot-external/patches/linux/0009-signal-Don-t-disable-preemption-in-ptrace_stop-on-PR.patch b/buildroot-external/patches/linux/0009-signal-Don-t-disable-preemption-in-ptrace_stop-on-PR.patch
new file mode 100644
index 00000000..df2b7b74
--- /dev/null
+++ b/buildroot-external/patches/linux/0009-signal-Don-t-disable-preemption-in-ptrace_stop-on-PR.patch
@@ -0,0 +1,53 @@
+From 819c398c8fc580efa6af71d8880e57d762c7b53a Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior
+Date: Thu, 3 Aug 2023 12:09:32 +0200
+Subject: [PATCH 009/195] signal: Don't disable preemption in ptrace_stop() on
+ PREEMPT_RT.
+
+On PREEMPT_RT keeping preemption disabled during the invocation of
+cgroup_enter_frozen() is a problem because the function acquires css_set_lock
+which is a sleeping lock on PREEMPT_RT and must not be acquired with disabled
+preemption.
+The preempt-disabled section is only for performance optimisation
+reasons and can be avoided.
+
+Extend the comment and don't disable preemption before scheduling on
+PREEMPT_RT.
+
+Acked-by: Oleg Nesterov
+Signed-off-by: Sebastian Andrzej Siewior
+Link: https://lore.kernel.org/r/20230803100932.325870-3-bigeasy@linutronix.de
+---
+ kernel/signal.c | 13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 051ed8114cd4..b71026341056 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -2344,11 +2344,20 @@ static int ptrace_stop(int exit_code, int why, unsigned long message,
+ * The preempt-disable section ensures that there will be no preemption
+ * between unlock and schedule() and so improving the performance since
+ * the ptracer has no reason to sleep.
++ *
++ * On PREEMPT_RT locking tasklist_lock does not disable preemption.
++ * Therefore the task can be preempted (after
++ * do_notify_parent_cldstop()) before unlocking tasklist_lock so there
++ * is no benefit in doing this. The optimisation is harmful on
+ * PREEMPT_RT because the spinlock_t (in cgroup_enter_frozen()) must not
++ * be acquired with disabled preemption.
+ */
+- preempt_disable();
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ preempt_disable();
+ read_unlock(&tasklist_lock);
+ cgroup_enter_frozen();
+- preempt_enable_no_resched();
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ preempt_enable_no_resched();
+ schedule();
+ cgroup_leave_frozen(true);
+
+--
+2.43.0
+
diff --git a/buildroot-external/patches/linux/0010-drm-amd-display-Remove-migrate_en-dis-from-dc_fpu_be.patch b/buildroot-external/patches/linux/0010-drm-amd-display-Remove-migrate_en-dis-from-dc_fpu_be.patch
new file mode 100644
index 00000000..81ac1ac8
--- /dev/null
+++ b/buildroot-external/patches/linux/0010-drm-amd-display-Remove-migrate_en-dis-from-dc_fpu_be.patch
@@ -0,0 +1,91 @@
+From beec053e6c4b3efcfb0c71e5594de0df4651d6f8 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior
+Date: Thu, 21 Sep 2023 16:15:12 +0200
+Subject: [PATCH 010/195] drm/amd/display: Remove migrate_en/dis from
+ dc_fpu_begin().
+
+This is a revert of the commit mentioned below while it is not wrong, as
+in the kernel will explode, having migrate_disable() here it is
+complete waste of resources.
+
+Additionally commit message is plain wrong the review tag does not make
+it any better. The migrate_disable() interface has a fat comment
+describing it and it includes the word "undesired" in the headline which
+should tickle people to read it before using it.
+Initially I assumed it is worded too harsh but now I beg to differ.
+
+The reviewer of the original commit, even not understanding what
+migrate_disable() does should ask the following:
+
+- migrate_disable() is added only to the CONFIG_X86 block and it claims
+ to protect fpu_recursion_depth. Why are the other architectures
+ excluded?
+
+- migrate_disable() is added after fpu_recursion_depth was modified.
+ Shouldn't it be added before the modification or referencing takes
+ place?
+
+Moving on.
+Disabling preemption DOES prevent CPU migration. A task, that can not be
+pushed away from the CPU by the scheduler (due to disabled preemption)
+can not be pushed or migrated to another CPU.
+
+Disabling migration DOES NOT ensure consistency of per-CPU variables. It
+only ensures that the task acts always on the same per-CPU variable. The
+task remains preemptible meaning multiple tasks can access the same
+per-CPU variable. This in turn leads to inconsistency for the statement
+
+ *pcpu -= 1;
+
+with two tasks on one CPU and a preemption point during the RMW
+operation:
+
+ Task A Task B
+ read pcpu to reg # 0
+ inc reg # 0 -> 1
+ read pcpu to reg # 0
+ inc reg # 0 -> 1
+ write reg to pcpu # 1
+ write reg to pcpu # 1
+
+At the end pcpu reads 1 but should read 2 instead. Boom.
+
+get_cpu_ptr() already contains a preempt_disable() statement. That means
+that the per-CPU variable can only be referenced by a single task which
+is currently running. The only inconsistency that can occur if the
+variable is additionally accessed from an interrupt.
+
+Remove migrate_disable/enable() from dc_fpu_begin/end().
+
+Cc: Tianci Yin
+Cc: Aurabindo Pillai
+Fixes: 0c316556d1249 ("drm/amd/display: Disable migration to ensure consistency of per-CPU variable")
+Link: https://lore.kernel.org/r/20230921141516.520471-2-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
+index 172aa10a8800..86f4c0e04654 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
+@@ -91,7 +91,6 @@ void dc_fpu_begin(const char *function_name, const int line)
+
+ if (*pcpu == 1) {
+ #if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
+- migrate_disable();
+ kernel_fpu_begin();
+ #elif defined(CONFIG_PPC64)
+ if (cpu_has_feature(CPU_FTR_VSX_COMP)) {
+@@ -132,7 +131,6 @@ void dc_fpu_end(const char *function_name, const int line)
+ if (*pcpu <= 0) {
+ #if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
+ kernel_fpu_end();
+- migrate_enable();
+ #elif defined(CONFIG_PPC64)
+ if (cpu_has_feature(CPU_FTR_VSX_COMP)) {
+ disable_kernel_vsx();
+--
+2.43.0
+
diff --git a/buildroot-external/patches/linux/0011-drm-amd-display-Simplify-the-per-CPU-usage.patch b/buildroot-external/patches/linux/0011-drm-amd-display-Simplify-the-per-CPU-usage.patch
new file mode 100644
index 00000000..2e8d9f5e
--- /dev/null
+++ b/buildroot-external/patches/linux/0011-drm-amd-display-Simplify-the-per-CPU-usage.patch
@@ -0,0 +1,132 @@
+From 13345b6e1dccf7f0404111eb4081834eaf628563 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior
+Date: Thu, 21 Sep 2023 16:15:13 +0200
+Subject: [PATCH 011/195] drm/amd/display: Simplify the per-CPU usage.
+
+The fpu_recursion_depth counter is used to ensure that dc_fpu_begin()
+can be invoked multiple times while the FPU-disable function itself is
+only invoked once. Also the counter part (dc_fpu_end()) is balanced
+properly.
+
+Instead of using the get_cpu_ptr() dance around the inc it is simpler to
+increment the per-CPU variable directly. Also the per-CPU variable has
+to be incremented and decremented on the same CPU. This is ensured by
+the inner-part which disables preemption. This is kind of not obvious,
+works and the preempt-counter is touched a few times for no reason.
+
+Disable preemption before incrementing fpu_recursion_depth for the first
+time. Keep preemption disabled until dc_fpu_end() where the counter is
+decremented making it obvious that the preemption has to stay disabled
+while the counter is non-zero.
+Use simple inc/dec functions.
+Remove the nested preempt_disable/enable functions which are now not
+needed.
+
+Link: https://lore.kernel.org/r/20230921141516.520471-3-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior
+---
+ .../gpu/drm/amd/display/amdgpu_dm/dc_fpu.c | 50 ++++++++-----------
+ 1 file changed, 20 insertions(+), 30 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
+index 86f4c0e04654..8bd5926b47e0 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
+@@ -60,11 +60,9 @@ static DEFINE_PER_CPU(int, fpu_recursion_depth);
+ */
+ inline void dc_assert_fp_enabled(void)
+ {
+- int *pcpu, depth = 0;
++ int depth;
+
+- pcpu = get_cpu_ptr(&fpu_recursion_depth);
+- depth = *pcpu;
+- put_cpu_ptr(&fpu_recursion_depth);
++ depth = __this_cpu_read(fpu_recursion_depth);
+
+ ASSERT(depth >= 1);
+ }
+@@ -84,32 +82,27 @@ inline void dc_assert_fp_enabled(void)
+ */
+ void dc_fpu_begin(const char *function_name, const int line)
+ {
+- int *pcpu;
++ int depth;
+
+- pcpu = get_cpu_ptr(&fpu_recursion_depth);
+- *pcpu += 1;
++ preempt_disable();
++ depth = __this_cpu_inc_return(fpu_recursion_depth);
+
+- if (*pcpu == 1) {
++ if (depth == 1) {
+ #if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
+ kernel_fpu_begin();
+ #elif defined(CONFIG_PPC64)
+- if (cpu_has_feature(CPU_FTR_VSX_COMP)) {
+- preempt_disable();
++ if (cpu_has_feature(CPU_FTR_VSX_COMP))
+ enable_kernel_vsx();
+- } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) {
+- preempt_disable();
++ else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP))
+ enable_kernel_altivec();
+- } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) {
+- preempt_disable();
++ else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE))
+ enable_kernel_fp();
+- }
+ #elif defined(CONFIG_ARM64)
+ kernel_neon_begin();
+ #endif
+ }
+
+- TRACE_DCN_FPU(true, function_name, line, *pcpu);
+- put_cpu_ptr(&fpu_recursion_depth);
++ TRACE_DCN_FPU(true, function_name, line, depth);
+ }
+
+ /**
+@@ -124,29 +117,26 @@ void dc_fpu_begin(const char *function_name, const int line)
+ */
+ void dc_fpu_end(const char *function_name, const int line)
+ {
+- int *pcpu;
++ int depth;
+
+- pcpu = get_cpu_ptr(&fpu_recursion_depth);
+- *pcpu -= 1;
+- if (*pcpu <= 0) {
++ depth = __this_cpu_dec_return(fpu_recursion_depth);
++ if (depth == 0) {
+ #if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
+ kernel_fpu_end();
+ #elif defined(CONFIG_PPC64)
+- if (cpu_has_feature(CPU_FTR_VSX_COMP)) {
++ if (cpu_has_feature(CPU_FTR_VSX_COMP))
+ disable_kernel_vsx();
+- preempt_enable();
+- } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) {
++ else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP))
+ disable_kernel_altivec();
+- preempt_enable();
+- } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) {
++ else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE))
+ disable_kernel_fp();
+- preempt_enable();
+- }
+ #elif defined(CONFIG_ARM64)
+ kernel_neon_end();
+ #endif
++ } else {
++ WARN_ON_ONCE(depth < 0);
+ }
+
+- TRACE_DCN_FPU(false, function_name, line, *pcpu);
+- put_cpu_ptr(&fpu_recursion_depth);
++ TRACE_DCN_FPU(false, function_name, line, depth);
++ preempt_enable();
+ }
+--
+2.43.0
+
diff --git a/buildroot-external/patches/linux/0012-drm-amd-display-Add-a-warning-if-the-FPU-is-used-out.patch b/buildroot-external/patches/linux/0012-drm-amd-display-Add-a-warning-if-the-FPU-is-used-out.patch
new file mode 100644
index 00000000..12c5a57d
--- /dev/null
+++ b/buildroot-external/patches/linux/0012-drm-amd-display-Add-a-warning-if-the-FPU-is-used-out.patch
@@ -0,0 +1,31 @@
+From 533bd452afe45e25b387a11ab7748f9633995fee Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior
+Date: Thu, 21 Sep 2023 16:15:14 +0200
+Subject: [PATCH 012/195] drm/amd/display: Add a warning if the FPU is used
+ outside from task context.
+
+Add a warning if the FPU is used from any context other than task
+context. This is only a precaution since the code is not able to be used
+from softirq while the API allows it on x86 for instance.
+
+Link: https://lore.kernel.org/r/20230921141516.520471-4-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
+index 8bd5926b47e0..4ae4720535a5 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
+@@ -84,6 +84,7 @@ void dc_fpu_begin(const char *function_name, const int line)
+ {
+ int depth;
+
++ WARN_ON_ONCE(!in_task());
+ preempt_disable();
+ depth = __this_cpu_inc_return(fpu_recursion_depth);
+
+--
+2.43.0
+
diff --git a/buildroot-external/patches/linux/0013-drm-amd-display-Move-the-memory-allocation-out-of-dc.patch b/buildroot-external/patches/linux/0013-drm-amd-display-Move-the-memory-allocation-out-of-dc.patch
new file mode 100644
index 00000000..55541c37
--- /dev/null
+++ b/buildroot-external/patches/linux/0013-drm-amd-display-Move-the-memory-allocation-out-of-dc.patch
@@ -0,0 +1,96 @@
+From 7c34575c85d40e203f3b18ccb63e526d978b1c78 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior
+Date: Thu, 21 Sep 2023 16:15:15 +0200
+Subject: [PATCH 013/195] drm/amd/display: Move the memory allocation out of
+ dcn21_validate_bandwidth_fp().
+
+dcn21_validate_bandwidth_fp() is invoked while FPU access has been
+enabled. FPU access requires disabling preemption even on PREEMPT_RT.
+It is not possible to allocate memory with disabled preemption even with
+GFP_ATOMIC on PREEMPT_RT.
+
+Move the memory allocation before FPU access is enabled.
+
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=217928
+Link: https://lore.kernel.org/r/20230921141516.520471-5-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior
+---
+ drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 10 +++++++++-
+ drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 7 ++-----
+ drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h | 5 ++---
+ 3 files changed, 13 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+index d1a25fe6c44f..5674c3450fc3 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+@@ -953,9 +953,17 @@ static bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
+ bool fast_validate)
+ {
+ bool voltage_supported;
++ display_e2e_pipe_params_st *pipes;
++
++ pipes = kcalloc(dc->res_pool->pipe_count, sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
++ if (!pipes)
++ return false;
++
+ DC_FP_START();
+- voltage_supported = dcn21_validate_bandwidth_fp(dc, context, fast_validate);
++ voltage_supported = dcn21_validate_bandwidth_fp(dc, context, fast_validate, pipes);
+ DC_FP_END();
++
++ kfree(pipes);
+ return voltage_supported;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+index 8a5a038fd855..89d4e969cfd8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+@@ -2311,9 +2311,8 @@ static void dcn21_calculate_wm(struct dc *dc, struct dc_state *context,
+ &context->bw_ctx.dml, pipes, pipe_cnt);
+ }
+
+-bool dcn21_validate_bandwidth_fp(struct dc *dc,
+- struct dc_state *context,
+- bool fast_validate)
++bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
++ bool fast_validate, display_e2e_pipe_params_st *pipes)
+ {
+ bool out = false;
+
+@@ -2322,7 +2321,6 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc,
+ int vlevel = 0;
+ int pipe_split_from[MAX_PIPES];
+ int pipe_cnt = 0;
+- display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ BW_VAL_TRACE_COUNT();
+@@ -2362,7 +2360,6 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc,
+ out = false;
+
+ validate_out:
+- kfree(pipes);
+
+ BW_VAL_TRACE_FINISH();
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
+index c51badf7b68a..a81a0b9e6884 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
+@@ -77,9 +77,8 @@ int dcn21_populate_dml_pipes_from_context(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ bool fast_validate);
+-bool dcn21_validate_bandwidth_fp(struct dc *dc,
+- struct dc_state *context,
+- bool fast_validate);
++bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, bool
++ fast_validate, display_e2e_pipe_params_st *pipes);
+ void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
+
+ void dcn21_clk_mgr_set_bw_params_wm_table(struct clk_bw_params *bw_params);
+--
+2.43.0
+
diff --git a/buildroot-external/patches/linux/0014-drm-amd-display-Move-the-memory-allocation-out-of-dc.patch b/buildroot-external/patches/linux/0014-drm-amd-display-Move-the-memory-allocation-out-of-dc.patch
new file mode 100644
index 00000000..49f66a4a
--- /dev/null
+++ b/buildroot-external/patches/linux/0014-drm-amd-display-Move-the-memory-allocation-out-of-dc.patch
@@ -0,0 +1,130 @@
+From bf3d7bb0ed627ca816eb490b7ac2b26ec02e63d5 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior
+Date: Thu, 21 Sep 2023 16:15:16 +0200
+Subject: [PATCH 014/195] drm/amd/display: Move the memory allocation out of
+ dcn20_validate_bandwidth_fp().
+
+dcn20_validate_bandwidth_fp() is invoked while FPU access has been
+enabled. FPU access requires disabling preemption even on PREEMPT_RT.
+It is not possible to allocate memory with disabled preemption even with
+GFP_ATOMIC on PREEMPT_RT.
+
+Move the memory allocation before FPU access is enabled.
+To preserve previous "clean" state of "pipes" add a memset() before the
+second invocation of dcn20_validate_bandwidth_internal() where the
+variable is used.
+
+Link: https://lore.kernel.org/r/20230921141516.520471-6-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior
+---
+ .../drm/amd/display/dc/dcn20/dcn20_resource.c | 10 +++++++++-
+ .../gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 16 +++++++---------
+ .../gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h | 5 ++---
+ 3 files changed, 18 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+index d587f807dfd7..5036a3e60832 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+@@ -2141,9 +2141,17 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
+ bool fast_validate)
+ {
+ bool voltage_supported;
++ display_e2e_pipe_params_st *pipes;
++
++ pipes = kcalloc(dc->res_pool->pipe_count, sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
++ if (!pipes)
++ return false;
++
+ DC_FP_START();
+- voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate);
++ voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate, pipes);
+ DC_FP_END();
++
++ kfree(pipes);
+ return voltage_supported;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+index 89d4e969cfd8..68970d6cf031 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+@@ -2018,7 +2018,7 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st
+ }
+
+ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *context,
+- bool fast_validate)
++ bool fast_validate, display_e2e_pipe_params_st *pipes)
+ {
+ bool out = false;
+
+@@ -2027,7 +2027,6 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
+ int vlevel = 0;
+ int pipe_split_from[MAX_PIPES];
+ int pipe_cnt = 0;
+- display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ BW_VAL_TRACE_COUNT();
+@@ -2062,16 +2061,14 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
+ out = false;
+
+ validate_out:
+- kfree(pipes);
+
+ BW_VAL_TRACE_FINISH();
+
+ return out;
+ }
+
+-bool dcn20_validate_bandwidth_fp(struct dc *dc,
+- struct dc_state *context,
+- bool fast_validate)
++bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
++ bool fast_validate, display_e2e_pipe_params_st *pipes)
+ {
+ bool voltage_supported = false;
+ bool full_pstate_supported = false;
+@@ -2090,11 +2087,11 @@ bool dcn20_validate_bandwidth_fp(struct dc *dc,
+ ASSERT(context != dc->current_state);
+
+ if (fast_validate) {
+- return dcn20_validate_bandwidth_internal(dc, context, true);
++ return dcn20_validate_bandwidth_internal(dc, context, true, pipes);
+ }
+
+ // Best case, we support full UCLK switch latency
+- voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
++ voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false, pipes);
+ full_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
+
+ if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 ||
+@@ -2106,7 +2103,8 @@ bool dcn20_validate_bandwidth_fp(struct dc *dc,
+ // Fallback: Try to only support G6 temperature read latency
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us;
+
+- voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
++ memset(pipes, 0, dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st));
++ voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false, pipes);
+ dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
+
+ if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) {
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
+index a81a0b9e6884..b6c34198ddc8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
+@@ -61,9 +61,8 @@ void dcn20_update_bounding_box(struct dc *dc,
+ unsigned int num_states);
+ void dcn20_patch_bounding_box(struct dc *dc,
+ struct _vcs_dpi_soc_bounding_box_st *bb);
+-bool dcn20_validate_bandwidth_fp(struct dc *dc,
+- struct dc_state *context,
+- bool fast_validate);
++bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
++ bool fast_validate, display_e2e_pipe_params_st *pipes);
+ void dcn20_fpu_set_wm_ranges(int i,
+ struct pp_smu_wm_range_sets *ranges,
+ struct _vcs_dpi_soc_bounding_box_st *loaded_bb);
+--
+2.43.0
+
diff --git a/buildroot-external/patches/linux/0009-net-Avoid-the-IPI-to-free-the.patch b/buildroot-external/patches/linux/0015-net-Avoid-the-IPI-to-free-the.patch
similarity index 76%
rename from buildroot-external/patches/linux/0009-net-Avoid-the-IPI-to-free-the.patch
rename to buildroot-external/patches/linux/0015-net-Avoid-the-IPI-to-free-the.patch
index b00d169f..d3528eb7 100644
--- a/buildroot-external/patches/linux/0009-net-Avoid-the-IPI-to-free-the.patch
+++ b/buildroot-external/patches/linux/0015-net-Avoid-the-IPI-to-free-the.patch
@@ -1,7 +1,7 @@
-From 37205f0ce3bef04671d958f03e1460dcaeed41b7 Mon Sep 17 00:00:00 2001
+From 30de1da993f600f4771b3f3724ce0667f66ac69b Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior
Date: Mon, 15 Aug 2022 17:29:50 +0200
-Subject: [PATCH 09/62] net: Avoid the IPI to free the
+Subject: [PATCH 015/195] net: Avoid the IPI to free the
skb_attempt_defer_free() collects a skbs, which was allocated on a
remote CPU, on a per-CPU list. These skbs are either freed on that
@@ -19,15 +19,15 @@ To void all this, schedule the deferred clean up from a worker.
Signed-off-by: Sebastian Andrzej Siewior
---
include/linux/netdevice.h | 4 ++++
- net/core/dev.c | 37 ++++++++++++++++++++++++++++---------
+ net/core/dev.c | 39 ++++++++++++++++++++++++++++++---------
net/core/skbuff.c | 7 ++++++-
- 3 files changed, 38 insertions(+), 10 deletions(-)
+ 3 files changed, 40 insertions(+), 10 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
-index 0373e0935990..55d698367883 100644
+index b8e60a20416b..ffa5248a90e2 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
-@@ -3169,7 +3169,11 @@ struct softnet_data {
+@@ -3258,7 +3258,11 @@ struct softnet_data {
int defer_count;
int defer_ipi_scheduled;
struct sk_buff *defer_list;
@@ -40,10 +40,10 @@ index 0373e0935990..55d698367883 100644
static inline void input_queue_head_incr(struct softnet_data *sd)
diff --git a/net/core/dev.c b/net/core/dev.c
-index 29ae6265a408..6a13043ff0f3 100644
+index e480afb50d4c..227338b8cda8 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -4621,15 +4621,6 @@ static void rps_trigger_softirq(void *data)
+@@ -4705,15 +4705,6 @@ static void rps_trigger_softirq(void *data)
#endif /* CONFIG_RPS */
@@ -57,13 +57,14 @@ index 29ae6265a408..6a13043ff0f3 100644
-}
-
/*
- * Check if this softnet_data structure is another cpu one
- * If yes, queue it to our IPI list and return 1
-@@ -6687,6 +6678,30 @@ static void skb_defer_free_flush(struct softnet_data *sd)
+ * After we queued a packet into sd->input_pkt_queue,
+ * we need to make sure this queue is serviced soon.
+@@ -6682,6 +6673,32 @@ static void skb_defer_free_flush(struct softnet_data *sd)
}
}
+#ifndef CONFIG_PREEMPT_RT
++
+/* Called from hardirq (IPI) context */
+static void trigger_rx_softirq(void *data)
+{
@@ -85,12 +86,13 @@ index 29ae6265a408..6a13043ff0f3 100644
+ skb_defer_free_flush(sd);
+ local_bh_enable();
+}
++
+#endif
+
- static __latent_entropy void net_rx_action(struct softirq_action *h)
+ static int napi_threaded_poll(void *data)
{
- struct softnet_data *sd = this_cpu_ptr(&softnet_data);
-@@ -11438,7 +11453,11 @@ static int __init net_dev_init(void)
+ struct napi_struct *napi = data;
+@@ -11606,7 +11623,11 @@ static int __init net_dev_init(void)
INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
sd->cpu = i;
#endif
@@ -103,10 +105,10 @@ index 29ae6265a408..6a13043ff0f3 100644
init_gro_hash(&sd->backlog);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
-index 8a819d0a7bfb..424ad963fa0c 100644
+index 011d69029112..9a9fbe18bf2f 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
-@@ -6682,6 +6682,11 @@ nodefer: __kfree_skb(skb);
+@@ -6844,8 +6844,13 @@ nodefer: __kfree_skb(skb);
/* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
* if we are unlucky enough (this seems very unlikely).
*/
@@ -119,6 +121,8 @@ index 8a819d0a7bfb..424ad963fa0c 100644
+#endif
+ }
}
+
+ static void skb_splice_csum_page(struct sk_buff *skb, struct page *page,
--
2.43.0
diff --git a/buildroot-external/patches/linux/0016-tpm_tis-fix-stall-after-iowrite-s.patch b/buildroot-external/patches/linux/0016-tpm_tis-fix-stall-after-iowrite-s.patch
deleted file mode 100644
index 6bc373e1..00000000
--- a/buildroot-external/patches/linux/0016-tpm_tis-fix-stall-after-iowrite-s.patch
+++ /dev/null
@@ -1,81 +0,0 @@
-From a61678645efc9d4aa757b038d91bbef571c3ba17 Mon Sep 17 00:00:00 2001
-From: Haris Okanovic
-Date: Tue, 15 Aug 2017 15:13:08 -0500
-Subject: [PATCH 16/62] tpm_tis: fix stall after iowrite*()s
-
-ioread8() operations to TPM MMIO addresses can stall the cpu when
-immediately following a sequence of iowrite*()'s to the same region.
-
-For example, cyclitest measures ~400us latency spikes when a non-RT
-usermode application communicates with an SPI-based TPM chip (Intel Atom
-E3940 system, PREEMPT_RT kernel). The spikes are caused by a
-stalling ioread8() operation following a sequence of 30+ iowrite8()s to
-the same address. I believe this happens because the write sequence is
-buffered (in cpu or somewhere along the bus), and gets flushed on the
-first LOAD instruction (ioread*()) that follows.
-
-The enclosed change appears to fix this issue: read the TPM chip's
-access register (status code) after every iowrite*() operation to
-amortize the cost of flushing data to chip across multiple instructions.
-
-Signed-off-by: Haris Okanovic
-Signed-off-by: Sebastian Andrzej Siewior
-Signed-off-by: Thomas Gleixner
----
- drivers/char/tpm/tpm_tis.c | 29 +++++++++++++++++++++++++++--
- 1 file changed, 27 insertions(+), 2 deletions(-)
-
-diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
-index 0d084d6652c4..5d620322bdc2 100644
---- a/drivers/char/tpm/tpm_tis.c
-+++ b/drivers/char/tpm/tpm_tis.c
-@@ -50,6 +50,31 @@ static inline struct tpm_tis_tcg_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *da
- return container_of(data, struct tpm_tis_tcg_phy, priv);
- }
-
-+#ifdef CONFIG_PREEMPT_RT
-+/*
-+ * Flushes previous write operations to chip so that a subsequent
-+ * ioread*()s won't stall a cpu.
-+ */
-+static inline void tpm_tis_flush(void __iomem *iobase)
-+{
-+ ioread8(iobase + TPM_ACCESS(0));
-+}
-+#else
-+#define tpm_tis_flush(iobase) do { } while (0)
-+#endif
-+
-+static inline void tpm_tis_iowrite8(u8 b, void __iomem *iobase, u32 addr)
-+{
-+ iowrite8(b, iobase + addr);
-+ tpm_tis_flush(iobase);
-+}
-+
-+static inline void tpm_tis_iowrite32(u32 b, void __iomem *iobase, u32 addr)
-+{
-+ iowrite32(b, iobase + addr);
-+ tpm_tis_flush(iobase);
-+}
-+
- static int interrupts = -1;
- module_param(interrupts, int, 0444);
- MODULE_PARM_DESC(interrupts, "Enable interrupts");
-@@ -202,12 +227,12 @@ static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
- switch (io_mode) {
- case TPM_TIS_PHYS_8:
- while (len--)
-- iowrite8(*value++, phy->iobase + addr);
-+ tpm_tis_iowrite8(*value++, phy->iobase, addr);
- break;
- case TPM_TIS_PHYS_16:
- return -EINVAL;
- case TPM_TIS_PHYS_32:
-- iowrite32(le32_to_cpu(*((__le32 *)value)), phy->iobase + addr);
-+ tpm_tis_iowrite32(le32_to_cpu(*((__le32 *)value)), phy->iobase, addr);
- break;
- }
-
---
-2.43.0
-
diff --git a/buildroot-external/patches/linux/0010-x86-Allow-to-enable-RT.patch b/buildroot-external/patches/linux/0016-x86-Allow-to-enable-RT.patch
similarity index 72%
rename from buildroot-external/patches/linux/0010-x86-Allow-to-enable-RT.patch
rename to buildroot-external/patches/linux/0016-x86-Allow-to-enable-RT.patch
index d4ba1c17..3065153b 100644
--- a/buildroot-external/patches/linux/0010-x86-Allow-to-enable-RT.patch
+++ b/buildroot-external/patches/linux/0016-x86-Allow-to-enable-RT.patch
@@ -1,7 +1,7 @@
-From bd3f264eeb33c0602b3f9d66a603e5a2e8f9d9ee Mon Sep 17 00:00:00 2001
+From c17f2c393b160199e64c464189661283345d2e73 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior
Date: Wed, 7 Aug 2019 18:15:38 +0200
-Subject: [PATCH 10/62] x86: Allow to enable RT
+Subject: [PATCH 016/195] x86: Allow to enable RT
Allow to select RT.
@@ -12,13 +12,13 @@ Signed-off-by: Thomas Gleixner
1 file changed, 1 insertion(+)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index 4c9bfc4be58d..f7f81e3012cc 100644
+index fe3292e310d4..984068efd48b 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
-@@ -27,6 +27,7 @@ config X86_64
- # Options that are inherently 64-bit kernel only:
+@@ -28,6 +28,7 @@ config X86_64
select ARCH_HAS_GIGANTIC_PAGE
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
+ select ARCH_SUPPORTS_PER_VMA_LOCK
+ select ARCH_SUPPORTS_RT
select ARCH_USE_CMPXCHG_LOCKREF
select HAVE_ARCH_SOFT_DIRTY
diff --git a/buildroot-external/patches/linux/0011-x86-Enable-RT-also-on-32bit.patch b/buildroot-external/patches/linux/0017-x86-Enable-RT-also-on-32bit.patch
similarity index 75%
rename from buildroot-external/patches/linux/0011-x86-Enable-RT-also-on-32bit.patch
rename to buildroot-external/patches/linux/0017-x86-Enable-RT-also-on-32bit.patch
index f3fef621..834fea53 100644
--- a/buildroot-external/patches/linux/0011-x86-Enable-RT-also-on-32bit.patch
+++ b/buildroot-external/patches/linux/0017-x86-Enable-RT-also-on-32bit.patch
@@ -1,7 +1,7 @@
-From 4c22c4a6e79603a30f6b875c8fd65efdad35ac8f Mon Sep 17 00:00:00 2001
+From 44e5f60ede7f2407fe12fa477b09f1a156e0634b Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior
Date: Thu, 7 Nov 2019 17:49:20 +0100
-Subject: [PATCH 11/62] x86: Enable RT also on 32bit
+Subject: [PATCH 017/195] x86: Enable RT also on 32bit
Signed-off-by: Sebastian Andrzej Siewior
Signed-off-by: Thomas Gleixner
@@ -10,18 +10,18 @@ Signed-off-by: Thomas Gleixner
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index f7f81e3012cc..c9bed9c69423 100644
+index 984068efd48b..1b445e289190 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
-@@ -27,7 +27,6 @@ config X86_64
- # Options that are inherently 64-bit kernel only:
+@@ -28,7 +28,6 @@ config X86_64
select ARCH_HAS_GIGANTIC_PAGE
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
+ select ARCH_SUPPORTS_PER_VMA_LOCK
- select ARCH_SUPPORTS_RT
select ARCH_USE_CMPXCHG_LOCKREF
select HAVE_ARCH_SOFT_DIRTY
select MODULES_USE_ELF_RELA
-@@ -114,6 +113,7 @@ config X86
+@@ -118,6 +117,7 @@ config X86
select ARCH_USES_CFI_TRAPS if X86_64 && CFI_CLANG
select ARCH_SUPPORTS_LTO_CLANG
select ARCH_SUPPORTS_LTO_CLANG_THIN
diff --git a/buildroot-external/patches/linux/0018-locking-lockdep-Remove-lockdep_init_map_crosslock.patch b/buildroot-external/patches/linux/0018-locking-lockdep-Remove-lockdep_init_map_crosslock.patch
deleted file mode 100644
index 71ee0238..00000000
--- a/buildroot-external/patches/linux/0018-locking-lockdep-Remove-lockdep_init_map_crosslock.patch
+++ /dev/null
@@ -1,33 +0,0 @@
-From 0b833bd5bb0c55f681cf80dc6eaf59ab1cea7b51 Mon Sep 17 00:00:00 2001
-From: Sebastian Andrzej Siewior
-Date: Fri, 11 Mar 2022 17:44:57 +0100
-Subject: [PATCH 18/62] locking/lockdep: Remove lockdep_init_map_crosslock.
-
-The cross-release bits have been removed, lockdep_init_map_crosslock() is
-a leftover.
-
-Remove lockdep_init_map_crosslock.
-
-Signed-off-by: Sebastian Andrzej Siewior
-Reviewed-by: Waiman Long
-Link: https://lore.kernel.org/r/20220311164457.46461-1-bigeasy@linutronix.de
-Link: https://lore.kernel.org/r/YqITgY+2aPITu96z@linutronix.de
----
- include/linux/lockdep.h | 1 -
- 1 file changed, 1 deletion(-)
-
-diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
-index 1f1099dac3f0..1023f349af71 100644
---- a/include/linux/lockdep.h
-+++ b/include/linux/lockdep.h
-@@ -435,7 +435,6 @@ enum xhlock_context_t {
- XHLOCK_CTX_NR,
- };
-
--#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
- /*
- * To initialize a lockdep_map statically use this macro.
- * Note that _name must not be NULL.
---
-2.43.0
-
diff --git a/buildroot-external/patches/linux/0018-sched-rt-Don-t-try-push-tasks-if-there-are-none.patch b/buildroot-external/patches/linux/0018-sched-rt-Don-t-try-push-tasks-if-there-are-none.patch
new file mode 100644
index 00000000..886ab5e4
--- /dev/null
+++ b/buildroot-external/patches/linux/0018-sched-rt-Don-t-try-push-tasks-if-there-are-none.patch
@@ -0,0 +1,63 @@
+From f4ac796216884ec5942d1d74fde86d1ed5f1f695 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior
+Date: Tue, 1 Aug 2023 17:26:48 +0200
+Subject: [PATCH 018/195] sched/rt: Don't try push tasks if there are none.
+
+I have a RT task X at a high priority and cyclictest on each CPU with
+lower priority than X's. If X is active and each CPU wakes their own
+cylictest thread then it ends in a longer rto_push storm.
+A random CPU determines via balance_rt() that the CPU on which X is
+running needs to push tasks. X has the highest priority, cyclictest is
+next in line so there is nothing that can be done since the task with
+the higher priority is not touched.
+
+tell_cpu_to_push() increments rto_loop_next and schedules
+rto_push_irq_work_func() on X's CPU. The other CPUs also increment the
+loop counter and do the same. Once rto_push_irq_work_func() is active it
+does nothing because it has _no_ pushable tasks on its runqueue. Then
+checks rto_next_cpu() and decides to queue irq_work on the local CPU
+because another CPU requested a push by incrementing the counter.
+
+I have traces where ~30 CPUs request this ~3 times each before it
+finally ends. This greatly increases X's runtime while X isn't making
+much progress.
+
+Teach rto_next_cpu() to only return CPUs which also have tasks on their
+runqueue which can be pushed away. This does not reduce the
+tell_cpu_to_push() invocations (rto_loop_next counter increments) but
+reduces the amount of issued rto_push_irq_work_func() if nothing can be
+done. As the result the overloaded CPU is blocked less often.
+
+There are still cases where the "same job" is repeated several times
+(for instance the current CPU needs to resched but didn't yet because
+the irq-work is repeated a few times and so the old task remains on the
+CPU) but the majority of request end in tell_cpu_to_push() before an IPI
+is issued.
+
+Reviewed-by: "Steven Rostedt (Google)"
+Link: https://lore.kernel.org/r/20230801152648._y603AS_@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior
+---
+ kernel/sched/rt.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index 904dd8534597..563161845e79 100644
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -2249,8 +2249,11 @@ static int rto_next_cpu(struct root_domain *rd)
+
+ rd->rto_cpu = cpu;
+
+- if (cpu < nr_cpu_ids)
++ if (cpu < nr_cpu_ids) {
++ if (!has_pushable_tasks(cpu_rq(cpu)))
++ continue;
+ return cpu;
++ }
+
+ rd->rto_cpu = -1;
+
+--
+2.43.0
+
diff --git a/buildroot-external/patches/linux/0019-printk-Bring-back-the-RT-bits.patch b/buildroot-external/patches/linux/0019-printk-Bring-back-the-RT-bits.patch
deleted file mode 100644
index 762fe157..00000000
--- a/buildroot-external/patches/linux/0019-printk-Bring-back-the-RT-bits.patch
+++ /dev/null
@@ -1,1233 +0,0 @@
-From 353209a61511cccc1372d883ffe41fcff355055b Mon Sep 17 00:00:00 2001
-From: Sebastian Andrzej Siewior
-Date: Tue, 19 Jul 2022 20:08:01 +0200
-Subject: [PATCH 19/62] printk: Bring back the RT bits.
-
-This is a revert of the commits:
-| 07a22b61946f0 Revert "printk: add functions to prefer direct printing"
-| 5831788afb17b Revert "printk: add kthread console printers"
-| 2d9ef940f89e0 Revert "printk: extend console_lock for per-console locking"
-| 007eeab7e9f03 Revert "printk: remove @console_locked"
-| 05c96b3713aa2 Revert "printk: Block console kthreads when direct printing will be required"
-| 20fb0c8272bbb Revert "printk: Wait for the global console lock when the system is going down"
-
-which is needed for the atomic consoles which are used on PREEMPT_RT.
-
-Signed-off-by: Sebastian Andrzej Siewior
----
- drivers/tty/sysrq.c | 2 +
- include/linux/console.h | 17 ++
- include/linux/printk.h | 15 +
- kernel/hung_task.c | 11 +-
- kernel/panic.c | 4 +
- kernel/printk/internal.h | 2 +
- kernel/printk/printk.c | 586 ++++++++++++++++++++++++++++++++----
- kernel/printk/printk_safe.c | 32 ++
- kernel/rcu/tree_stall.h | 2 +
- kernel/reboot.c | 16 +-
- kernel/watchdog.c | 4 +
- kernel/watchdog_hld.c | 4 +
- 12 files changed, 636 insertions(+), 59 deletions(-)
-
-diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
-index 248067197287..0db9dad8c99f 100644
---- a/drivers/tty/sysrq.c
-+++ b/drivers/tty/sysrq.c
-@@ -582,6 +582,7 @@ void __handle_sysrq(int key, bool check_mask)
-
- rcu_sysrq_start();
- rcu_read_lock();
-+ printk_prefer_direct_enter();
- /*
- * Raise the apparent loglevel to maximum so that the sysrq header
- * is shown to provide the user with positive feedback. We do not
-@@ -623,6 +624,7 @@ void __handle_sysrq(int key, bool check_mask)
- pr_cont("\n");
- console_loglevel = orig_log_level;
- }
-+ printk_prefer_direct_exit();
- rcu_read_unlock();
- rcu_sysrq_end();
-
-diff --git a/include/linux/console.h b/include/linux/console.h
-index 8c1686e2c233..143653090c48 100644
---- a/include/linux/console.h
-+++ b/include/linux/console.h
-@@ -16,6 +16,7 @@
-
- #include
- #include
-+#include
-
- struct vc_data;
- struct console_font_op;
-@@ -153,6 +154,22 @@ struct console {
- uint ospeed;
- u64 seq;
- unsigned long dropped;
-+ struct task_struct *thread;
-+ bool blocked;
-+
-+ /*
-+ * The per-console lock is used by printing kthreads to synchronize
-+ * this console with callers of console_lock(). This is necessary in
-+ * order to allow printing kthreads to run in parallel to each other,
-+ * while each safely accessing the @blocked field and synchronizing
-+ * against direct printing via console_lock/console_unlock.
-+ *
-+ * Note: For synchronizing against direct printing via
-+ * console_trylock/console_unlock, see the static global
-+ * variable @console_kthreads_active.
-+ */
-+ struct mutex lock;
-+
- void *data;
- struct console *next;
- };
-diff --git a/include/linux/printk.h b/include/linux/printk.h
-index 8c81806c2e99..f8c4e4fa6d7d 100644
---- a/include/linux/printk.h
-+++ b/include/linux/printk.h
-@@ -168,6 +168,9 @@ extern void __printk_safe_exit(void);
- */
- #define printk_deferred_enter __printk_safe_enter
- #define printk_deferred_exit __printk_safe_exit
-+extern void printk_prefer_direct_enter(void);
-+extern void printk_prefer_direct_exit(void);
-+extern void try_block_console_kthreads(int timeout_ms);
-
- /*
- * Please don't use printk_ratelimit(), because it shares ratelimiting state
-@@ -219,6 +222,18 @@ static inline void printk_deferred_exit(void)
- {
- }
-
-+static inline void printk_prefer_direct_enter(void)
-+{
-+}
-+
-+static inline void printk_prefer_direct_exit(void)
-+{
-+}
-+
-+static inline void try_block_console_kthreads(int timeout_ms)
-+{
-+}
-+
- static inline int printk_ratelimit(void)
- {
- return 0;
-diff --git a/kernel/hung_task.c b/kernel/hung_task.c
-index c71889f3f3fc..e2d2344cb9f4 100644
---- a/kernel/hung_task.c
-+++ b/kernel/hung_task.c
-@@ -127,6 +127,8 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
- * complain:
- */
- if (sysctl_hung_task_warnings) {
-+ printk_prefer_direct_enter();
-+
- if (sysctl_hung_task_warnings > 0)
- sysctl_hung_task_warnings--;
- pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n",
-@@ -142,6 +144,8 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
-
- if (sysctl_hung_task_all_cpu_backtrace)
- hung_task_show_all_bt = true;
-+
-+ printk_prefer_direct_exit();
- }
-
- touch_nmi_watchdog();
-@@ -212,12 +216,17 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
- }
- unlock:
- rcu_read_unlock();
-- if (hung_task_show_lock)
-+ if (hung_task_show_lock) {
-+ printk_prefer_direct_enter();
- debug_show_all_locks();
-+ printk_prefer_direct_exit();
-+ }
-
- if (hung_task_show_all_bt) {
- hung_task_show_all_bt = false;
-+ printk_prefer_direct_enter();
- trigger_all_cpu_backtrace();
-+ printk_prefer_direct_exit();
- }
-
- if (hung_task_call_panic)
-diff --git a/kernel/panic.c b/kernel/panic.c
-index 63e94f3bd8dc..88cd873c7c30 100644
---- a/kernel/panic.c
-+++ b/kernel/panic.c
-@@ -653,6 +653,8 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
- {
- disable_trace_on_warning();
-
-+ printk_prefer_direct_enter();
-+
- if (file)
- pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n",
- raw_smp_processor_id(), current->pid, file, line,
-@@ -681,6 +683,8 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
-
- /* Just a warning, don't kill lockdep. */
- add_taint(taint, LOCKDEP_STILL_OK);
-+
-+ printk_prefer_direct_exit();
- }
-
- #ifndef __WARN_FLAGS
-diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h
-index d947ca6c84f9..e7d8578860ad 100644
---- a/kernel/printk/internal.h
-+++ b/kernel/printk/internal.h
-@@ -20,6 +20,8 @@ enum printk_info_flags {
- LOG_CONT = 8, /* text is a fragment of a continuation line */
- };
-
-+extern bool block_console_kthreads;
-+
- __printf(4, 0)
- int vprintk_store(int facility, int level,
- const struct dev_printk_info *dev_info,
-diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
-index cc53fb77f77c..e9f9b66608a0 100644
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -220,6 +220,36 @@ int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write,
- }
- #endif /* CONFIG_PRINTK && CONFIG_SYSCTL */
-
-+/*
-+ * Used to synchronize printing kthreads against direct printing via
-+ * console_trylock/console_unlock.
-+ *
-+ * Values:
-+ * -1 = console kthreads atomically blocked (via global trylock)
-+ * 0 = no kthread printing, console not locked (via trylock)
-+ * >0 = kthread(s) actively printing
-+ *
-+ * Note: For synchronizing against direct printing via
-+ * console_lock/console_unlock, see the @lock variable in
-+ * struct console.
-+ */
-+static atomic_t console_kthreads_active = ATOMIC_INIT(0);
-+
-+#define console_kthreads_atomic_tryblock() \
-+ (atomic_cmpxchg(&console_kthreads_active, 0, -1) == 0)
-+#define console_kthreads_atomic_unblock() \
-+ atomic_cmpxchg(&console_kthreads_active, -1, 0)
-+#define console_kthreads_atomically_blocked() \
-+ (atomic_read(&console_kthreads_active) == -1)
-+
-+#define console_kthread_printing_tryenter() \
-+ atomic_inc_unless_negative(&console_kthreads_active)
-+#define console_kthread_printing_exit() \
-+ atomic_dec(&console_kthreads_active)
-+
-+/* Block console kthreads to avoid processing new messages. */
-+bool block_console_kthreads;
-+
- /*
- * Helper macros to handle lockdep when locking/unlocking console_sem. We use
- * macros instead of functions so that _RET_IP_ contains useful information.
-@@ -268,14 +298,49 @@ static bool panic_in_progress(void)
- }
-
- /*
-- * This is used for debugging the mess that is the VT code by
-- * keeping track if we have the console semaphore held. It's
-- * definitely not the perfect debug tool (we don't know if _WE_
-- * hold it and are racing, but it helps tracking those weird code
-- * paths in the console code where we end up in places I want
-- * locked without the console semaphore held).
-+ * Tracks whether kthread printers are all blocked. A value of true implies
-+ * that the console is locked via console_lock() or the console is suspended.
-+ * Writing to this variable requires holding @console_sem.
-+ */
-+static bool console_kthreads_blocked;
-+
-+/*
-+ * Block all kthread printers from a schedulable context.
-+ *
-+ * Requires holding @console_sem.
-+ */
-+static void console_kthreads_block(void)
-+{
-+ struct console *con;
-+
-+ for_each_console(con) {
-+ mutex_lock(&con->lock);
-+ con->blocked = true;
-+ mutex_unlock(&con->lock);
-+ }
-+
-+ console_kthreads_blocked = true;
-+}
-+
-+/*
-+ * Unblock all kthread printers from a schedulable context.
-+ *
-+ * Requires holding @console_sem.
- */
--static int console_locked, console_suspended;
-+static void console_kthreads_unblock(void)
-+{
-+ struct console *con;
-+
-+ for_each_console(con) {
-+ mutex_lock(&con->lock);
-+ con->blocked = false;
-+ mutex_unlock(&con->lock);
-+ }
-+
-+ console_kthreads_blocked = false;
-+}
-+
-+static int console_suspended;
-
- /*
- * Array of consoles built from command line options (console=)
-@@ -358,7 +423,75 @@ static int console_msg_format = MSG_FORMAT_DEFAULT;
- /* syslog_lock protects syslog_* variables and write access to clear_seq. */
- static DEFINE_MUTEX(syslog_lock);
-
-+/*
-+ * A flag to signify if printk_activate_kthreads() has already started the
-+ * kthread printers. If true, any later registered consoles must start their
-+ * own kthread directly. The flag is write protected by the console_lock.
-+ */
-+static bool printk_kthreads_available;
-+
- #ifdef CONFIG_PRINTK
-+static atomic_t printk_prefer_direct = ATOMIC_INIT(0);
-+
-+/**
-+ * printk_prefer_direct_enter - cause printk() calls to attempt direct
-+ * printing to all enabled consoles
-+ *
-+ * Since it is not possible to call into the console printing code from any
-+ * context, there is no guarantee that direct printing will occur.
-+ *
-+ * This globally effects all printk() callers.
-+ *
-+ * Context: Any context.
-+ */
-+void printk_prefer_direct_enter(void)
-+{
-+ atomic_inc(&printk_prefer_direct);
-+}
-+
-+/**
-+ * printk_prefer_direct_exit - restore printk() behavior
-+ *
-+ * Context: Any context.
-+ */
-+void printk_prefer_direct_exit(void)
-+{
-+ WARN_ON(atomic_dec_if_positive(&printk_prefer_direct) < 0);
-+}
-+
-+/*
-+ * Calling printk() always wakes kthread printers so that they can
-+ * flush the new message to their respective consoles. Also, if direct
-+ * printing is allowed, printk() tries to flush the messages directly.
-+ *
-+ * Direct printing is allowed in situations when the kthreads
-+ * are not available or the system is in a problematic state.
-+ *
-+ * See the implementation about possible races.
-+ */
-+static inline bool allow_direct_printing(void)
-+{
-+ /*
-+ * Checking kthread availability is a possible race because the
-+ * kthread printers can become permanently disabled during runtime.
-+ * However, doing that requires holding the console_lock, so any
-+ * pending messages will be direct printed by console_unlock().
-+ */
-+ if (!printk_kthreads_available)
-+ return true;
-+
-+ /*
-+ * Prefer direct printing when the system is in a problematic state.
-+ * The context that sets this state will always see the updated value.
-+ * The other contexts do not care. Anyway, direct printing is just a
-+ * best effort. The direct output is only possible when console_lock
-+ * is not already taken and no kthread printers are actively printing.
-+ */
-+ return (system_state > SYSTEM_RUNNING ||
-+ oops_in_progress ||
-+ atomic_read(&printk_prefer_direct));
-+}
-+
- DECLARE_WAIT_QUEUE_HEAD(log_wait);
- /* All 3 protected by @syslog_lock. */
- /* the next printk record to read by syslog(READ) or /proc/kmsg */
-@@ -2249,10 +2382,10 @@ asmlinkage int vprintk_emit(int facility, int level,
- printed_len = vprintk_store(facility, level, dev_info, fmt, args);
-
- /* If called from the scheduler, we can not call up(). */
-- if (!in_sched) {
-+ if (!in_sched && allow_direct_printing()) {
- /*
- * The caller may be holding system-critical or
-- * timing-sensitive locks. Disable preemption during
-+ * timing-sensitive locks. Disable preemption during direct
- * printing of all remaining records to all consoles so that
- * this context can return as soon as possible. Hopefully
- * another printk() caller will take over the printing.
-@@ -2300,6 +2433,8 @@ EXPORT_SYMBOL(_printk);
- static bool pr_flush(int timeout_ms, bool reset_on_progress);
- static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress);
-
-+static void printk_start_kthread(struct console *con);
-+
- #else /* CONFIG_PRINTK */
-
- #define CONSOLE_LOG_MAX 0
-@@ -2334,6 +2469,8 @@ static void call_console_driver(struct console *con, const char *text, size_t le
- static bool suppress_message_printing(int level) { return false; }
- static bool pr_flush(int timeout_ms, bool reset_on_progress) { return true; }
- static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; }
-+static void printk_start_kthread(struct console *con) { }
-+static bool allow_direct_printing(void) { return true; }
-
- #endif /* CONFIG_PRINTK */
-
-@@ -2552,6 +2689,14 @@ static int console_cpu_notify(unsigned int cpu)
- /* If trylock fails, someone else is doing the printing */
- if (console_trylock())
- console_unlock();
-+ else {
-+ /*
-+ * If a new CPU comes online, the conditions for
-+ * printer_should_wake() may have changed for some
-+ * kthread printer with !CON_ANYTIME.
-+ */
-+ wake_up_klogd();
-+ }
- }
- return 0;
- }
-@@ -2594,7 +2739,7 @@ void console_lock(void)
- down_console_sem();
- if (console_suspended)
- return;
-- console_locked = 1;
-+ console_kthreads_block();
- console_may_schedule = 1;
- }
- EXPORT_SYMBOL(console_lock);
-@@ -2618,15 +2763,30 @@ int console_trylock(void)
- up_console_sem();
- return 0;
- }
-- console_locked = 1;
-+ if (!console_kthreads_atomic_tryblock()) {
-+ up_console_sem();
-+ return 0;
-+ }
- console_may_schedule = 0;
- return 1;
- }
- EXPORT_SYMBOL(console_trylock);
-
-+/*
-+ * This is used to help to make sure that certain paths within the VT code are
-+ * running with the console lock held. It is definitely not the perfect debug
-+ * tool (it is not known if the VT code is the task holding the console lock),
-+ * but it helps tracking those weird code paths in the console code such as
-+ * when the console is suspended: where the console is not locked but no
-+ * console printing may occur.
-+ *
-+ * Note: This returns true when the console is suspended but is not locked.
-+ * This is intentional because the VT code must consider that situation
-+ * the same as if the console was locked.
-+ */
- int is_console_locked(void)
- {
-- return console_locked;
-+ return (console_kthreads_blocked || atomic_read(&console_kthreads_active));
- }
- EXPORT_SYMBOL(is_console_locked);
-
-@@ -2636,12 +2796,9 @@ EXPORT_SYMBOL(is_console_locked);
- *
- * Requires the console_lock.
- */
--static inline bool console_is_usable(struct console *con)
-+static inline bool __console_is_usable(short flags)
- {
-- if (!(con->flags & CON_ENABLED))
-- return false;
--
-- if (!con->write)
-+ if (!(flags & CON_ENABLED))
- return false;
-
- /*
-@@ -2650,15 +2807,43 @@ static inline bool console_is_usable(struct console *con)
- * cope (CON_ANYTIME) don't call them until this CPU is officially up.
- */
- if (!cpu_online(raw_smp_processor_id()) &&
-- !(con->flags & CON_ANYTIME))
-+ !(flags & CON_ANYTIME))
- return false;
-
- return true;
- }
-
-+/*
-+ * Check if the given console is currently capable and allowed to print
-+ * records.
-+ *
-+ * Requires holding the console_lock.
-+ */
-+static inline bool console_is_usable(struct console *con)
-+{
-+ if (!con->write)
-+ return false;
-+
-+ return __console_is_usable(con->flags);
-+}
-+
- static void __console_unlock(void)
- {
-- console_locked = 0;
-+ /*
-+ * Depending on whether console_lock() or console_trylock() was used,
-+ * appropriately allow the kthread printers to continue.
-+ */
-+ if (console_kthreads_blocked)
-+ console_kthreads_unblock();
-+ else
-+ console_kthreads_atomic_unblock();
-+
-+ /*
-+ * New records may have arrived while the console was locked.
-+ * Wake the kthread printers to print them.
-+ */
-+ wake_up_klogd();
-+
- up_console_sem();
- }
-
-@@ -2676,17 +2861,19 @@ static void __console_unlock(void)
- *
- * @handover will be set to true if a printk waiter has taken over the
- * console_lock, in which case the caller is no longer holding the
-- * console_lock. Otherwise it is set to false.
-+ * console_lock. Otherwise it is set to false. A NULL pointer may be provided
-+ * to disable allowing the console_lock to be taken over by a printk waiter.
- *
- * Returns false if the given console has no next record to print, otherwise
- * true.
- *
-- * Requires the console_lock.
-+ * Requires the console_lock if @handover is non-NULL.
-+ * Requires con->lock otherwise.
- */
--static bool console_emit_next_record(struct console *con, char *text, char *ext_text,
-- char *dropped_text, bool *handover)
-+static bool __console_emit_next_record(struct console *con, char *text, char *ext_text,
-+ char *dropped_text, bool *handover)
- {
-- static int panic_console_dropped;
-+ static atomic_t panic_console_dropped = ATOMIC_INIT(0);
- struct printk_info info;
- struct printk_record r;
- unsigned long flags;
-@@ -2695,7 +2882,8 @@ static bool console_emit_next_record(struct console *con, char *text, char *ext_
-
- prb_rec_init_rd(&r, &info, text, CONSOLE_LOG_MAX);
-
-- *handover = false;
-+ if (handover)
-+ *handover = false;
-
- if (!prb_read_valid(prb, con->seq, &r))
- return false;
-@@ -2703,7 +2891,8 @@ static bool console_emit_next_record(struct console *con, char *text, char *ext_
- if (con->seq != r.info->seq) {
- con->dropped += r.info->seq - con->seq;
- con->seq = r.info->seq;
-- if (panic_in_progress() && panic_console_dropped++ > 10) {
-+ if (panic_in_progress() &&
-+ atomic_fetch_inc_relaxed(&panic_console_dropped) > 10) {
- suppress_panic_printk = 1;
- pr_warn_once("Too many dropped messages. Suppress messages on non-panic CPUs to prevent livelock.\n");
- }
-@@ -2725,31 +2914,61 @@ static bool console_emit_next_record(struct console *con, char *text, char *ext_
- len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time);
- }
-
-- /*
-- * While actively printing out messages, if another printk()
-- * were to occur on another CPU, it may wait for this one to
-- * finish. This task can not be preempted if there is a
-- * waiter waiting to take over.
-- *
-- * Interrupts are disabled because the hand over to a waiter
-- * must not be interrupted until the hand over is completed
-- * (@console_waiter is cleared).
-- */
-- printk_safe_enter_irqsave(flags);
-- console_lock_spinning_enable();
-+ if (handover) {
-+ /*
-+ * While actively printing out messages, if another printk()
-+ * were to occur on another CPU, it may wait for this one to
-+ * finish. This task can not be preempted if there is a
-+ * waiter waiting to take over.
-+ *
-+ * Interrupts are disabled because the hand over to a waiter
-+ * must not be interrupted until the hand over is completed
-+ * (@console_waiter is cleared).
-+ */
-+ printk_safe_enter_irqsave(flags);
-+ console_lock_spinning_enable();
-+
-+ /* don't trace irqsoff print latency */
-+ stop_critical_timings();
-+ }
-
-- stop_critical_timings(); /* don't trace print latency */
- call_console_driver(con, write_text, len, dropped_text);
-- start_critical_timings();
-
- con->seq++;
-
-- *handover = console_lock_spinning_disable_and_check();
-- printk_safe_exit_irqrestore(flags);
-+ if (handover) {
-+ start_critical_timings();
-+ *handover = console_lock_spinning_disable_and_check();
-+ printk_safe_exit_irqrestore(flags);
-+ }
- skip:
- return true;
- }
-
-+/*
-+ * Print a record for a given console, but allow another printk() caller to
-+ * take over the console_lock and continue printing.
-+ *
-+ * Requires the console_lock, but depending on @handover after the call, the
-+ * caller may no longer have the console_lock.
-+ *
-+ * See __console_emit_next_record() for argument and return details.
-+ */
-+static bool console_emit_next_record_transferable(struct console *con, char *text, char *ext_text,
-+ char *dropped_text, bool *handover)
-+{
-+ /*
-+ * Handovers are only supported if threaded printers are atomically
-+ * blocked. The context taking over the console_lock may be atomic.
-+ */
-+ if (!console_kthreads_atomically_blocked()) {
-+ *handover = false;
-+ handover = NULL;
-+ }
-+
-+ return __console_emit_next_record(con, text, ext_text, dropped_text, handover);
-+}
-+
- /*
- * Print out all remaining records to all consoles.
- *
-@@ -2768,8 +2987,8 @@ static bool console_emit_next_record(struct console *con, char *text, char *ext_
- * were flushed to all usable consoles. A returned false informs the caller
- * that everything was not flushed (either there were no usable consoles or
- * another context has taken over printing or it is a panic situation and this
-- * is not the panic CPU). Regardless the reason, the caller should assume it
-- * is not useful to immediately try again.
-+ * is not the panic CPU or direct printing is not preferred). Regardless the
-+ * reason, the caller should assume it is not useful to immediately try again.
- *
- * Requires the console_lock.
- */
-@@ -2786,6 +3005,10 @@ static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handove
- *handover = false;
-
- do {
-+ /* Let the kthread printers do the work if they can. */
-+ if (!allow_direct_printing())
-+ return false;
-+
- any_progress = false;
-
- for_each_console(con) {
-@@ -2797,13 +3020,11 @@ static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handove
-
- if (con->flags & CON_EXTENDED) {
- /* Extended consoles do not print "dropped messages". */
-- progress = console_emit_next_record(con, &text[0],
-- &ext_text[0], NULL,
-- handover);
-+ progress = console_emit_next_record_transferable(con, &text[0],
-+ &ext_text[0], NULL, handover);
- } else {
-- progress = console_emit_next_record(con, &text[0],
-- NULL, &dropped_text[0],
-- handover);
-+ progress = console_emit_next_record_transferable(con, &text[0],
-+ NULL, &dropped_text[0], handover);
- }
- if (*handover)
- return false;
-@@ -2918,10 +3139,13 @@ void console_unblank(void)
- if (oops_in_progress) {
- if (down_trylock_console_sem() != 0)
- return;
-+ if (!console_kthreads_atomic_tryblock()) {
-+ up_console_sem();
-+ return;
-+ }
- } else
- console_lock();
-
-- console_locked = 1;
- console_may_schedule = 0;
- for_each_console(c)
- if ((c->flags & CON_ENABLED) && c->unblank)
-@@ -3197,6 +3421,10 @@ void register_console(struct console *newcon)
- }
-
- newcon->dropped = 0;
-+ newcon->thread = NULL;
-+ newcon->blocked = true;
-+ mutex_init(&newcon->lock);
-+
- if (newcon->flags & CON_PRINTBUFFER) {
- /* Get a consistent copy of @syslog_seq. */
- mutex_lock(&syslog_lock);
-@@ -3206,6 +3434,10 @@ void register_console(struct console *newcon)
- /* Begin with next message. */
- newcon->seq = prb_next_seq(prb);
- }
-+
-+ if (printk_kthreads_available)
-+ printk_start_kthread(newcon);
-+
- console_unlock();
- console_sysfs_notify();
-
-@@ -3229,6 +3461,7 @@ EXPORT_SYMBOL(register_console);
-
- int unregister_console(struct console *console)
- {
-+ struct task_struct *thd;
- struct console *con;
- int res;
-
-@@ -3266,7 +3499,20 @@ int unregister_console(struct console *console)
- console_drivers->flags |= CON_CONSDEV;
-
- console->flags &= ~CON_ENABLED;
-+
-+ /*
-+ * console->thread can only be cleared under the console lock. But
-+ * stopping the thread must be done without the console lock. The
-+ * task that clears @thread is the task that stops the kthread.
-+ */
-+ thd = console->thread;
-+ console->thread = NULL;
-+
- console_unlock();
-+
-+ if (thd)
-+ kthread_stop(thd);
-+
- console_sysfs_notify();
-
- if (console->exit)
-@@ -3362,6 +3608,20 @@ static int __init printk_late_init(void)
- }
- late_initcall(printk_late_init);
-
-+static int __init printk_activate_kthreads(void)
-+{
-+ struct console *con;
-+
-+ console_lock();
-+ printk_kthreads_available = true;
-+ for_each_console(con)
-+ printk_start_kthread(con);
-+ console_unlock();
-+
-+ return 0;
-+}
-+early_initcall(printk_activate_kthreads);
-+
- #if defined CONFIG_PRINTK
- /* If @con is specified, only wait for that console. Otherwise wait for all. */
- static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress)
-@@ -3444,11 +3704,208 @@ static bool pr_flush(int timeout_ms, bool reset_on_progress)
- return __pr_flush(NULL, timeout_ms, reset_on_progress);
- }
-
-+static void __printk_fallback_preferred_direct(void)
-+{
-+ printk_prefer_direct_enter();
-+ pr_err("falling back to preferred direct printing\n");
-+ printk_kthreads_available = false;
-+}
-+
-+/*
-+ * Enter preferred direct printing, but never exit. Mark console threads as
-+ * unavailable. The system is then forever in preferred direct printing and
-+ * any printing threads will exit.
-+ *
-+ * Must *not* be called under console_lock. Use
-+ * __printk_fallback_preferred_direct() if already holding console_lock.
-+ */
-+static void printk_fallback_preferred_direct(void)
-+{
-+ console_lock();
-+ __printk_fallback_preferred_direct();
-+ console_unlock();
-+}
-+
-+/*
-+ * Print a record for a given console, not allowing another printk() caller
-+ * to take over. This is appropriate for contexts that do not have the
-+ * console_lock.
-+ *
-+ * See __console_emit_next_record() for argument and return details.
-+ */
-+static bool console_emit_next_record(struct console *con, char *text, char *ext_text,
-+ char *dropped_text)
-+{
-+ return __console_emit_next_record(con, text, ext_text, dropped_text, NULL);
-+}
-+
-+static bool printer_should_wake(struct console *con, u64 seq)
-+{
-+ short flags;
-+
-+ if (kthread_should_stop() || !printk_kthreads_available)
-+ return true;
-+
-+ if (con->blocked ||
-+ console_kthreads_atomically_blocked() ||
-+ block_console_kthreads ||
-+ system_state > SYSTEM_RUNNING ||
-+ oops_in_progress) {
-+ return false;
-+ }
-+
-+ /*
-+ * This is an unsafe read from con->flags, but a false positive is
-+ * not a problem. Worst case it would allow the printer to wake up
-+ * although it is disabled. But the printer will notice that when
-+ * attempting to print and instead go back to sleep.
-+ */
-+ flags = data_race(READ_ONCE(con->flags));
-+
-+ if (!__console_is_usable(flags))
-+ return false;
-+
-+ return prb_read_valid(prb, seq, NULL);
-+}
-+
-+static int printk_kthread_func(void *data)
-+{
-+ struct console *con = data;
-+ char *dropped_text = NULL;
-+ char *ext_text = NULL;
-+ u64 seq = 0;
-+ char *text;
-+ int error;
-+
-+ text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL);
-+ if (!text) {
-+ con_printk(KERN_ERR, con, "failed to allocate text buffer\n");
-+ printk_fallback_preferred_direct();
-+ goto out;
-+ }
-+
-+ if (con->flags & CON_EXTENDED) {
-+ ext_text = kmalloc(CONSOLE_EXT_LOG_MAX, GFP_KERNEL);
-+ if (!ext_text) {
-+ con_printk(KERN_ERR, con, "failed to allocate ext_text buffer\n");
-+ printk_fallback_preferred_direct();
-+ goto out;
-+ }
-+ } else {
-+ dropped_text = kmalloc(DROPPED_TEXT_MAX, GFP_KERNEL);
-+ if (!dropped_text) {
-+ con_printk(KERN_ERR, con, "failed to allocate dropped_text buffer\n");
-+ printk_fallback_preferred_direct();
-+ goto out;
-+ }
-+ }
-+
-+ con_printk(KERN_INFO, con, "printing thread started\n");
-+ for (;;) {
-+ /*
-+ * Guarantee this task is visible on the waitqueue before
-+ * checking the wake condition.
-+ *
-+ * The full memory barrier within set_current_state() of
-+ * prepare_to_wait_event() pairs with the full memory barrier
-+ * within wq_has_sleeper().
-+ *
-+ * This pairs with __wake_up_klogd:A.
-+ */
-+ error = wait_event_interruptible(log_wait,
-+ printer_should_wake(con, seq)); /* LMM(printk_kthread_func:A) */
-+
-+ if (kthread_should_stop() || !printk_kthreads_available)
-+ break;
-+
-+ if (error)
-+ continue;
-+
-+ error = mutex_lock_interruptible(&con->lock);
-+ if (error)
-+ continue;
-+
-+ if (con->blocked ||
-+ !console_kthread_printing_tryenter()) {
-+ /* Another context has locked the console_lock. */
-+ mutex_unlock(&con->lock);
-+ continue;
-+ }
-+
-+ /*
-+ * Although this context has not locked the console_lock, it
-+ * is known that the console_lock is not locked and it is not
-+ * possible for any other context to lock the console_lock.
-+ * Therefore it is safe to read con->flags.
-+ */
-+
-+ if (!__console_is_usable(con->flags)) {
-+ console_kthread_printing_exit();
-+ mutex_unlock(&con->lock);
-+ continue;
-+ }
-+
-+ /*
-+ * Even though the printk kthread is always preemptible, it is
-+ * still not allowed to call cond_resched() from within
-+ * console drivers. The task may become non-preemptible in the
-+ * console driver call chain. For example, vt_console_print()
-+ * takes a spinlock and then can call into fbcon_redraw(),
-+ * which can conditionally invoke cond_resched().
-+ */
-+ console_may_schedule = 0;
-+ console_emit_next_record(con, text, ext_text, dropped_text);
-+
-+ seq = con->seq;
-+
-+ console_kthread_printing_exit();
-+
-+ mutex_unlock(&con->lock);
-+ }
-+
-+ con_printk(KERN_INFO, con, "printing thread stopped\n");
-+out:
-+ kfree(dropped_text);
-+ kfree(ext_text);
-+ kfree(text);
-+
-+ console_lock();
-+ /*
-+ * If this kthread is being stopped by another task, con->thread will
-+ * already be NULL. That is fine. The important thing is that it is
-+ * NULL after the kthread exits.
-+ */
-+ con->thread = NULL;
-+ console_unlock();
-+
-+ return 0;
-+}
-+
-+/* Must be called under console_lock. */
-+static void printk_start_kthread(struct console *con)
-+{
-+ /*
-+ * Do not start a kthread if there is no write() callback. The
-+ * kthreads assume the write() callback exists.
-+ */
-+ if (!con->write)
-+ return;
-+
-+ con->thread = kthread_run(printk_kthread_func, con,
-+ "pr/%s%d", con->name, con->index);
-+ if (IS_ERR(con->thread)) {
-+ con->thread = NULL;
-+ con_printk(KERN_ERR, con, "unable to start printing thread\n");
-+ __printk_fallback_preferred_direct();
-+ return;
-+ }
-+}
-+
- /*
- * Delayed printk version, for scheduler-internal messages:
- */
--#define PRINTK_PENDING_WAKEUP 0x01
--#define PRINTK_PENDING_OUTPUT 0x02
-+#define PRINTK_PENDING_WAKEUP 0x01
-+#define PRINTK_PENDING_DIRECT_OUTPUT 0x02
-
- static DEFINE_PER_CPU(int, printk_pending);
-
-@@ -3456,10 +3913,14 @@ static void wake_up_klogd_work_func(struct irq_work *irq_work)
- {
- int pending = this_cpu_xchg(printk_pending, 0);
-
-- if (pending & PRINTK_PENDING_OUTPUT) {
-+ if (pending & PRINTK_PENDING_DIRECT_OUTPUT) {
-+ printk_prefer_direct_enter();
-+
- /* If trylock fails, someone else is doing the printing */
- if (console_trylock())
- console_unlock();
-+
-+ printk_prefer_direct_exit();
- }
-
- if (pending & PRINTK_PENDING_WAKEUP)
-@@ -3484,10 +3945,11 @@ static void __wake_up_klogd(int val)
- * prepare_to_wait_event(), which is called after ___wait_event() adds
- * the waiter but before it has checked the wait condition.
- *
-- * This pairs with devkmsg_read:A and syslog_print:A.
-+ * This pairs with devkmsg_read:A, syslog_print:A, and
-+ * printk_kthread_func:A.
- */
- if (wq_has_sleeper(&log_wait) || /* LMM(__wake_up_klogd:A) */
-- (val & PRINTK_PENDING_OUTPUT)) {
-+ (val & PRINTK_PENDING_DIRECT_OUTPUT)) {
- this_cpu_or(printk_pending, val);
- irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
- }
-@@ -3527,7 +3989,17 @@ void defer_console_output(void)
- * New messages may have been added directly to the ringbuffer
- * using vprintk_store(), so wake any waiters as well.
- */
-- __wake_up_klogd(PRINTK_PENDING_WAKEUP | PRINTK_PENDING_OUTPUT);
-+ int val = PRINTK_PENDING_WAKEUP;
-+
-+ /*
-+ * Make sure that some context will print the messages when direct
-+ * printing is allowed. This happens in situations when the kthreads
-+ * may not be as reliable or perhaps unusable.
-+ */
-+ if (allow_direct_printing())
-+ val |= PRINTK_PENDING_DIRECT_OUTPUT;
-+
-+ __wake_up_klogd(val);
- }
-
- void printk_trigger_flush(void)
-diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
-index 6d10927a07d8..8e8fd2fb0a5b 100644
---- a/kernel/printk/printk_safe.c
-+++ b/kernel/printk/printk_safe.c
-@@ -8,7 +8,9 @@
- #include
- #include
- #include
-+#include
- #include
-+#include
-
- #include "internal.h"
-
-@@ -45,3 +47,33 @@ asmlinkage int vprintk(const char *fmt, va_list args)
- return vprintk_default(fmt, args);
- }
- EXPORT_SYMBOL(vprintk);
-+
-+/**
-+ * try_block_console_kthreads() - Try to block console kthreads and
-+ * make the global console_lock() avaialble
-+ *
-+ * @timeout_ms: The maximum time (in ms) to wait.
-+ *
-+ * Prevent console kthreads from starting processing new messages. Wait
-+ * until the global console_lock() become available.
-+ *
-+ * Context: Can be called in any context.
-+ */
-+void try_block_console_kthreads(int timeout_ms)
-+{
-+ block_console_kthreads = true;
-+
-+ /* Do not wait when the console lock could not be safely taken. */
-+ if (this_cpu_read(printk_context) || in_nmi())
-+ return;
-+
-+ while (timeout_ms > 0) {
-+ if (console_trylock()) {
-+ console_unlock();
-+ return;
-+ }
-+
-+ udelay(1000);
-+ timeout_ms -= 1;
-+ }
-+}
-diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
-index 7d15b5b5a235..7b411d663e8f 100644
---- a/kernel/rcu/tree_stall.h
-+++ b/kernel/rcu/tree_stall.h
-@@ -648,6 +648,7 @@ static void print_cpu_stall(unsigned long gps)
- * See Documentation/RCU/stallwarn.rst for info on how to debug
- * RCU CPU stall warnings.
- */
-+ printk_prefer_direct_enter();
- trace_rcu_stall_warning(rcu_state.name, TPS("SelfDetected"));
- pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
- raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
-@@ -682,6 +683,7 @@ static void print_cpu_stall(unsigned long gps)
- */
- set_tsk_need_resched(current);
- set_preempt_need_resched();
-+ printk_prefer_direct_exit();
- }
-
- static void check_cpu_stall(struct rcu_data *rdp)
-diff --git a/kernel/reboot.c b/kernel/reboot.c
-index 6ebef11c8876..23a8cfed1a72 100644
---- a/kernel/reboot.c
-+++ b/kernel/reboot.c
-@@ -83,6 +83,7 @@ void kernel_restart_prepare(char *cmd)
- {
- blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
- system_state = SYSTEM_RESTART;
-+ try_block_console_kthreads(10000);
- usermodehelper_disable();
- device_shutdown();
- }
-@@ -283,6 +284,7 @@ static void kernel_shutdown_prepare(enum system_states state)
- blocking_notifier_call_chain(&reboot_notifier_list,
- (state == SYSTEM_HALT) ? SYS_HALT : SYS_POWER_OFF, NULL);
- system_state = state;
-+ try_block_console_kthreads(10000);
- usermodehelper_disable();
- device_shutdown();
- }
-@@ -837,9 +839,11 @@ static int __orderly_reboot(void)
- ret = run_cmd(reboot_cmd);
-
- if (ret) {
-+ printk_prefer_direct_enter();
- pr_warn("Failed to start orderly reboot: forcing the issue\n");
- emergency_sync();
- kernel_restart(NULL);
-+ printk_prefer_direct_exit();
- }
-
- return ret;
-@@ -852,6 +856,7 @@ static int __orderly_poweroff(bool force)
- ret = run_cmd(poweroff_cmd);
-
- if (ret && force) {
-+ printk_prefer_direct_enter();
- pr_warn("Failed to start orderly shutdown: forcing the issue\n");
-
- /*
-@@ -861,6 +866,7 @@ static int __orderly_poweroff(bool force)
- */
- emergency_sync();
- kernel_power_off();
-+ printk_prefer_direct_exit();
- }
-
- return ret;
-@@ -918,6 +924,8 @@ EXPORT_SYMBOL_GPL(orderly_reboot);
- */
- static void hw_failure_emergency_poweroff_func(struct work_struct *work)
- {
-+ printk_prefer_direct_enter();
-+
- /*
- * We have reached here after the emergency shutdown waiting period has
- * expired. This means orderly_poweroff has not been able to shut off
-@@ -934,6 +942,8 @@ static void hw_failure_emergency_poweroff_func(struct work_struct *work)
- */
- pr_emerg("Hardware protection shutdown failed. Trying emergency restart\n");
- emergency_restart();
-+
-+ printk_prefer_direct_exit();
- }
-
- static DECLARE_DELAYED_WORK(hw_failure_emergency_poweroff_work,
-@@ -972,11 +982,13 @@ void hw_protection_shutdown(const char *reason, int ms_until_forced)
- {
- static atomic_t allow_proceed = ATOMIC_INIT(1);
-
-+ printk_prefer_direct_enter();
-+
- pr_emerg("HARDWARE PROTECTION shutdown (%s)\n", reason);
-
- /* Shutdown should be initiated only once. */
- if (!atomic_dec_and_test(&allow_proceed))
-- return;
-+ goto out;
-
- /*
- * Queue a backup emergency shutdown in the event of
-@@ -984,6 +996,8 @@ void hw_protection_shutdown(const char *reason, int ms_until_forced)
- */
- hw_failure_emergency_poweroff(ms_until_forced);
- orderly_poweroff(true);
-+out:
-+ printk_prefer_direct_exit();
- }
- EXPORT_SYMBOL_GPL(hw_protection_shutdown);
-
-diff --git a/kernel/watchdog.c b/kernel/watchdog.c
-index 45693fb3e08d..f366008298ac 100644
---- a/kernel/watchdog.c
-+++ b/kernel/watchdog.c
-@@ -431,6 +431,8 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
- /* Start period for the next softlockup warning. */
- update_report_ts();
-
-+ printk_prefer_direct_enter();
-+
- pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
- smp_processor_id(), duration,
- current->comm, task_pid_nr(current));
-@@ -449,6 +451,8 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
- add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
- if (softlockup_panic)
- panic("softlockup: hung tasks");
-+
-+ printk_prefer_direct_exit();
- }
-
- return HRTIMER_RESTART;
-diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
-index 1e8a49dc956e..7c977b945c92 100644
---- a/kernel/watchdog_hld.c
-+++ b/kernel/watchdog_hld.c
-@@ -135,6 +135,8 @@ static void watchdog_overflow_callback(struct perf_event *event,
- if (__this_cpu_read(hard_watchdog_warn) == true)
- return;
-
-+ printk_prefer_direct_enter();
-+
- pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n",
- this_cpu);
- print_modules();
-@@ -155,6 +157,8 @@ static void watchdog_overflow_callback(struct perf_event *event,
- if (hardlockup_panic)
- nmi_panic(regs, "Hard LOCKUP");
-
-+ printk_prefer_direct_exit();
-+
- __this_cpu_write(hard_watchdog_warn, true);
- return;
- }
---
-2.43.0
-
diff --git a/buildroot-external/patches/linux/0012-softirq-Use-a-dedicated-thread-for-timer-wakeups.patch b/buildroot-external/patches/linux/0019-softirq-Use-a-dedicated-thread-for-timer-wakeups.patch
similarity index 91%
rename from buildroot-external/patches/linux/0012-softirq-Use-a-dedicated-thread-for-timer-wakeups.patch
rename to buildroot-external/patches/linux/0019-softirq-Use-a-dedicated-thread-for-timer-wakeups.patch
index 1443565e..53308866 100644
--- a/buildroot-external/patches/linux/0012-softirq-Use-a-dedicated-thread-for-timer-wakeups.patch
+++ b/buildroot-external/patches/linux/0019-softirq-Use-a-dedicated-thread-for-timer-wakeups.patch
@@ -1,7 +1,7 @@
-From d2864e2d892720940d5a3acb606abcc10089bd73 Mon Sep 17 00:00:00 2001
+From ddbe9f49b78c945704d68bd7333b354b13527703 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior
Date: Wed, 1 Dec 2021 17:41:09 +0100
-Subject: [PATCH 12/62] softirq: Use a dedicated thread for timer wakeups.
+Subject: [PATCH 019/195] softirq: Use a dedicated thread for timer wakeups.
A timer/hrtimer softirq is raised in-IRQ context. With threaded
interrupts enabled or on PREEMPT_RT this leads to waking the ksoftirqd
@@ -72,10 +72,10 @@ index 4a1dc88ddbff..0efba74a835c 100644
static inline struct task_struct *this_cpu_ksoftirqd(void)
diff --git a/kernel/softirq.c b/kernel/softirq.c
-index c8a6913c067d..ed6d7c41aa17 100644
+index 210cf5f8d92c..c29c30106eb8 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
-@@ -637,6 +637,29 @@ static inline void tick_irq_exit(void)
+@@ -619,6 +619,29 @@ static inline void tick_irq_exit(void)
#endif
}
@@ -105,7 +105,7 @@ index c8a6913c067d..ed6d7c41aa17 100644
static inline void __irq_exit_rcu(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
-@@ -646,8 +669,13 @@ static inline void __irq_exit_rcu(void)
+@@ -628,8 +651,13 @@ static inline void __irq_exit_rcu(void)
#endif
account_hardirq_exit(current);
preempt_count_sub(HARDIRQ_OFFSET);
@@ -121,7 +121,7 @@ index c8a6913c067d..ed6d7c41aa17 100644
tick_irq_exit();
}
-@@ -976,12 +1004,70 @@ static struct smp_hotplug_thread softirq_threads = {
+@@ -963,12 +991,70 @@ static struct smp_hotplug_thread softirq_threads = {
.thread_comm = "ksoftirqd/%u",
};
@@ -194,10 +194,10 @@ index c8a6913c067d..ed6d7c41aa17 100644
}
early_initcall(spawn_ksoftirqd);
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
-index 5561dabc9b22..c5d480d5da15 100644
+index 760793998cdd..9f4d7ab03e39 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -1805,7 +1805,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
+@@ -1808,7 +1808,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
if (!ktime_before(now, cpu_base->softirq_expires_next)) {
cpu_base->softirq_expires_next = KTIME_MAX;
cpu_base->softirq_activated = 1;
@@ -206,7 +206,7 @@ index 5561dabc9b22..c5d480d5da15 100644
}
__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
-@@ -1918,7 +1918,7 @@ void hrtimer_run_queues(void)
+@@ -1921,7 +1921,7 @@ void hrtimer_run_queues(void)
if (!ktime_before(now, cpu_base->softirq_expires_next)) {
cpu_base->softirq_expires_next = KTIME_MAX;
cpu_base->softirq_activated = 1;
@@ -216,10 +216,10 @@ index 5561dabc9b22..c5d480d5da15 100644
__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
-index 717fcb9fb14a..e6219da89933 100644
+index 63a8ce7177dd..7cad6fe3c035 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
-@@ -1822,7 +1822,7 @@ static void run_local_timers(void)
+@@ -2054,7 +2054,7 @@ static void run_local_timers(void)
if (time_before(jiffies, base->next_expiry))
return;
}
diff --git a/buildroot-external/patches/linux/0020-printk-add-infrastucture-for-atomic-consoles.patch b/buildroot-external/patches/linux/0020-printk-add-infrastucture-for-atomic-consoles.patch
deleted file mode 100644
index b3e7008c..00000000
--- a/buildroot-external/patches/linux/0020-printk-add-infrastucture-for-atomic-consoles.patch
+++ /dev/null
@@ -1,607 +0,0 @@
-From 30f2ae30ddb074556a0f9a9a6bcc4bb071d7ea12 Mon Sep 17 00:00:00 2001
-From: John Ogness
-Date: Fri, 4 Feb 2022 16:01:17 +0106
-Subject: [PATCH 20/62] printk: add infrastucture for atomic consoles
-
-Many times it is not possible to see the console output on
-panic because printing threads cannot be scheduled and/or the
-console is already taken and forcibly overtaking/busting the
-locks does provide the hoped results.
-
-Introduce a new infrastructure to support "atomic consoles".
-A new optional callback in struct console, write_atomic(), is
-available for consoles to provide an implemention for writing
-console messages. The implementation must be NMI safe if they
-can run on an architecture where NMIs exist.
-
-Console drivers implementing the write_atomic() callback must
-also select CONFIG_HAVE_ATOMIC_CONSOLE in order to enable the
-atomic console code within the printk subsystem.
-
-If atomic consoles are available, panic() will flush the kernel
-log only to the atomic consoles (before busting spinlocks).
-Afterwards, panic() will continue as before, which includes
-attempting to flush the other (non-atomic) consoles.
-
-Signed-off-by: John Ogness
-Signed-off-by: Sebastian Andrzej Siewior
----
- include/linux/console.h | 16 ++-
- init/Kconfig | 4 +
- kernel/panic.c | 6 +-
- kernel/printk/printk.c | 293 ++++++++++++++++++++++++++++++++++++----
- 4 files changed, 290 insertions(+), 29 deletions(-)
-
-diff --git a/include/linux/console.h b/include/linux/console.h
-index 143653090c48..8a813cbaf928 100644
---- a/include/linux/console.h
-+++ b/include/linux/console.h
-@@ -138,9 +138,19 @@ static inline int con_debug_leave(void)
- #define CON_BRL (32) /* Used for a braille device */
- #define CON_EXTENDED (64) /* Use the extended output format a la /dev/kmsg */
-
-+#ifdef CONFIG_HAVE_ATOMIC_CONSOLE
-+struct console_atomic_data {
-+ u64 seq;
-+ char *text;
-+ char *ext_text;
-+ char *dropped_text;
-+};
-+#endif
-+
- struct console {
- char name[16];
- void (*write)(struct console *, const char *, unsigned);
-+ void (*write_atomic)(struct console *, const char *, unsigned);
- int (*read)(struct console *, char *, unsigned);
- struct tty_driver *(*device)(struct console *, int *);
- void (*unblank)(void);
-@@ -153,7 +163,10 @@ struct console {
- uint ispeed;
- uint ospeed;
- u64 seq;
-- unsigned long dropped;
-+ atomic_long_t dropped;
-+#ifdef CONFIG_HAVE_ATOMIC_CONSOLE
-+ struct console_atomic_data *atomic_data;
-+#endif
- struct task_struct *thread;
- bool blocked;
-
-@@ -184,6 +197,7 @@ extern int console_set_on_cmdline;
- extern struct console *early_console;
-
- enum con_flush_mode {
-+ CONSOLE_ATOMIC_FLUSH_PENDING,
- CONSOLE_FLUSH_PENDING,
- CONSOLE_REPLAY_ALL,
- };
-diff --git a/init/Kconfig b/init/Kconfig
-index de255842f5d0..d45312780b3a 100644
---- a/init/Kconfig
-+++ b/init/Kconfig
-@@ -1582,6 +1582,10 @@ config PRINTK
- very difficult to diagnose system problems, saying N here is
- strongly discouraged.
-
-+config HAVE_ATOMIC_CONSOLE
-+ bool
-+ default n
-+
- config BUG
- bool "BUG() support" if EXPERT
- default y
-diff --git a/kernel/panic.c b/kernel/panic.c
-index 88cd873c7c30..97cc495d95f8 100644
---- a/kernel/panic.c
-+++ b/kernel/panic.c
-@@ -322,7 +322,6 @@ void panic(const char *fmt, ...)
- panic_smp_self_stop();
-
- console_verbose();
-- bust_spinlocks(1);
- va_start(args, fmt);
- len = vscnprintf(buf, sizeof(buf), fmt, args);
- va_end(args);
-@@ -339,6 +338,11 @@ void panic(const char *fmt, ...)
- dump_stack();
- #endif
-
-+ /* If atomic consoles are available, flush the kernel log. */
-+ console_flush_on_panic(CONSOLE_ATOMIC_FLUSH_PENDING);
-+
-+ bust_spinlocks(1);
-+
- /*
- * If kgdb is enabled, give it a chance to run before we stop all
- * the other CPUs or else we won't be able to debug processes left
-diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
-index e9f9b66608a0..73b1727087c7 100644
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -44,6 +44,7 @@
- #include
- #include
- #include
-+#include
- #include
- #include
- #include
-@@ -2060,19 +2061,28 @@ static int console_trylock_spinning(void)
- * dropped, a dropped message will be written out first.
- */
- static void call_console_driver(struct console *con, const char *text, size_t len,
-- char *dropped_text)
-+ char *dropped_text, bool atomic_printing)
- {
-+ unsigned long dropped = 0;
- size_t dropped_len;
-
-- if (con->dropped && dropped_text) {
-+ if (dropped_text)
-+ dropped = atomic_long_xchg_relaxed(&con->dropped, 0);
-+
-+ if (dropped) {
- dropped_len = snprintf(dropped_text, DROPPED_TEXT_MAX,
- "** %lu printk messages dropped **\n",
-- con->dropped);
-- con->dropped = 0;
-- con->write(con, dropped_text, dropped_len);
-+ dropped);
-+ if (atomic_printing)
-+ con->write_atomic(con, dropped_text, dropped_len);
-+ else
-+ con->write(con, dropped_text, dropped_len);
- }
-
-- con->write(con, text, len);
-+ if (atomic_printing)
-+ con->write_atomic(con, text, len);
-+ else
-+ con->write(con, text, len);
- }
-
- /*
-@@ -2430,6 +2440,76 @@ asmlinkage __visible int _printk(const char *fmt, ...)
- }
- EXPORT_SYMBOL(_printk);
-
-+#ifdef CONFIG_HAVE_ATOMIC_CONSOLE
-+static void __free_atomic_data(struct console_atomic_data *d)
-+{
-+ kfree(d->text);
-+ kfree(d->ext_text);
-+ kfree(d->dropped_text);
-+}
-+
-+static void free_atomic_data(struct console_atomic_data *d)
-+{
-+ int count = 1;
-+ int i;
-+
-+ if (!d)
-+ return;
-+
-+#ifdef CONFIG_HAVE_NMI
-+ count = 2;
-+#endif
-+
-+ for (i = 0; i < count; i++)
-+ __free_atomic_data(&d[i]);
-+ kfree(d);
-+}
-+
-+static int __alloc_atomic_data(struct console_atomic_data *d, short flags)
-+{
-+ d->text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL);
-+ if (!d->text)
-+ return -1;
-+
-+ if (flags & CON_EXTENDED) {
-+ d->ext_text = kmalloc(CONSOLE_EXT_LOG_MAX, GFP_KERNEL);
-+ if (!d->ext_text)
-+ return -1;
-+ } else {
-+ d->dropped_text = kmalloc(DROPPED_TEXT_MAX, GFP_KERNEL);
-+ if (!d->dropped_text)
-+ return -1;
-+ }
-+
-+ return 0;
-+}
-+
-+static struct console_atomic_data *alloc_atomic_data(short flags)
-+{
-+ struct console_atomic_data *d;
-+ int count = 1;
-+ int i;
-+
-+#ifdef CONFIG_HAVE_NMI
-+ count = 2;
-+#endif
-+
-+ d = kzalloc(sizeof(*d) * count, GFP_KERNEL);
-+ if (!d)
-+ goto err_out;
-+
-+ for (i = 0; i < count; i++) {
-+ if (__alloc_atomic_data(&d[i], flags) != 0)
-+ goto err_out;
-+ }
-+
-+ return d;
-+err_out:
-+ free_atomic_data(d);
-+ return NULL;
-+}
-+#endif /* CONFIG_HAVE_ATOMIC_CONSOLE */
-+
- static bool pr_flush(int timeout_ms, bool reset_on_progress);
- static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress);
-
-@@ -2445,6 +2525,8 @@ static void printk_start_kthread(struct console *con);
- #define prb_first_valid_seq(rb) 0
- #define prb_next_seq(rb) 0
-
-+#define free_atomic_data(d)
-+
- static u64 syslog_seq;
-
- static size_t record_print_text(const struct printk_record *r,
-@@ -2463,7 +2545,7 @@ static ssize_t msg_print_ext_body(char *buf, size_t size,
- static void console_lock_spinning_enable(void) { }
- static int console_lock_spinning_disable_and_check(void) { return 0; }
- static void call_console_driver(struct console *con, const char *text, size_t len,
-- char *dropped_text)
-+ char *dropped_text, bool atomic_printing)
- {
- }
- static bool suppress_message_printing(int level) { return false; }
-@@ -2819,10 +2901,20 @@ static inline bool __console_is_usable(short flags)
- *
- * Requires holding the console_lock.
- */
--static inline bool console_is_usable(struct console *con)
-+static inline bool console_is_usable(struct console *con, bool atomic_printing)
- {
-- if (!con->write)
-+ if (atomic_printing) {
-+#ifdef CONFIG_HAVE_ATOMIC_CONSOLE
-+ if (!con->write_atomic)
-+ return false;
-+ if (!con->atomic_data)
-+ return false;
-+#else
-+ return false;
-+#endif
-+ } else if (!con->write) {
- return false;
-+ }
-
- return __console_is_usable(con->flags);
- }
-@@ -2847,6 +2939,66 @@ static void __console_unlock(void)
- up_console_sem();
- }
-
-+static u64 read_console_seq(struct console *con)
-+{
-+#ifdef CONFIG_HAVE_ATOMIC_CONSOLE
-+ unsigned long flags;
-+ u64 seq2;
-+ u64 seq;
-+
-+ if (!con->atomic_data)
-+ return con->seq;
-+
-+ printk_cpu_sync_get_irqsave(flags);
-+
-+ seq = con->seq;
-+ seq2 = con->atomic_data[0].seq;
-+ if (seq2 > seq)
-+ seq = seq2;
-+#ifdef CONFIG_HAVE_NMI
-+ seq2 = con->atomic_data[1].seq;
-+ if (seq2 > seq)
-+ seq = seq2;
-+#endif
-+
-+ printk_cpu_sync_put_irqrestore(flags);
-+
-+ return seq;
-+#else /* CONFIG_HAVE_ATOMIC_CONSOLE */
-+ return con->seq;
-+#endif
-+}
-+
-+static void write_console_seq(struct console *con, u64 val, bool atomic_printing)
-+{
-+#ifdef CONFIG_HAVE_ATOMIC_CONSOLE
-+ unsigned long flags;
-+ u64 *seq;
-+
-+ if (!con->atomic_data) {
-+ con->seq = val;
-+ return;
-+ }
-+
-+ printk_cpu_sync_get_irqsave(flags);
-+
-+ if (atomic_printing) {
-+ seq = &con->atomic_data[0].seq;
-+#ifdef CONFIG_HAVE_NMI
-+ if (in_nmi())
-+ seq = &con->atomic_data[1].seq;
-+#endif
-+ } else {
-+ seq = &con->seq;
-+ }
-+ *seq = val;
-+
-+ printk_cpu_sync_put_irqrestore(flags);
-+#else /* CONFIG_HAVE_ATOMIC_CONSOLE */
-+ con->seq = val;
-+#endif
-+}
-+
- /*
- * Print one record for the given console. The record printed is whatever
- * record is the next available record for the given console.
-@@ -2859,6 +3011,8 @@ static void __console_unlock(void)
- * If dropped messages should be printed, @dropped_text is a buffer of size
- * DROPPED_TEXT_MAX. Otherwise @dropped_text must be NULL.
- *
-+ * @atomic_printing specifies if atomic printing should be used.
-+ *
- * @handover will be set to true if a printk waiter has taken over the
- * console_lock, in which case the caller is no longer holding the
- * console_lock. Otherwise it is set to false. A NULL pointer may be provided
-@@ -2871,7 +3025,8 @@ static void __console_unlock(void)
- * Requires con->lock otherwise.
- */
- static bool __console_emit_next_record(struct console *con, char *text, char *ext_text,
-- char *dropped_text, bool *handover)
-+ char *dropped_text, bool atomic_printing,
-+ bool *handover)
- {
- static atomic_t panic_console_dropped = ATOMIC_INIT(0);
- struct printk_info info;
-@@ -2879,18 +3034,22 @@ static bool __console_emit_next_record(struct console *con, char *text, char *ex
- unsigned long flags;
- char *write_text;
- size_t len;
-+ u64 seq;
-
- prb_rec_init_rd(&r, &info, text, CONSOLE_LOG_MAX);
-
- if (handover)
- *handover = false;
-
-- if (!prb_read_valid(prb, con->seq, &r))
-+ seq = read_console_seq(con);
-+
-+ if (!prb_read_valid(prb, seq, &r))
- return false;
-
-- if (con->seq != r.info->seq) {
-- con->dropped += r.info->seq - con->seq;
-- con->seq = r.info->seq;
-+ if (seq != r.info->seq) {
-+ atomic_long_add((unsigned long)(r.info->seq - seq), &con->dropped);
-+ write_console_seq(con, r.info->seq, atomic_printing);
-+ seq = r.info->seq;
- if (panic_in_progress() &&
- atomic_fetch_inc_relaxed(&panic_console_dropped) > 10) {
- suppress_panic_printk = 1;
-@@ -2900,7 +3059,7 @@ static bool __console_emit_next_record(struct console *con, char *text, char *ex
-
- /* Skip record that has level above the console loglevel. */
- if (suppress_message_printing(r.info->level)) {
-- con->seq++;
-+ write_console_seq(con, seq + 1, atomic_printing);
- goto skip;
- }
-
-@@ -2932,9 +3091,9 @@ static bool __console_emit_next_record(struct console *con, char *text, char *ex
- stop_critical_timings();
- }
-
-- call_console_driver(con, write_text, len, dropped_text);
-+ call_console_driver(con, write_text, len, dropped_text, atomic_printing);
-
-- con->seq++;
-+ write_console_seq(con, seq + 1, atomic_printing);
-
- if (handover) {
- start_critical_timings();
-@@ -2966,7 +3125,7 @@ static bool console_emit_next_record_transferable(struct console *con, char *tex
- handover = NULL;
- }
-
-- return __console_emit_next_record(con, text, ext_text, dropped_text, handover);
-+ return __console_emit_next_record(con, text, ext_text, dropped_text, false, handover);
- }
-
- /*
-@@ -3014,7 +3173,7 @@ static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handove
- for_each_console(con) {
- bool progress;
-
-- if (!console_is_usable(con))
-+ if (!console_is_usable(con, false))
- continue;
- any_usable = true;
-
-@@ -3049,6 +3208,68 @@ static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handove
- return any_usable;
- }
-
-+#if defined(CONFIG_HAVE_ATOMIC_CONSOLE) && defined(CONFIG_PRINTK)
-+static bool console_emit_next_record(struct console *con, char *text, char *ext_text,
-+ char *dropped_text, bool atomic_printing);
-+
-+static void atomic_console_flush_all(void)
-+{
-+ unsigned long flags;
-+ struct console *con;
-+ bool any_progress;
-+ int index = 0;
-+
-+ if (console_suspended)
-+ return;
-+
-+#ifdef CONFIG_HAVE_NMI
-+ if (in_nmi())
-+ index = 1;
-+#endif
-+
-+ printk_cpu_sync_get_irqsave(flags);
-+
-+ do {
-+ any_progress = false;
-+
-+ for_each_console(con) {
-+ bool progress;
-+
-+ if (!console_is_usable(con, true))
-+ continue;
-+
-+ if (con->flags & CON_EXTENDED) {
-+ /* Extended consoles do not print "dropped messages". */
-+ progress = console_emit_next_record(con,
-+ &con->atomic_data->text[index],
-+ &con->atomic_data->ext_text[index],
-+ NULL,
-+ true);
-+ } else {
-+ progress = console_emit_next_record(con,
-+ &con->atomic_data->text[index],
-+ NULL,
-+ &con->atomic_data->dropped_text[index],
-+ true);
-+ }
-+
-+ if (!progress)
-+ continue;
-+ any_progress = true;
-+
-+ touch_softlockup_watchdog_sync();
-+ clocksource_touch_watchdog();
-+ rcu_cpu_stall_reset();
-+ touch_nmi_watchdog();
-+ }
-+ } while (any_progress);
-+
-+ printk_cpu_sync_put_irqrestore(flags);
-+}
-+#else /* CONFIG_HAVE_ATOMIC_CONSOLE && CONFIG_PRINTK */
-+#define atomic_console_flush_all()
-+#endif
-+
- /**
- * console_unlock - unlock the console system
- *
-@@ -3164,6 +3385,11 @@ void console_unblank(void)
- */
- void console_flush_on_panic(enum con_flush_mode mode)
- {
-+ if (mode == CONSOLE_ATOMIC_FLUSH_PENDING) {
-+ atomic_console_flush_all();
-+ return;
-+ }
-+
- /*
- * If someone else is holding the console lock, trylock will fail
- * and may_schedule may be set. Ignore and proceed to unlock so
-@@ -3180,7 +3406,7 @@ void console_flush_on_panic(enum con_flush_mode mode)
-
- seq = prb_first_valid_seq(prb);
- for_each_console(c)
-- c->seq = seq;
-+ write_console_seq(c, seq, false);
- }
- console_unlock();
- }
-@@ -3420,19 +3646,22 @@ void register_console(struct console *newcon)
- console_drivers->next = newcon;
- }
-
-- newcon->dropped = 0;
-+ atomic_long_set(&newcon->dropped, 0);
- newcon->thread = NULL;
- newcon->blocked = true;
- mutex_init(&newcon->lock);
-+#ifdef CONFIG_HAVE_ATOMIC_CONSOLE
-+ newcon->atomic_data = NULL;
-+#endif
-
- if (newcon->flags & CON_PRINTBUFFER) {
- /* Get a consistent copy of @syslog_seq. */
- mutex_lock(&syslog_lock);
-- newcon->seq = syslog_seq;
-+ write_console_seq(newcon, syslog_seq, false);
- mutex_unlock(&syslog_lock);
- } else {
- /* Begin with next message. */
-- newcon->seq = prb_next_seq(prb);
-+ write_console_seq(newcon, prb_next_seq(prb), false);
- }
-
- if (printk_kthreads_available)
-@@ -3515,6 +3744,10 @@ int unregister_console(struct console *console)
-
- console_sysfs_notify();
-
-+#ifdef CONFIG_HAVE_ATOMIC_CONSOLE
-+ free_atomic_data(console->atomic_data);
-+#endif
-+
- if (console->exit)
- res = console->exit(console);
-
-@@ -3645,7 +3878,7 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre
- for_each_console(c) {
- if (con && con != c)
- continue;
-- if (!console_is_usable(c))
-+ if (!console_is_usable(c, false))
- continue;
- printk_seq = c->seq;
- if (printk_seq < seq)
-@@ -3734,9 +3967,10 @@ static void printk_fallback_preferred_direct(void)
- * See __console_emit_next_record() for argument and return details.
- */
- static bool console_emit_next_record(struct console *con, char *text, char *ext_text,
-- char *dropped_text)
-+ char *dropped_text, bool atomic_printing)
- {
-- return __console_emit_next_record(con, text, ext_text, dropped_text, NULL);
-+ return __console_emit_next_record(con, text, ext_text, dropped_text,
-+ atomic_printing, NULL);
- }
-
- static bool printer_should_wake(struct console *con, u64 seq)
-@@ -3777,6 +4011,11 @@ static int printk_kthread_func(void *data)
- char *text;
- int error;
-
-+#ifdef CONFIG_HAVE_ATOMIC_CONSOLE
-+ if (con->write_atomic)
-+ con->atomic_data = alloc_atomic_data(con->flags);
-+#endif
-+
- text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL);
- if (!text) {
- con_printk(KERN_ERR, con, "failed to allocate text buffer\n");
-@@ -3854,7 +4093,7 @@ static int printk_kthread_func(void *data)
- * which can conditionally invoke cond_resched().
- */
- console_may_schedule = 0;
-- console_emit_next_record(con, text, ext_text, dropped_text);
-+ console_emit_next_record(con, text, ext_text, dropped_text, false);
-
- seq = con->seq;
-
---
-2.43.0
-
diff --git a/buildroot-external/patches/linux/0013-rcutorture-Also-force-sched-priority-to-timersd-on-b.patch b/buildroot-external/patches/linux/0020-rcutorture-Also-force-sched-priority-to-timersd-on-b.patch
similarity index 88%
rename from buildroot-external/patches/linux/0013-rcutorture-Also-force-sched-priority-to-timersd-on-b.patch
rename to buildroot-external/patches/linux/0020-rcutorture-Also-force-sched-priority-to-timersd-on-b.patch
index 561de80d..c716957b 100644
--- a/buildroot-external/patches/linux/0013-rcutorture-Also-force-sched-priority-to-timersd-on-b.patch
+++ b/buildroot-external/patches/linux/0020-rcutorture-Also-force-sched-priority-to-timersd-on-b.patch
@@ -1,7 +1,7 @@
-From f760da57a984738f88b96dfa5edb89d0723901ca Mon Sep 17 00:00:00 2001
+From f089c645b517d22f97433deffaaedc9fd6f9b598 Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Tue, 5 Apr 2022 03:07:51 +0200
-Subject: [PATCH 13/62] rcutorture: Also force sched priority to timersd on
+Subject: [PATCH 020/195] rcutorture: Also force sched priority to timersd on
boosting test.
ksoftirqd is statically boosted to the priority level right above the
@@ -46,10 +46,10 @@ index 0efba74a835c..f459b0f27c94 100644
extern void raise_hrtimer_softirq(void);
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
-index 503c2aa845a4..dcd8c0e44c00 100644
+index ade42d6a9d9b..eebb9b4548fb 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
-@@ -2363,6 +2363,12 @@ static int rcutorture_booster_init(unsigned int cpu)
+@@ -2408,6 +2408,12 @@ static int rcutorture_booster_init(unsigned int cpu)
WARN_ON_ONCE(!t);
sp.sched_priority = 2;
sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
@@ -63,10 +63,10 @@ index 503c2aa845a4..dcd8c0e44c00 100644
/* Don't allow time recalculation while creating a new task. */
diff --git a/kernel/softirq.c b/kernel/softirq.c
-index ed6d7c41aa17..1892af494cdd 100644
+index c29c30106eb8..1277abc94228 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
-@@ -638,7 +638,7 @@ static inline void tick_irq_exit(void)
+@@ -620,7 +620,7 @@ static inline void tick_irq_exit(void)
}
#ifdef CONFIG_PREEMPT_RT
diff --git a/buildroot-external/patches/linux/0021-serial-8250-implement-write_atomic.patch b/buildroot-external/patches/linux/0021-serial-8250-implement-write_atomic.patch
deleted file mode 100644
index 8872dd8f..00000000
--- a/buildroot-external/patches/linux/0021-serial-8250-implement-write_atomic.patch
+++ /dev/null
@@ -1,937 +0,0 @@
-From eda5ed51c656510b41e8003537c4c101dd18e51f Mon Sep 17 00:00:00 2001
-From: John Ogness
-Date: Fri, 4 Feb 2022 16:01:17 +0106
-Subject: [PATCH 21/62] serial: 8250: implement write_atomic
-
-Implement a non-sleeping NMI-safe write_atomic() console function in
-order to support atomic console printing during a panic.
-
-Trasmitting data requires disabling interrupts. Since write_atomic()
-can be called from any context, it may be called while another CPU
-is executing in console code. In order to maintain the correct state
-of the IER register, use the global cpu_sync to synchronize all
-access to the IER register. This synchronization is only necessary
-for serial ports that are being used as consoles.
-
-The global cpu_sync is also used to synchronize between the write()
-and write_atomic() callbacks. write() synchronizes per character,
-write_atomic() synchronizes per line.
-
-Signed-off-by: John Ogness
-Signed-off-by: Sebastian Andrzej Siewior
----
- drivers/tty/serial/8250/8250.h | 41 ++++-
- drivers/tty/serial/8250/8250_aspeed_vuart.c | 2 +-
- drivers/tty/serial/8250/8250_bcm7271.c | 21 ++-
- drivers/tty/serial/8250/8250_core.c | 24 ++-
- drivers/tty/serial/8250/8250_exar.c | 4 +-
- drivers/tty/serial/8250/8250_fsl.c | 3 +-
- drivers/tty/serial/8250/8250_ingenic.c | 3 +-
- drivers/tty/serial/8250/8250_mtk.c | 32 +++-
- drivers/tty/serial/8250/8250_omap.c | 18 +--
- drivers/tty/serial/8250/8250_port.c | 158 ++++++++++++++++----
- drivers/tty/serial/8250/Kconfig | 1 +
- include/linux/serial_8250.h | 5 +
- 12 files changed, 261 insertions(+), 51 deletions(-)
-
-diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
-index eeb7b43ebe53..b17715d340c3 100644
---- a/drivers/tty/serial/8250/8250.h
-+++ b/drivers/tty/serial/8250/8250.h
-@@ -176,12 +176,49 @@ static inline void serial_dl_write(struct uart_8250_port *up, int value)
- up->dl_write(up, value);
- }
-
-+static inline int serial8250_in_IER(struct uart_8250_port *up)
-+{
-+ struct uart_port *port = &up->port;
-+ unsigned long flags;
-+ bool is_console;
-+ int ier;
-+
-+ is_console = uart_console(port);
-+
-+ if (is_console)
-+ printk_cpu_sync_get_irqsave(flags);
-+
-+ ier = serial_in(up, UART_IER);
-+
-+ if (is_console)
-+ printk_cpu_sync_put_irqrestore(flags);
-+
-+ return ier;
-+}
-+
-+static inline void serial8250_set_IER(struct uart_8250_port *up, int ier)
-+{
-+ struct uart_port *port = &up->port;
-+ unsigned long flags;
-+ bool is_console;
-+
-+ is_console = uart_console(port);
-+
-+ if (is_console)
-+ printk_cpu_sync_get_irqsave(flags);
-+
-+ serial_out(up, UART_IER, ier);
-+
-+ if (is_console)
-+ printk_cpu_sync_put_irqrestore(flags);
-+}
-+
- static inline bool serial8250_set_THRI(struct uart_8250_port *up)
- {
- if (up->ier & UART_IER_THRI)
- return false;
- up->ier |= UART_IER_THRI;
-- serial_out(up, UART_IER, up->ier);
-+ serial8250_set_IER(up, up->ier);
- return true;
- }
-
-@@ -190,7 +227,7 @@ static inline bool serial8250_clear_THRI(struct uart_8250_port *up)
- if (!(up->ier & UART_IER_THRI))
- return false;
- up->ier &= ~UART_IER_THRI;
-- serial_out(up, UART_IER, up->ier);
-+ serial8250_set_IER(up, up->ier);
- return true;
- }
-
-diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
-index 9d2a7856784f..7cc6b527c088 100644
---- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
-+++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
-@@ -278,7 +278,7 @@ static void __aspeed_vuart_set_throttle(struct uart_8250_port *up,
- up->ier &= ~irqs;
- if (!throttle)
- up->ier |= irqs;
-- serial_out(up, UART_IER, up->ier);
-+ serial8250_set_IER(up, up->ier);
- }
- static void aspeed_vuart_set_throttle(struct uart_port *port, bool throttle)
- {
-diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c
-index ffc7f67e27e3..8b211e668bc0 100644
---- a/drivers/tty/serial/8250/8250_bcm7271.c
-+++ b/drivers/tty/serial/8250/8250_bcm7271.c
-@@ -609,7 +609,7 @@ static int brcmuart_startup(struct uart_port *port)
- * will handle this.
- */
- up->ier &= ~UART_IER_RDI;
-- serial_port_out(port, UART_IER, up->ier);
-+ serial8250_set_IER(up, up->ier);
-
- priv->tx_running = false;
- priv->dma.rx_dma = NULL;
-@@ -775,10 +775,12 @@ static int brcmuart_handle_irq(struct uart_port *p)
- unsigned int iir = serial_port_in(p, UART_IIR);
- struct brcmuart_priv *priv = p->private_data;
- struct uart_8250_port *up = up_to_u8250p(p);
-+ unsigned long cs_flags;
- unsigned int status;
- unsigned long flags;
- unsigned int ier;
- unsigned int mcr;
-+ bool is_console;
- int handled = 0;
-
- /*
-@@ -789,6 +791,10 @@ static int brcmuart_handle_irq(struct uart_port *p)
- spin_lock_irqsave(&p->lock, flags);
- status = serial_port_in(p, UART_LSR);
- if ((status & UART_LSR_DR) == 0) {
-+ is_console = uart_console(p);
-+
-+ if (is_console)
-+ printk_cpu_sync_get_irqsave(cs_flags);
-
- ier = serial_port_in(p, UART_IER);
- /*
-@@ -809,6 +815,9 @@ static int brcmuart_handle_irq(struct uart_port *p)
- serial_port_in(p, UART_RX);
- }
-
-+ if (is_console)
-+ printk_cpu_sync_put_irqrestore(cs_flags);
-+
- handled = 1;
- }
- spin_unlock_irqrestore(&p->lock, flags);
-@@ -823,8 +832,10 @@ static enum hrtimer_restart brcmuart_hrtimer_func(struct hrtimer *t)
- struct brcmuart_priv *priv = container_of(t, struct brcmuart_priv, hrt);
- struct uart_port *p = priv->up;
- struct uart_8250_port *up = up_to_u8250p(p);
-+ unsigned long cs_flags;
- unsigned int status;
- unsigned long flags;
-+ bool is_console;
-
- if (priv->shutdown)
- return HRTIMER_NORESTART;
-@@ -846,12 +857,20 @@ static enum hrtimer_restart brcmuart_hrtimer_func(struct hrtimer *t)
- /* re-enable receive unless upper layer has disabled it */
- if ((up->ier & (UART_IER_RLSI | UART_IER_RDI)) ==
- (UART_IER_RLSI | UART_IER_RDI)) {
-+ is_console = uart_console(p);
-+
-+ if (is_console)
-+ printk_cpu_sync_get_irqsave(cs_flags);
-+
- status = serial_port_in(p, UART_IER);
- status |= (UART_IER_RLSI | UART_IER_RDI);
- serial_port_out(p, UART_IER, status);
- status = serial_port_in(p, UART_MCR);
- status |= UART_MCR_RTS;
- serial_port_out(p, UART_MCR, status);
-+
-+ if (is_console)
-+ printk_cpu_sync_put_irqrestore(cs_flags);
- }
- spin_unlock_irqrestore(&p->lock, flags);
- return HRTIMER_NORESTART;
-diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
-index 81a5dab1a828..536f639ff56c 100644
---- a/drivers/tty/serial/8250/8250_core.c
-+++ b/drivers/tty/serial/8250/8250_core.c
-@@ -255,8 +255,11 @@ static void serial8250_timeout(struct timer_list *t)
- static void serial8250_backup_timeout(struct timer_list *t)
- {
- struct uart_8250_port *up = from_timer(up, t, timer);
-+ struct uart_port *port = &up->port;
- unsigned int iir, ier = 0, lsr;
-+ unsigned long cs_flags;
- unsigned long flags;
-+ bool is_console;
-
- spin_lock_irqsave(&up->port.lock, flags);
-
-@@ -265,8 +268,16 @@ static void serial8250_backup_timeout(struct timer_list *t)
- * based handler.
- */
- if (up->port.irq) {
-+ is_console = uart_console(port);
-+
-+ if (is_console)
-+ printk_cpu_sync_get_irqsave(cs_flags);
-+
- ier = serial_in(up, UART_IER);
- serial_out(up, UART_IER, 0);
-+
-+ if (is_console)
-+ printk_cpu_sync_put_irqrestore(cs_flags);
- }
-
- iir = serial_in(up, UART_IIR);
-@@ -289,7 +300,7 @@ static void serial8250_backup_timeout(struct timer_list *t)
- serial8250_tx_chars(up);
-
- if (up->port.irq)
-- serial_out(up, UART_IER, ier);
-+ serial8250_set_IER(up, ier);
-
- spin_unlock_irqrestore(&up->port.lock, flags);
-
-@@ -575,6 +586,14 @@ serial8250_register_ports(struct uart_driver *drv, struct device *dev)
-
- #ifdef CONFIG_SERIAL_8250_CONSOLE
-
-+static void univ8250_console_write_atomic(struct console *co, const char *s,
-+ unsigned int count)
-+{
-+ struct uart_8250_port *up = &serial8250_ports[co->index];
-+
-+ serial8250_console_write_atomic(up, s, count);
-+}
-+
- static void univ8250_console_write(struct console *co, const char *s,
- unsigned int count)
- {
-@@ -668,6 +687,7 @@ static int univ8250_console_match(struct console *co, char *name, int idx,
-
- static struct console univ8250_console = {
- .name = "ttyS",
-+ .write_atomic = univ8250_console_write_atomic,
- .write = univ8250_console_write,
- .device = uart_console_device,
- .setup = univ8250_console_setup,
-@@ -961,7 +981,7 @@ static void serial_8250_overrun_backoff_work(struct work_struct *work)
- spin_lock_irqsave(&port->lock, flags);
- up->ier |= UART_IER_RLSI | UART_IER_RDI;
- up->port.read_status_mask |= UART_LSR_DR;
-- serial_out(up, UART_IER, up->ier);
-+ serial8250_set_IER(up, up->ier);
- spin_unlock_irqrestore(&port->lock, flags);
- }
-
-diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
-index b406cba10b0e..246c32c75a4c 100644
---- a/drivers/tty/serial/8250/8250_exar.c
-+++ b/drivers/tty/serial/8250/8250_exar.c
-@@ -189,6 +189,8 @@ static void xr17v35x_set_divisor(struct uart_port *p, unsigned int baud,
-
- static int xr17v35x_startup(struct uart_port *port)
- {
-+ struct uart_8250_port *up = up_to_u8250p(port);
-+
- /*
- * First enable access to IER [7:5], ISR [5:4], FCR [5:4],
- * MCR [7:5] and MSR [7:0]
-@@ -199,7 +201,7 @@ static int xr17v35x_startup(struct uart_port *port)
- * Make sure all interrups are masked until initialization is
- * complete and the FIFOs are cleared
- */
-- serial_port_out(port, UART_IER, 0);
-+ serial8250_set_IER(up, 0);
-
- return serial8250_do_startup(port);
- }
-diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c
-index 8adfaa183f77..eaf148245a10 100644
---- a/drivers/tty/serial/8250/8250_fsl.c
-+++ b/drivers/tty/serial/8250/8250_fsl.c
-@@ -58,7 +58,8 @@ int fsl8250_handle_irq(struct uart_port *port)
- if ((orig_lsr & UART_LSR_OE) && (up->overrun_backoff_time_ms > 0)) {
- unsigned long delay;
-
-- up->ier = port->serial_in(port, UART_IER);
-+ up->ier = serial8250_in_IER(up);
-+
- if (up->ier & (UART_IER_RLSI | UART_IER_RDI)) {
- port->ops->stop_rx(port);
- } else {
-diff --git a/drivers/tty/serial/8250/8250_ingenic.c b/drivers/tty/serial/8250/8250_ingenic.c
-index 2b2f5d8d24b9..2b78e6c394fb 100644
---- a/drivers/tty/serial/8250/8250_ingenic.c
-+++ b/drivers/tty/serial/8250/8250_ingenic.c
-@@ -146,6 +146,7 @@ OF_EARLYCON_DECLARE(x1000_uart, "ingenic,x1000-uart",
-
- static void ingenic_uart_serial_out(struct uart_port *p, int offset, int value)
- {
-+ struct uart_8250_port *up = up_to_u8250p(p);
- int ier;
-
- switch (offset) {
-@@ -167,7 +168,7 @@ static void ingenic_uart_serial_out(struct uart_port *p, int offset, int value)
- * If we have enabled modem status IRQs we should enable
- * modem mode.
- */
-- ier = p->serial_in(p, UART_IER);
-+ ier = serial8250_in_IER(up);
-
- if (ier & UART_IER_MSI)
- value |= UART_MCR_MDCE | UART_MCR_FCM;
-diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
-index fb1d5ec0940e..3e7203909d6a 100644
---- a/drivers/tty/serial/8250/8250_mtk.c
-+++ b/drivers/tty/serial/8250/8250_mtk.c
-@@ -222,12 +222,40 @@ static void mtk8250_shutdown(struct uart_port *port)
-
- static void mtk8250_disable_intrs(struct uart_8250_port *up, int mask)
- {
-- serial_out(up, UART_IER, serial_in(up, UART_IER) & (~mask));
-+ struct uart_port *port = &up->port;
-+ unsigned long flags;
-+ bool is_console;
-+ int ier;
-+
-+ is_console = uart_console(port);
-+
-+ if (is_console)
-+ printk_cpu_sync_get_irqsave(flags);
-+
-+ ier = serial_in(up, UART_IER);
-+ serial_out(up, UART_IER, ier & (~mask));
-+
-+ if (is_console)
-+ printk_cpu_sync_put_irqrestore(flags);
- }
-
- static void mtk8250_enable_intrs(struct uart_8250_port *up, int mask)
- {
-- serial_out(up, UART_IER, serial_in(up, UART_IER) | mask);
-+ struct uart_port *port = &up->port;
-+ unsigned long flags;
-+ bool is_console;
-+ int ier;
-+
-+ is_console = uart_console(port);
-+
-+ if (is_console)
-+ printk_cpu_sync_get_irqsave(flags);
-+
-+ ier = serial_in(up, UART_IER);
-+ serial_out(up, UART_IER, ier | mask);
-+
-+ if (is_console)
-+ printk_cpu_sync_put_irqrestore(flags);
- }
-
- static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode)
-diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
-index 0b04d810b3e6..2b8ad5176399 100644
---- a/drivers/tty/serial/8250/8250_omap.c
-+++ b/drivers/tty/serial/8250/8250_omap.c
-@@ -330,7 +330,7 @@ static void omap8250_restore_regs(struct uart_8250_port *up)
- /* drop TCR + TLR access, we setup XON/XOFF later */
- serial8250_out_MCR(up, mcr);
-
-- serial_out(up, UART_IER, up->ier);
-+ serial8250_set_IER(up, up->ier);
-
- serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
- serial_dl_write(up, priv->quot);
-@@ -520,7 +520,7 @@ static void omap_8250_pm(struct uart_port *port, unsigned int state,
- serial_out(up, UART_EFR, efr | UART_EFR_ECB);
- serial_out(up, UART_LCR, 0);
-
-- serial_out(up, UART_IER, (state != 0) ? UART_IERX_SLEEP : 0);
-+ serial8250_set_IER(up, (state != 0) ? UART_IERX_SLEEP : 0);
- serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
- serial_out(up, UART_EFR, efr);
- serial_out(up, UART_LCR, 0);
-@@ -703,7 +703,7 @@ static int omap_8250_startup(struct uart_port *port)
- goto err;
-
- up->ier = UART_IER_RLSI | UART_IER_RDI;
-- serial_out(up, UART_IER, up->ier);
-+ serial8250_set_IER(up, up->ier);
-
- #ifdef CONFIG_PM
- up->capabilities |= UART_CAP_RPM;
-@@ -744,7 +744,7 @@ static void omap_8250_shutdown(struct uart_port *port)
- serial_out(up, UART_OMAP_EFR2, 0x0);
-
- up->ier = 0;
-- serial_out(up, UART_IER, 0);
-+ serial8250_set_IER(up, 0);
-
- if (up->dma)
- serial8250_release_dma(up);
-@@ -792,7 +792,7 @@ static void omap_8250_unthrottle(struct uart_port *port)
- up->dma->rx_dma(up);
- up->ier |= UART_IER_RLSI | UART_IER_RDI;
- port->read_status_mask |= UART_LSR_DR;
-- serial_out(up, UART_IER, up->ier);
-+ serial8250_set_IER(up, up->ier);
- spin_unlock_irqrestore(&port->lock, flags);
-
- pm_runtime_mark_last_busy(port->dev);
-@@ -883,7 +883,7 @@ static void __dma_rx_complete(void *param)
- __dma_rx_do_complete(p);
- if (!priv->throttled) {
- p->ier |= UART_IER_RLSI | UART_IER_RDI;
-- serial_out(p, UART_IER, p->ier);
-+ serial8250_set_IER(p, p->ier);
- if (!(priv->habit & UART_HAS_EFR2))
- omap_8250_rx_dma(p);
- }
-@@ -940,7 +940,7 @@ static int omap_8250_rx_dma(struct uart_8250_port *p)
- * callback to run.
- */
- p->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
-- serial_out(p, UART_IER, p->ier);
-+ serial8250_set_IER(p, p->ier);
- }
- goto out;
- }
-@@ -1153,12 +1153,12 @@ static void am654_8250_handle_rx_dma(struct uart_8250_port *up, u8 iir,
- * periodic timeouts, re-enable interrupts.
- */
- up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
-- serial_out(up, UART_IER, up->ier);
-+ serial8250_set_IER(up, up->ier);
- omap_8250_rx_dma_flush(up);
- serial_in(up, UART_IIR);
- serial_out(up, UART_OMAP_EFR2, 0x0);
- up->ier |= UART_IER_RLSI | UART_IER_RDI;
-- serial_out(up, UART_IER, up->ier);
-+ serial8250_set_IER(up, up->ier);
- }
- }
-
-diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
-index 8efe31448df3..975c16267196 100644
---- a/drivers/tty/serial/8250/8250_port.c
-+++ b/drivers/tty/serial/8250/8250_port.c
-@@ -744,7 +744,7 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
- serial_out(p, UART_EFR, UART_EFR_ECB);
- serial_out(p, UART_LCR, 0);
- }
-- serial_out(p, UART_IER, sleep ? UART_IERX_SLEEP : 0);
-+ serial8250_set_IER(p, sleep ? UART_IERX_SLEEP : 0);
- if (p->capabilities & UART_CAP_EFR) {
- serial_out(p, UART_LCR, UART_LCR_CONF_MODE_B);
- serial_out(p, UART_EFR, efr);
-@@ -755,12 +755,29 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
- serial8250_rpm_put(p);
- }
-
--static void serial8250_clear_IER(struct uart_8250_port *up)
-+static unsigned int serial8250_clear_IER(struct uart_8250_port *up)
- {
-+ struct uart_port *port = &up->port;
-+ unsigned int clearval = 0;
-+ unsigned long flags;
-+ bool is_console;
-+ unsigned int prior;
-+
-+ is_console = uart_console(port);
-+
- if (up->capabilities & UART_CAP_UUE)
-- serial_out(up, UART_IER, UART_IER_UUE);
-- else
-- serial_out(up, UART_IER, 0);
-+ clearval = UART_IER_UUE;
-+
-+ if (is_console)
-+ printk_cpu_sync_get_irqsave(flags);
-+
-+ prior = serial_in(up, UART_IER);
-+ serial_out(up, UART_IER, clearval);
-+
-+ if (is_console)
-+ printk_cpu_sync_put_irqrestore(flags);
-+
-+ return prior;
- }
-
- #ifdef CONFIG_SERIAL_8250_RSA
-@@ -1026,8 +1043,11 @@ static int broken_efr(struct uart_8250_port *up)
- */
- static void autoconfig_16550a(struct uart_8250_port *up)
- {
-+ struct uart_port *port = &up->port;
- unsigned char status1, status2;
- unsigned int iersave;
-+ unsigned long flags;
-+ bool is_console;
-
- up->port.type = PORT_16550A;
- up->capabilities |= UART_CAP_FIFO;
-@@ -1139,6 +1159,11 @@ static void autoconfig_16550a(struct uart_8250_port *up)
- return;
- }
-
-+ is_console = uart_console(port);
-+
-+ if (is_console)
-+ printk_cpu_sync_get_irqsave(flags);
-+
- /*
- * Try writing and reading the UART_IER_UUE bit (b6).
- * If it works, this is probably one of the Xscale platform's
-@@ -1174,6 +1199,9 @@ static void autoconfig_16550a(struct uart_8250_port *up)
- }
- serial_out(up, UART_IER, iersave);
-
-+ if (is_console)
-+ printk_cpu_sync_put_irqrestore(flags);
-+
- /*
- * We distinguish between 16550A and U6 16550A by counting
- * how many bytes are in the FIFO.
-@@ -1196,8 +1224,10 @@ static void autoconfig(struct uart_8250_port *up)
- unsigned char status1, scratch, scratch2, scratch3;
- unsigned char save_lcr, save_mcr;
- struct uart_port *port = &up->port;
-+ unsigned long cs_flags;
- unsigned long flags;
- unsigned int old_capabilities;
-+ bool is_console;
-
- if (!port->iobase && !port->mapbase && !port->membase)
- return;
-@@ -1215,6 +1245,11 @@ static void autoconfig(struct uart_8250_port *up)
- up->bugs = 0;
-
- if (!(port->flags & UPF_BUGGY_UART)) {
-+ is_console = uart_console(port);
-+
-+ if (is_console)
-+ printk_cpu_sync_get_irqsave(cs_flags);
-+
- /*
- * Do a simple existence test first; if we fail this,
- * there's no point trying anything else.
-@@ -1244,6 +1279,10 @@ static void autoconfig(struct uart_8250_port *up)
- #endif
- scratch3 = serial_in(up, UART_IER) & 0x0f;
- serial_out(up, UART_IER, scratch);
-+
-+ if (is_console)
-+ printk_cpu_sync_put_irqrestore(cs_flags);
-+
- if (scratch2 != 0 || scratch3 != 0x0F) {
- /*
- * We failed; there's nothing here
-@@ -1367,7 +1406,9 @@ static void autoconfig_irq(struct uart_8250_port *up)
- unsigned char save_mcr, save_ier;
- unsigned char save_ICP = 0;
- unsigned int ICP = 0;
-+ unsigned long flags;
- unsigned long irqs;
-+ bool is_console;
- int irq;
-
- if (port->flags & UPF_FOURPORT) {
-@@ -1377,8 +1418,12 @@ static void autoconfig_irq(struct uart_8250_port *up)
- inb_p(ICP);
- }
-
-- if (uart_console(port))
-+ is_console = uart_console(port);
-+
-+ if (is_console) {
- console_lock();
-+ printk_cpu_sync_get_irqsave(flags);
-+ }
-
- /* forget possible initially masked and pending IRQ */
- probe_irq_off(probe_irq_on());
-@@ -1410,8 +1455,10 @@ static void autoconfig_irq(struct uart_8250_port *up)
- if (port->flags & UPF_FOURPORT)
- outb_p(save_ICP, ICP);
-
-- if (uart_console(port))
-+ if (is_console) {
-+ printk_cpu_sync_put_irqrestore(flags);
- console_unlock();
-+ }
-
- port->irq = (irq > 0) ? irq : 0;
- }
-@@ -1424,7 +1471,7 @@ static void serial8250_stop_rx(struct uart_port *port)
-
- up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
- up->port.read_status_mask &= ~UART_LSR_DR;
-- serial_port_out(port, UART_IER, up->ier);
-+ serial8250_set_IER(up, up->ier);
-
- serial8250_rpm_put(up);
- }
-@@ -1454,7 +1501,7 @@ void serial8250_em485_stop_tx(struct uart_8250_port *p)
- serial8250_clear_and_reinit_fifos(p);
-
- p->ier |= UART_IER_RLSI | UART_IER_RDI;
-- serial_port_out(&p->port, UART_IER, p->ier);
-+ serial8250_set_IER(p, p->ier);
- }
- }
- EXPORT_SYMBOL_GPL(serial8250_em485_stop_tx);
-@@ -1703,7 +1750,7 @@ static void serial8250_disable_ms(struct uart_port *port)
- mctrl_gpio_disable_ms(up->gpios);
-
- up->ier &= ~UART_IER_MSI;
-- serial_port_out(port, UART_IER, up->ier);
-+ serial8250_set_IER(up, up->ier);
- }
-
- static void serial8250_enable_ms(struct uart_port *port)
-@@ -1719,7 +1766,7 @@ static void serial8250_enable_ms(struct uart_port *port)
- up->ier |= UART_IER_MSI;
-
- serial8250_rpm_get(up);
-- serial_port_out(port, UART_IER, up->ier);
-+ serial8250_set_IER(up, up->ier);
- serial8250_rpm_put(up);
- }
-
-@@ -2174,8 +2221,7 @@ static void serial8250_put_poll_char(struct uart_port *port,
- /*
- * First save the IER then disable the interrupts
- */
-- ier = serial_port_in(port, UART_IER);
-- serial8250_clear_IER(up);
-+ ier = serial8250_clear_IER(up);
-
- wait_for_xmitr(up, UART_LSR_BOTH_EMPTY);
- /*
-@@ -2188,7 +2234,7 @@ static void serial8250_put_poll_char(struct uart_port *port,
- * and restore the IER
- */
- wait_for_xmitr(up, UART_LSR_BOTH_EMPTY);
-- serial_port_out(port, UART_IER, ier);
-+ serial8250_set_IER(up, ier);
- serial8250_rpm_put(up);
- }
-
-@@ -2197,8 +2243,10 @@ static void serial8250_put_poll_char(struct uart_port *port,
- int serial8250_do_startup(struct uart_port *port)
- {
- struct uart_8250_port *up = up_to_u8250p(port);
-+ unsigned long cs_flags;
- unsigned long flags;
- unsigned char iir;
-+ bool is_console;
- int retval;
- u16 lsr;
-
-@@ -2219,7 +2267,7 @@ int serial8250_do_startup(struct uart_port *port)
- up->acr = 0;
- serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
- serial_port_out(port, UART_EFR, UART_EFR_ECB);
-- serial_port_out(port, UART_IER, 0);
-+ serial8250_set_IER(up, 0);
- serial_port_out(port, UART_LCR, 0);
- serial_icr_write(up, UART_CSR, 0); /* Reset the UART */
- serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
-@@ -2229,7 +2277,7 @@ int serial8250_do_startup(struct uart_port *port)
-
- if (port->type == PORT_DA830) {
- /* Reset the port */
-- serial_port_out(port, UART_IER, 0);
-+ serial8250_set_IER(up, 0);
- serial_port_out(port, UART_DA830_PWREMU_MGMT, 0);
- mdelay(10);
-
-@@ -2328,6 +2376,8 @@ int serial8250_do_startup(struct uart_port *port)
- if (retval)
- goto out;
-
-+ is_console = uart_console(port);
-+
- if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) {
- unsigned char iir1;
-
-@@ -2344,6 +2394,9 @@ int serial8250_do_startup(struct uart_port *port)
- */
- spin_lock_irqsave(&port->lock, flags);
-
-+ if (is_console)
-+ printk_cpu_sync_get_irqsave(cs_flags);
-+
- wait_for_xmitr(up, UART_LSR_THRE);
- serial_port_out_sync(port, UART_IER, UART_IER_THRI);
- udelay(1); /* allow THRE to set */
-@@ -2354,6 +2407,9 @@ int serial8250_do_startup(struct uart_port *port)
- iir = serial_port_in(port, UART_IIR);
- serial_port_out(port, UART_IER, 0);
-
-+ if (is_console)
-+ printk_cpu_sync_put_irqrestore(cs_flags);
-+
- spin_unlock_irqrestore(&port->lock, flags);
-
- if (port->irqflags & IRQF_SHARED)
-@@ -2408,10 +2464,14 @@ int serial8250_do_startup(struct uart_port *port)
- * Do a quick test to see if we receive an interrupt when we enable
- * the TX irq.
- */
-+ if (is_console)
-+ printk_cpu_sync_get_irqsave(cs_flags);
- serial_port_out(port, UART_IER, UART_IER_THRI);
- lsr = serial_port_in(port, UART_LSR);
- iir = serial_port_in(port, UART_IIR);
- serial_port_out(port, UART_IER, 0);
-+ if (is_console)
-+ printk_cpu_sync_put_irqrestore(cs_flags);
-
- if (lsr & UART_LSR_TEMT && iir & UART_IIR_NO_INT) {
- if (!(up->bugs & UART_BUG_TXEN)) {
-@@ -2443,7 +2503,7 @@ int serial8250_do_startup(struct uart_port *port)
- if (up->dma) {
- const char *msg = NULL;
-
-- if (uart_console(port))
-+ if (is_console)
- msg = "forbid DMA for kernel console";
- else if (serial8250_request_dma(up))
- msg = "failed to request DMA";
-@@ -2494,7 +2554,7 @@ void serial8250_do_shutdown(struct uart_port *port)
- */
- spin_lock_irqsave(&port->lock, flags);
- up->ier = 0;
-- serial_port_out(port, UART_IER, 0);
-+ serial8250_set_IER(up, 0);
- spin_unlock_irqrestore(&port->lock, flags);
-
- synchronize_irq(port->irq);
-@@ -2856,7 +2916,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
- if (up->capabilities & UART_CAP_RTOIE)
- up->ier |= UART_IER_RTOIE;
-
-- serial_port_out(port, UART_IER, up->ier);
-+ serial8250_set_IER(up, up->ier);
-
- if (up->capabilities & UART_CAP_EFR) {
- unsigned char efr = 0;
-@@ -3321,7 +3381,7 @@ EXPORT_SYMBOL_GPL(serial8250_set_defaults);
-
- #ifdef CONFIG_SERIAL_8250_CONSOLE
-
--static void serial8250_console_putchar(struct uart_port *port, unsigned char ch)
-+static void serial8250_console_putchar_locked(struct uart_port *port, unsigned char ch)
- {
- struct uart_8250_port *up = up_to_u8250p(port);
-
-@@ -3329,6 +3389,18 @@ static void serial8250_console_putchar(struct uart_port *port, unsigned char ch)
- serial_port_out(port, UART_TX, ch);
- }
-
-+static void serial8250_console_putchar(struct uart_port *port, unsigned char ch)
-+{
-+ struct uart_8250_port *up = up_to_u8250p(port);
-+ unsigned long flags;
-+
-+ wait_for_xmitr(up, UART_LSR_THRE);
-+
-+ printk_cpu_sync_get_irqsave(flags);
-+ serial8250_console_putchar_locked(port, ch);
-+ printk_cpu_sync_put_irqrestore(flags);
-+}
-+
- /*
- * Restore serial console when h/w power-off detected
- */
-@@ -3355,6 +3427,32 @@ static void serial8250_console_restore(struct uart_8250_port *up)
- serial8250_out_MCR(up, up->mcr | UART_MCR_DTR | UART_MCR_RTS);
- }
-
-+void serial8250_console_write_atomic(struct uart_8250_port *up,
-+ const char *s, unsigned int count)
-+{
-+ struct uart_port *port = &up->port;
-+ unsigned long flags;
-+ unsigned int ier;
-+
-+ printk_cpu_sync_get_irqsave(flags);
-+
-+ touch_nmi_watchdog();
-+
-+ ier = serial8250_clear_IER(up);
-+
-+ if (atomic_fetch_inc(&up->console_printing)) {
-+ uart_console_write(port, "\n", 1,
-+ serial8250_console_putchar_locked);
-+ }
-+ uart_console_write(port, s, count, serial8250_console_putchar_locked);
-+ atomic_dec(&up->console_printing);
-+
-+ wait_for_xmitr(up, UART_LSR_BOTH_EMPTY);
-+ serial8250_set_IER(up, ier);
-+
-+ printk_cpu_sync_put_irqrestore(flags);
-+}
-+
- /*
- * Print a string to the serial port using the device FIFO
- *
-@@ -3400,20 +3498,15 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
- struct uart_port *port = &up->port;
- unsigned long flags;
- unsigned int ier, use_fifo;
-- int locked = 1;
-
- touch_nmi_watchdog();
-
-- if (oops_in_progress)
-- locked = spin_trylock_irqsave(&port->lock, flags);
-- else
-- spin_lock_irqsave(&port->lock, flags);
-+ spin_lock_irqsave(&port->lock, flags);
-
- /*
- * First save the IER then disable the interrupts
- */
-- ier = serial_port_in(port, UART_IER);
-- serial8250_clear_IER(up);
-+ ier = serial8250_clear_IER(up);
-
- /* check scratch reg to see if port powered off during system sleep */
- if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) {
-@@ -3447,10 +3540,12 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
- */
- !(up->port.flags & UPF_CONS_FLOW);
-
-+ atomic_inc(&up->console_printing);
- if (likely(use_fifo))
- serial8250_console_fifo_write(up, s, count);
- else
- uart_console_write(port, s, count, serial8250_console_putchar);
-+ atomic_dec(&up->console_printing);
-
- /*
- * Finally, wait for transmitter to become empty
-@@ -3463,8 +3558,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
- if (em485->tx_stopped)
- up->rs485_stop_tx(up);
- }
--
-- serial_port_out(port, UART_IER, ier);
-+ serial8250_set_IER(up, ier);
-
- /*
- * The receive handling will happen properly because the
-@@ -3476,8 +3570,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
- if (up->msr_saved_flags)
- serial8250_modem_status(up);
-
-- if (locked)
-- spin_unlock_irqrestore(&port->lock, flags);
-+ spin_unlock_irqrestore(&port->lock, flags);
- }
-
- static unsigned int probe_baud(struct uart_port *port)
-@@ -3497,6 +3590,7 @@ static unsigned int probe_baud(struct uart_port *port)
-
- int serial8250_console_setup(struct uart_port *port, char *options, bool probe)
- {
-+ struct uart_8250_port *up = up_to_u8250p(port);
- int baud = 9600;
- int bits = 8;
- int parity = 'n';
-@@ -3506,6 +3600,8 @@ int serial8250_console_setup(struct uart_port *port, char *options, bool probe)
- if (!port->iobase && !port->membase)
- return -ENODEV;
-
-+ atomic_set(&up->console_printing, 0);
-+
- if (options)
- uart_parse_options(options, &baud, &parity, &bits, &flow);
- else if (probe)
-diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
-index 583a340f9934..1f31320820a6 100644
---- a/drivers/tty/serial/8250/Kconfig
-+++ b/drivers/tty/serial/8250/Kconfig
-@@ -9,6 +9,7 @@ config SERIAL_8250
- depends on !S390
- select SERIAL_CORE
- select SERIAL_MCTRL_GPIO if GPIOLIB
-+ select HAVE_ATOMIC_CONSOLE
- help
- This selects whether you want to include the driver for the standard
- serial ports. The standard answer is Y. People who might say N
-diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
-index 79b328861c5f..35f44352e641 100644
---- a/include/linux/serial_8250.h
-+++ b/include/linux/serial_8250.h
-@@ -7,6 +7,7 @@
- #ifndef _LINUX_SERIAL_8250_H
- #define _LINUX_SERIAL_8250_H
-
-+#include
- #include
- #include
- #include
-@@ -124,6 +125,8 @@ struct uart_8250_port {
- #define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA
- unsigned char msr_saved_flags;
-
-+ atomic_t console_printing;
-+
- struct uart_8250_dma *dma;
- const struct uart_8250_ops *ops;
-
-@@ -179,6 +182,8 @@ void serial8250_init_port(struct uart_8250_port *up);
- void serial8250_set_defaults(struct uart_8250_port *up);
- void serial8250_console_write(struct uart_8250_port *up, const char *s,
- unsigned int count);
-+void serial8250_console_write_atomic(struct uart_8250_port *up, const char *s,
-+ unsigned int count);
- int serial8250_console_setup(struct uart_port *port, char *options, bool probe);
- int serial8250_console_exit(struct uart_port *port);
-
---
-2.43.0
-
diff --git a/buildroot-external/patches/linux/0014-tick-Fix-timer-storm-since-introduction-of-timersd.patch b/buildroot-external/patches/linux/0021-tick-Fix-timer-storm-since-introduction-of-timersd.patch
similarity index 91%
rename from buildroot-external/patches/linux/0014-tick-Fix-timer-storm-since-introduction-of-timersd.patch
rename to buildroot-external/patches/linux/0021-tick-Fix-timer-storm-since-introduction-of-timersd.patch
index 713e4d8c..7c1378b8 100644
--- a/buildroot-external/patches/linux/0014-tick-Fix-timer-storm-since-introduction-of-timersd.patch
+++ b/buildroot-external/patches/linux/0021-tick-Fix-timer-storm-since-introduction-of-timersd.patch
@@ -1,7 +1,7 @@
-From 9220b21155b9f55725886fb46c85df7ee0288ce0 Mon Sep 17 00:00:00 2001
+From ed846d5dfed571d1dc9ab756a1240a93018004b5 Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Tue, 5 Apr 2022 03:07:52 +0200
-Subject: [PATCH 14/62] tick: Fix timer storm since introduction of timersd
+Subject: [PATCH 021/195] tick: Fix timer storm since introduction of timersd
If timers are pending while the tick is reprogrammed on nohz_mode, the
next expiry is not armed to fire now, it is delayed one jiffy forward
@@ -80,10 +80,10 @@ index f459b0f27c94..a5091ac97fc6 100644
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
diff --git a/kernel/softirq.c b/kernel/softirq.c
-index 1892af494cdd..ab1fe34326ba 100644
+index 1277abc94228..a4d359e2c2b5 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
-@@ -639,12 +639,7 @@ static inline void tick_irq_exit(void)
+@@ -621,12 +621,7 @@ static inline void tick_irq_exit(void)
#ifdef CONFIG_PREEMPT_RT
DEFINE_PER_CPU(struct task_struct *, timersd);
@@ -98,10 +98,10 @@ index 1892af494cdd..ab1fe34326ba 100644
static void wake_timersd(void)
{
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
-index 798e1841d286..b52e1861b913 100644
+index 5cbd0cee83c0..2d7036714c81 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
-@@ -800,7 +800,7 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
+@@ -795,7 +795,7 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
static inline bool local_timer_softirq_pending(void)
{
diff --git a/buildroot-external/patches/linux/0022-printk-avoid-preempt_disable-for-PREEMPT_RT.patch b/buildroot-external/patches/linux/0022-printk-avoid-preempt_disable-for-PREEMPT_RT.patch
deleted file mode 100644
index 7135a6cf..00000000
--- a/buildroot-external/patches/linux/0022-printk-avoid-preempt_disable-for-PREEMPT_RT.patch
+++ /dev/null
@@ -1,95 +0,0 @@
-From ca6770a982bcd5497cd057b5e0a8126c2e589a14 Mon Sep 17 00:00:00 2001
-From: John Ogness
-Date: Fri, 4 Feb 2022 16:01:17 +0106
-Subject: [PATCH 22/62] printk: avoid preempt_disable() for PREEMPT_RT
-
-During non-normal operation, printk() calls will attempt to
-write the messages directly to the consoles. This involves
-using console_trylock() to acquire @console_sem.
-
-Preemption is disabled while directly printing to the consoles
-in order to ensure that the printing task is not scheduled away
-while holding @console_sem, thus blocking all other printers
-and causing delays in printing.
-
-Commit fd5f7cde1b85 ("printk: Never set console_may_schedule in
-console_trylock()") specifically reverted a previous attempt at
-allowing preemption while printing.
-
-However, on PREEMPT_RT systems, disabling preemption while
-printing is not allowed because console drivers typically
-acquire a spin lock (which under PREEMPT_RT is an rtmutex).
-Since direct printing is only used during early boot and
-non-panic dumps, the risks of delayed print output for these
-scenarios will be accepted under PREEMPT_RT.
-
-Signed-off-by: John Ogness
-Signed-off-by: Sebastian Andrzej Siewior
----
- kernel/printk/printk.c | 21 ++++++++++++++++++++-
- 1 file changed, 20 insertions(+), 1 deletion(-)
-
-diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
-index 73b1727087c7..3d0ff49cca29 100644
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -1981,6 +1981,7 @@ static int console_lock_spinning_disable_and_check(void)
- return 1;
- }
-
-+#if !IS_ENABLED(CONFIG_PREEMPT_RT)
- /**
- * console_trylock_spinning - try to get console_lock by busy waiting
- *
-@@ -2054,6 +2055,7 @@ static int console_trylock_spinning(void)
-
- return 1;
- }
-+#endif /* CONFIG_PREEMPT_RT */
-
- /*
- * Call the specified console driver, asking it to write out the specified
-@@ -2393,6 +2395,18 @@ asmlinkage int vprintk_emit(int facility, int level,
-
- /* If called from the scheduler, we can not call up(). */
- if (!in_sched && allow_direct_printing()) {
-+#if IS_ENABLED(CONFIG_PREEMPT_RT)
-+ /*
-+ * Use the non-spinning trylock since PREEMPT_RT does not
-+ * support console lock handovers.
-+ *
-+ * Direct printing will most likely involve taking spinlocks.
-+ * For PREEMPT_RT, this is only allowed if in a preemptible
-+ * context.
-+ */
-+ if (preemptible() && console_trylock())
-+ console_unlock();
-+#else
- /*
- * The caller may be holding system-critical or
- * timing-sensitive locks. Disable preemption during direct
-@@ -2410,6 +2424,7 @@ asmlinkage int vprintk_emit(int facility, int level,
- if (console_trylock_spinning())
- console_unlock();
- preempt_enable();
-+#endif
- }
-
- if (in_sched)
-@@ -3119,8 +3134,12 @@ static bool console_emit_next_record_transferable(struct console *con, char *tex
- /*
- * Handovers are only supported if threaded printers are atomically
- * blocked. The context taking over the console_lock may be atomic.
-+ *
-+ * PREEMPT_RT also does not support handovers because the spinning
-+ * waiter can cause large latencies.
- */
-- if (!console_kthreads_atomically_blocked()) {
-+ if (!console_kthreads_atomically_blocked() ||
-+ IS_ENABLED(CONFIG_PREEMPT_RT)) {
- *handover = false;
- handover = NULL;
- }
---
-2.43.0
-
diff --git a/buildroot-external/patches/linux/0015-softirq-Wake-ktimers-thread-also-in-softirq.patch b/buildroot-external/patches/linux/0022-softirq-Wake-ktimers-thread-also-in-softirq.patch
similarity index 86%
rename from buildroot-external/patches/linux/0015-softirq-Wake-ktimers-thread-also-in-softirq.patch
rename to buildroot-external/patches/linux/0022-softirq-Wake-ktimers-thread-also-in-softirq.patch
index 955afe2b..11a54908 100644
--- a/buildroot-external/patches/linux/0015-softirq-Wake-ktimers-thread-also-in-softirq.patch
+++ b/buildroot-external/patches/linux/0022-softirq-Wake-ktimers-thread-also-in-softirq.patch
@@ -1,7 +1,7 @@
-From 7c8c231b4a43dd06e6c90d5c820342ee3f8e9130 Mon Sep 17 00:00:00 2001
+From cb9cea518effb1b12b36be953f46c66fcca40c2f Mon Sep 17 00:00:00 2001
From: Junxiao Chang
Date: Mon, 20 Feb 2023 09:12:20 +0100
-Subject: [PATCH 15/62] softirq: Wake ktimers thread also in softirq.
+Subject: [PATCH 022/195] softirq: Wake ktimers thread also in softirq.
If the hrtimer is raised while a softirq is processed then it does not
wake the corresponding ktimers thread. This is due to the optimisation in the
@@ -22,10 +22,10 @@ Signed-off-by: Sebastian Andrzej Siewior
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/kernel/softirq.c b/kernel/softirq.c
-index ab1fe34326ba..82f3e68fbe22 100644
+index a4d359e2c2b5..c2474cc4fa51 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
-@@ -664,13 +664,12 @@ static inline void __irq_exit_rcu(void)
+@@ -646,13 +646,12 @@ static inline void __irq_exit_rcu(void)
#endif
account_hardirq_exit(current);
preempt_count_sub(HARDIRQ_OFFSET);
diff --git a/buildroot-external/patches/linux/0017-zram-Replace-bit-spinlocks-with-spinlock_t-for-PREEM.patch b/buildroot-external/patches/linux/0023-zram-Replace-bit-spinlocks-with-spinlock_t-for-PREEM.patch
similarity index 66%
rename from buildroot-external/patches/linux/0017-zram-Replace-bit-spinlocks-with-spinlock_t-for-PREEM.patch
rename to buildroot-external/patches/linux/0023-zram-Replace-bit-spinlocks-with-spinlock_t-for-PREEM.patch
index 711d54c1..9a8c1a0e 100644
--- a/buildroot-external/patches/linux/0017-zram-Replace-bit-spinlocks-with-spinlock_t-for-PREEM.patch
+++ b/buildroot-external/patches/linux/0023-zram-Replace-bit-spinlocks-with-spinlock_t-for-PREEM.patch
@@ -1,31 +1,33 @@
-From 5881f1272a6bf4dc4c7553942f95fa850416700c Mon Sep 17 00:00:00 2001
+From d25c9b1588e64c6b128e958bdb998668f65bd03a Mon Sep 17 00:00:00 2001
From: Mike Galbraith
Date: Thu, 31 Mar 2016 04:08:28 +0200
-Subject: [PATCH 17/62] zram: Replace bit spinlocks with spinlock_t for
+Subject: [PATCH 023/195] zram: Replace bit spinlocks with spinlock_t for
PREEMPT_RT.
-The bit spinlock disables preemption on PREEMPT_RT. With disabled preemption it
-is not allowed to acquire other sleeping locks which includes invoking
-zs_free().
+The bit spinlock disables preemption. The spinlock_t lock becomes a sleeping
+lock on PREEMPT_RT and it can not be acquired in this context. In this locked
+section, zs_free() acquires a zs_pool::lock, and there is access to
+zram::wb_limit_lock.
-Use a spinlock_t on PREEMPT_RT for locking and set/ clear ZRAM_LOCK after the
-lock has been acquired/ dropped.
+Use a spinlock_t on PREEMPT_RT for locking and set/ clear ZRAM_LOCK bit after
+the lock has been acquired/ dropped.
Signed-off-by: Mike Galbraith
Signed-off-by: Sebastian Andrzej Siewior
Link: https://lkml.kernel.org/r/YqIbMuHCPiQk+Ac2@linutronix.de
+Link: https://lore.kernel.org/20230323161830.jFbWCosd@linutronix.de
---
- drivers/block/zram/zram_drv.c | 36 +++++++++++++++++++++++++++++++++++
+ drivers/block/zram/zram_drv.c | 37 +++++++++++++++++++++++++++++++++++
drivers/block/zram/zram_drv.h | 3 +++
- 2 files changed, 39 insertions(+)
+ 2 files changed, 40 insertions(+)
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
-index 966aab902d19..ee69e4443691 100644
+index 06673c6ca255..a5d0f7c06342 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
-@@ -57,6 +57,40 @@ static void zram_free_page(struct zram *zram, size_t index);
- static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
- u32 index, int offset, struct bio *bio);
+@@ -57,6 +57,41 @@ static void zram_free_page(struct zram *zram, size_t index);
+ static int zram_read_page(struct zram *zram, struct page *page, u32 index,
+ struct bio *parent);
+#ifdef CONFIG_PREEMPT_RT
+static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages)
@@ -61,10 +63,11 @@ index 966aab902d19..ee69e4443691 100644
+#else
+
+static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages) { }
-
++
static int zram_slot_trylock(struct zram *zram, u32 index)
{
-@@ -72,6 +106,7 @@ static void zram_slot_unlock(struct zram *zram, u32 index)
+ return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags);
+@@ -71,6 +106,7 @@ static void zram_slot_unlock(struct zram *zram, u32 index)
{
bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
}
@@ -72,7 +75,7 @@ index 966aab902d19..ee69e4443691 100644
static inline bool init_done(struct zram *zram)
{
-@@ -1187,6 +1222,7 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
+@@ -1245,6 +1281,7 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
if (!huge_class_size)
huge_class_size = zs_huge_class_size(zram->mem_pool);
@@ -81,10 +84,10 @@ index 966aab902d19..ee69e4443691 100644
}
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
-index a2bda53020fd..ae7950b26db5 100644
+index ca7a15bd4845..e64eb607eb45 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
-@@ -62,6 +62,9 @@ struct zram_table_entry {
+@@ -69,6 +69,9 @@ struct zram_table_entry {
unsigned long element;
};
unsigned long flags;
diff --git a/buildroot-external/patches/linux/0024-preempt-Put-preempt_enable-within-an-instrumentation.patch b/buildroot-external/patches/linux/0024-preempt-Put-preempt_enable-within-an-instrumentation.patch
new file mode 100644
index 00000000..e2fffea1
--- /dev/null
+++ b/buildroot-external/patches/linux/0024-preempt-Put-preempt_enable-within-an-instrumentation.patch
@@ -0,0 +1,52 @@
+From 537326d9f40fb9b84eb9df67a0f8da0b4deee025 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior
+Date: Wed, 8 Mar 2023 16:29:38 +0100
+Subject: [PATCH 024/195] preempt: Put preempt_enable() within an
+ instrumentation*() section.
+
+Callers of preempt_enable() can be within an noinstr section leading to:
+| vmlinux.o: warning: objtool: native_sched_clock+0x97: call to preempt_schedule_notrace_thunk() leaves .noinstr.text section
+| vmlinux.o: warning: objtool: kvm_clock_read+0x22: call to preempt_schedule_notrace_thunk() leaves .noinstr.text section
+| vmlinux.o: warning: objtool: local_clock+0xb4: call to preempt_schedule_notrace_thunk() leaves .noinstr.text section
+| vmlinux.o: warning: objtool: enter_from_user_mode+0xea: call to preempt_schedule_thunk() leaves .noinstr.text section
+| vmlinux.o: warning: objtool: syscall_enter_from_user_mode+0x140: call to preempt_schedule_thunk() leaves .noinstr.text section
+| vmlinux.o: warning: objtool: syscall_enter_from_user_mode_prepare+0xf2: call to preempt_schedule_thunk() leaves .noinstr.text section
+| vmlinux.o: warning: objtool: irqentry_enter_from_user_mode+0xea: call to preempt_schedule_thunk() leaves .noinstr.text section
+
+Signed-off-by: Sebastian Andrzej Siewior
+Link: https://lore.kernel.org/r/20230309072724.3F6zRkvw@linutronix.de
+---
+ include/linux/preempt.h | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h
+index 9aa6358a1a16..cd16f0330fba 100644
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -230,15 +230,21 @@ do { \
+ #define preempt_enable() \
+ do { \
+ barrier(); \
+- if (unlikely(preempt_count_dec_and_test())) \
++ if (unlikely(preempt_count_dec_and_test())) { \
++ instrumentation_begin(); \
+ __preempt_schedule(); \
++ instrumentation_end(); \
++ } \
+ } while (0)
+
+ #define preempt_enable_notrace() \
+ do { \
+ barrier(); \
+- if (unlikely(__preempt_count_dec_and_test())) \
++ if (unlikely(__preempt_count_dec_and_test())) { \
++ instrumentation_begin(); \
+ __preempt_schedule_notrace(); \
++ instrumentation_end(); \
++ } \
+ } while (0)
+
+ #define preempt_check_resched() \
+--
+2.43.0
+
diff --git a/buildroot-external/patches/linux/0025-sched-core-Provide-a-method-to-check-if-a-task-is-PI.patch b/buildroot-external/patches/linux/0025-sched-core-Provide-a-method-to-check-if-a-task-is-PI.patch
new file mode 100644
index 00000000..34da5d05
--- /dev/null
+++ b/buildroot-external/patches/linux/0025-sched-core-Provide-a-method-to-check-if-a-task-is-PI.patch
@@ -0,0 +1,61 @@
+From a3b4b96acf6a09da67d22e7aa8a62f250bfc6e25 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior
+Date: Fri, 4 Aug 2023 13:30:37 +0200
+Subject: [PATCH 025/195] sched/core: Provide a method to check if a task is
+ PI-boosted.
+
+Provide a method to check if a task inherited the priority from another
+task. This happens if a task owns a lock which is requested by a task
+with higher priority. This can be used as a hint to add a preemption
+point to the critical section.
+
+Provide a function which reports true if the task is PI-boosted.
+
+Link: https://lore.kernel.org/r/20230804113039.419794-2-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior
+---
+ include/linux/sched.h | 1 +
+ kernel/sched/core.c | 15 +++++++++++++++
+ 2 files changed, 16 insertions(+)
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 67623ffd4a8e..eab173e5d09b 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1905,6 +1905,7 @@ static inline int dl_task_check_affinity(struct task_struct *p, const struct cpu
+ }
+ #endif
+
++extern bool task_is_pi_boosted(const struct task_struct *p);
+ extern int yield_to(struct task_struct *p, bool preempt);
+ extern void set_user_nice(struct task_struct *p, long nice);
+ extern int task_prio(const struct task_struct *p);
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 90f9124ac027..7134598e3284 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -8923,6 +8923,21 @@ static inline void preempt_dynamic_init(void) { }
+
+ #endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */
+
++/*
++ * task_is_pi_boosted - Check if task has been PI boosted.
++ * @p: Task to check.
++ *
++ * Return true if task is subject to priority inheritance.
++ */
++bool task_is_pi_boosted(const struct task_struct *p)
++{
++ int prio = p->prio;
++
++ if (!rt_prio(prio))
++ return false;
++ return prio != p->normal_prio;
++}
++
+ /**
+ * yield - yield the current processor to other threads.
+ *
+--
+2.43.0
+
diff --git a/buildroot-external/patches/linux/0026-softirq-Add-function-to-preempt-serving-softirqs.patch b/buildroot-external/patches/linux/0026-softirq-Add-function-to-preempt-serving-softirqs.patch
new file mode 100644
index 00000000..2c515beb
--- /dev/null
+++ b/buildroot-external/patches/linux/0026-softirq-Add-function-to-preempt-serving-softirqs.patch
@@ -0,0 +1,67 @@
+From c330e617b94bbc517da0aaabecfd3b3c007d3e62 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior
+Date: Fri, 4 Aug 2023 13:30:38 +0200
+Subject: [PATCH 026/195] softirq: Add function to preempt serving softirqs.
+
+Add a functionality for the softirq handler to preempt its current work
+if needed. The softirq core has no particular state. It reads and resets
+the pending softirq bits and then processes one after the other.
+It can already be preempted while it invokes a certain softirq handler.
+
+By enabling the BH the softirq core releases the per-CPU bh lock which
+serializes all softirq handler. It is safe to do as long as the code
+does not expect any serialisation in between. A typical scenarion would
+after the invocation of callback where no state needs to be preserved
+before the next callback is invoked.
+
+Add functionaliry to preempt the serving softirqs.
+
+Link: https://lore.kernel.org/r/20230804113039.419794-3-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior
+---
+ include/linux/bottom_half.h | 2 ++
+ kernel/softirq.c | 13 +++++++++++++
+ 2 files changed, 15 insertions(+)
+
+diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
+index fc53e0ad56d9..448bbef47456 100644
+--- a/include/linux/bottom_half.h
++++ b/include/linux/bottom_half.h
+@@ -35,8 +35,10 @@ static inline void local_bh_enable(void)
+
+ #ifdef CONFIG_PREEMPT_RT
+ extern bool local_bh_blocked(void);
++extern void softirq_preempt(void);
+ #else
+ static inline bool local_bh_blocked(void) { return false; }
++static inline void softirq_preempt(void) { }
+ #endif
+
+ #endif /* _LINUX_BH_H */
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index c2474cc4fa51..cae0ae2e2b0b 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -247,6 +247,19 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
+ }
+ EXPORT_SYMBOL(__local_bh_enable_ip);
+
++void softirq_preempt(void)
++{
++ if (WARN_ON_ONCE(!preemptible()))
++ return;
++
++ if (WARN_ON_ONCE(__this_cpu_read(softirq_ctrl.cnt) != SOFTIRQ_OFFSET))
++ return;
++
++ __local_bh_enable(SOFTIRQ_OFFSET, true);
++ /* preemption point */
++ __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
++}
++
+ /*
+ * Invoked from ksoftirqd_run() outside of the interrupt disabled section
+ * to acquire the per CPU local lock for reentrancy protection.
+--
+2.43.0
+
diff --git a/buildroot-external/patches/linux/0027-time-Allow-to-preempt-after-a-callback.patch b/buildroot-external/patches/linux/0027-time-Allow-to-preempt-after-a-callback.patch
new file mode 100644
index 00000000..d406e878
--- /dev/null
+++ b/buildroot-external/patches/linux/0027-time-Allow-to-preempt-after-a-callback.patch
@@ -0,0 +1,52 @@
+From 811417a29d37605f932c88499d94379ac8535991 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior
+Date: Fri, 4 Aug 2023 13:30:39 +0200
+Subject: [PATCH 027/195] time: Allow to preempt after a callback.
+
+The TIMER_SOFTIRQ handler invokes timer callbacks of the expired timers.
+Before each invocation the timer_base::lock is dropped. The only lock
+that is still held is the timer_base::expiry_lock and the per-CPU
+bh-lock as part of local_bh_disable(). The former is released as part
+of lock up prevention if the timer is preempted by the caller which is
+waiting for its completion.
+
+Both locks are already released as part of timer_sync_wait_running().
+This can be extended by also releasing in bh-lock. The timer core does
+not rely on any state that is serialized by the bh-lock. The timer
+callback expects the bh-state to be serialized by the lock but there is
+no need to keep state synchronized while invoking multiple callbacks.
+
+Preempt handling softirqs and release all locks after a timer invocation
+if the current has inherited priority.
+
+Link: https://lore.kernel.org/r/20230804113039.419794-4-bigeasy@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior
+---
+ kernel/time/timer.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index 7cad6fe3c035..b3fbe97d1e34 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -1470,9 +1470,16 @@ static inline void timer_base_unlock_expiry(struct timer_base *base)
+ */
+ static void timer_sync_wait_running(struct timer_base *base)
+ {
+- if (atomic_read(&base->timer_waiters)) {
++ bool need_preempt;
++
++ need_preempt = task_is_pi_boosted(current);
++ if (need_preempt || atomic_read(&base->timer_waiters)) {
+ raw_spin_unlock_irq(&base->lock);
+ spin_unlock(&base->expiry_lock);
++
++ if (need_preempt)
++ softirq_preempt();
++
+ spin_lock(&base->expiry_lock);
+ raw_spin_lock_irq(&base->lock);
+ }
+--
+2.43.0
+
diff --git a/buildroot-external/patches/linux/0028-serial-core-Provide-port-lock-wrappers.patch b/buildroot-external/patches/linux/0028-serial-core-Provide-port-lock-wrappers.patch
new file mode 100644
index 00000000..2f46b47f
--- /dev/null
+++ b/buildroot-external/patches/linux/0028-serial-core-Provide-port-lock-wrappers.patch
@@ -0,0 +1,131 @@
+From 681f56a112fc24a97bfdffcef7655e5bf9ffb6e8 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner
+Date: Thu, 14 Sep 2023 20:43:18 +0206
+Subject: [PATCH 028/195] serial: core: Provide port lock wrappers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+Provide wrapper functions for spin_[un]lock*(port->lock) invocations so
+that the console mechanics can be applied later on at a single place and
+does not require to copy the same logic all over the drivers.
+
+Signed-off-by: Thomas Gleixner
+Reviewed-by: Ilpo Järvinen
+Signed-off-by: John Ogness
+Link: https://lore.kernel.org/r/20230914183831.587273-2-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman
+Signed-off-by: Sebastian Andrzej Siewior
+---
+ include/linux/serial_core.h | 79 +++++++++++++++++++++++++++++++++++++
+ 1 file changed, 79 insertions(+)
+
+diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
+index bb6f073bc159..f1d5c0d1568c 100644
+--- a/include/linux/serial_core.h
++++ b/include/linux/serial_core.h
+@@ -588,6 +588,85 @@ struct uart_port {
+ void *private_data; /* generic platform data pointer */
+ };
+
++/**
++ * uart_port_lock - Lock the UART port
++ * @up: Pointer to UART port structure
++ */
++static inline void uart_port_lock(struct uart_port *up)
++{
++ spin_lock(&up->lock);
++}
++
++/**
++ * uart_port_lock_irq - Lock the UART port and disable interrupts
++ * @up: Pointer to UART port structure
++ */
++static inline void uart_port_lock_irq(struct uart_port *up)
++{
++ spin_lock_irq(&up->lock);
++}
++
++/**
++ * uart_port_lock_irqsave - Lock the UART port, save and disable interrupts
++ * @up: Pointer to UART port structure
++ * @flags: Pointer to interrupt flags storage
++ */
++static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
++{
++ spin_lock_irqsave(&up->lock, *flags);
++}
++
++/**
++ * uart_port_trylock - Try to lock the UART port
++ * @up: Pointer to UART port structure
++ *
++ * Returns: True if lock was acquired, false otherwise
++ */
++static inline bool uart_port_trylock(struct uart_port *up)
++{
++ return spin_trylock(&up->lock);
++}
++
++/**
++ * uart_port_trylock_irqsave - Try to lock the UART port, save and disable interrupts
++ * @up: Pointer to UART port structure
++ * @flags: Pointer to interrupt flags storage
++ *
++ * Returns: True if lock was acquired, false otherwise
++ */
++static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long *flags)
++{
++ return spin_trylock_irqsave(&up->lock, *flags);
++}
++
++/**
++ * uart_port_unlock - Unlock the UART port
++ * @up: Pointer to UART port structure
++ */
++static inline void uart_port_unlock(struct uart_port *up)
++{
++ spin_unlock(&up->lock);
++}
++
++/**
++ * uart_port_unlock_irq - Unlock the UART port and re-enable interrupts
++ * @up: Pointer to UART port structure
++ */
++static inline void uart_port_unlock_irq(struct uart_port *up)
++{
++ spin_unlock_irq(&up->lock);
++}
++
++/**
++ * uart_port_lock_irqrestore - Unlock the UART port, restore interrupts
++ * @up: Pointer to UART port structure
++ * @flags: The saved interrupt flags for restore
++ */
++static inline void uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
++{
++ spin_unlock_irqrestore(&up->lock, flags);
++}
++
+ static inline int serial_port_in(struct uart_port *up, int offset)
+ {
+ return up->serial_in(up, offset);
+--
+2.43.0
+
diff --git a/buildroot-external/patches/linux/0029-serial-core-Use-lock-wrappers.patch b/buildroot-external/patches/linux/0029-serial-core-Use-lock-wrappers.patch
new file mode 100644
index 00000000..f15e8fc2
--- /dev/null
+++ b/buildroot-external/patches/linux/0029-serial-core-Use-lock-wrappers.patch
@@ -0,0 +1,98 @@
+From e642eaa68ae3692f66e9f8b987f23aebc709e326 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner
+Date: Thu, 14 Sep 2023 20:43:19 +0206
+Subject: [PATCH 029/195] serial: core: Use lock wrappers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner
+Reviewed-by: Ilpo Järvinen
+Signed-off-by: John Ogness
+Link: https://lore.kernel.org/r/20230914183831.587273-3-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman
+Signed-off-by: Sebastian Andrzej Siewior
+---
+ include/linux/serial_core.h | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
+index f1d5c0d1568c..3091c62ec37b 100644
+--- a/include/linux/serial_core.h
++++ b/include/linux/serial_core.h
+@@ -1035,14 +1035,14 @@ static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
+ u8 sysrq_ch;
+
+ if (!port->has_sysrq) {
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+ return;
+ }
+
+ sysrq_ch = port->sysrq_ch;
+ port->sysrq_ch = 0;
+
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+
+ if (sysrq_ch)
+ handle_sysrq(sysrq_ch);
+@@ -1054,14 +1054,14 @@ static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port
+ u8 sysrq_ch;
+
+ if (!port->has_sysrq) {
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ return;
+ }
+
+ sysrq_ch = port->sysrq_ch;
+ port->sysrq_ch = 0;
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ if (sysrq_ch)
+ handle_sysrq(sysrq_ch);
+@@ -1077,12 +1077,12 @@ static inline int uart_prepare_sysrq_char(struct uart_port *port, u8 ch)
+ }
+ static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
+ {
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+ }
+ static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port,
+ unsigned long flags)
+ {
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+ #endif /* CONFIG_MAGIC_SYSRQ_SERIAL */
+
+--
+2.43.0
+
diff --git a/buildroot-external/patches/linux/0030-serial-21285-Use-port-lock-wrappers.patch b/buildroot-external/patches/linux/0030-serial-21285-Use-port-lock-wrappers.patch
new file mode 100644
index 00000000..648e8eba
--- /dev/null
+++ b/buildroot-external/patches/linux/0030-serial-21285-Use-port-lock-wrappers.patch
@@ -0,0 +1,80 @@
+From 5acefbbc9066e05a67448a4ddbf2385cf8caedd5 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner
+Date: Thu, 14 Sep 2023 20:43:20 +0206
+Subject: [PATCH 030/195] serial: 21285: Use port lock wrappers
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner
+Signed-off-by: John Ogness
+Link: https://lore.kernel.org/r/20230914183831.587273-4-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman
+Signed-off-by: Sebastian Andrzej Siewior
+---
+ drivers/tty/serial/21285.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/tty/serial/21285.c b/drivers/tty/serial/21285.c
+index d756fcc884cb..4de0c975ebdc 100644
+--- a/drivers/tty/serial/21285.c
++++ b/drivers/tty/serial/21285.c
+@@ -185,14 +185,14 @@ static void serial21285_break_ctl(struct uart_port *port, int break_state)
+ unsigned long flags;
+ unsigned int h_lcr;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ h_lcr = *CSR_H_UBRLCR;
+ if (break_state)
+ h_lcr |= H_UBRLCR_BREAK;
+ else
+ h_lcr &= ~H_UBRLCR_BREAK;
+ *CSR_H_UBRLCR = h_lcr;
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static int serial21285_startup(struct uart_port *port)
+@@ -272,7 +272,7 @@ serial21285_set_termios(struct uart_port *port, struct ktermios *termios,
+ if (port->fifosize)
+ h_lcr |= H_UBRLCR_FIFO;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /*
+ * Update the per-port timeout.
+@@ -309,7 +309,7 @@ serial21285_set_termios(struct uart_port *port, struct ktermios *termios,
+ *CSR_H_UBRLCR = h_lcr;
+ *CSR_UARTCON = 1;
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static const char *serial21285_type(struct uart_port *port)
+--
+2.43.0
+
diff --git a/buildroot-external/patches/linux/0031-serial-8250_aspeed_vuart-Use-port-lock-wrappers.patch b/buildroot-external/patches/linux/0031-serial-8250_aspeed_vuart-Use-port-lock-wrappers.patch
new file mode 100644
index 00000000..9a8717e1
--- /dev/null
+++ b/buildroot-external/patches/linux/0031-serial-8250_aspeed_vuart-Use-port-lock-wrappers.patch
@@ -0,0 +1,66 @@
+From c33a0cbdf0fa230865a8dfdb6470235689178915 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner
+Date: Thu, 14 Sep 2023 20:43:21 +0206
+Subject: [PATCH 031/195] serial: 8250_aspeed_vuart: Use port lock wrappers
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner
+Signed-off-by: John Ogness
+Link: https://lore.kernel.org/r/20230914183831.587273-5-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman
+Signed-off-by: Sebastian Andrzej Siewior
+---
+ drivers/tty/serial/8250/8250_aspeed_vuart.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
+index 4a9e71b2dbbc..021949f252f8 100644
+--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
++++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
+@@ -288,9 +288,9 @@ static void aspeed_vuart_set_throttle(struct uart_port *port, bool throttle)
+ struct uart_8250_port *up = up_to_u8250p(port);
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ __aspeed_vuart_set_throttle(up, throttle);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static void aspeed_vuart_throttle(struct uart_port *port)
+@@ -340,7 +340,7 @@ static int aspeed_vuart_handle_irq(struct uart_port *port)
+ if (iir & UART_IIR_NO_INT)
+ return 0;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ lsr = serial_port_in(port, UART_LSR);
+
+--
+2.43.0
+
diff --git a/buildroot-external/patches/linux/0032-sched-Add-support-for-lazy-preemption.patch b/buildroot-external/patches/linux/0032-sched-Add-support-for-lazy-preemption.patch
deleted file mode 100644
index 65c65f3b..00000000
--- a/buildroot-external/patches/linux/0032-sched-Add-support-for-lazy-preemption.patch
+++ /dev/null
@@ -1,713 +0,0 @@
-From fc4755c31f7adcf012556bfc2db7e84c78aa0ad2 Mon Sep 17 00:00:00 2001
-From: Thomas Gleixner
-Date: Fri, 26 Oct 2012 18:50:54 +0100
-Subject: [PATCH 32/62] sched: Add support for lazy preemption
-
-It has become an obsession to mitigate the determinism vs. throughput
-loss of RT. Looking at the mainline semantics of preemption points
-gives a hint why RT sucks throughput wise for ordinary SCHED_OTHER
-tasks. One major issue is the wakeup of tasks which are right away
-preempting the waking task while the waking task holds a lock on which
-the woken task will block right after having preempted the wakee. In
-mainline this is prevented due to the implicit preemption disable of
-spin/rw_lock held regions. On RT this is not possible due to the fully
-preemptible nature of sleeping spinlocks.
-
-Though for a SCHED_OTHER task preempting another SCHED_OTHER task this
-is really not a correctness issue. RT folks are concerned about
-SCHED_FIFO/RR tasks preemption and not about the purely fairness
-driven SCHED_OTHER preemption latencies.
-
-So I introduced a lazy preemption mechanism which only applies to
-SCHED_OTHER tasks preempting another SCHED_OTHER task. Aside of the
-existing preempt_count each tasks sports now a preempt_lazy_count
-which is manipulated on lock acquiry and release. This is slightly
-incorrect as for lazyness reasons I coupled this on
-migrate_disable/enable so some other mechanisms get the same treatment
-(e.g. get_cpu_light).
-
-Now on the scheduler side instead of setting NEED_RESCHED this sets
-NEED_RESCHED_LAZY in case of a SCHED_OTHER/SCHED_OTHER preemption and
-therefor allows to exit the waking task the lock held region before
-the woken task preempts. That also works better for cross CPU wakeups
-as the other side can stay in the adaptive spinning loop.
-
-For RT class preemption there is no change. This simply sets
-NEED_RESCHED and forgoes the lazy preemption counter.
-
- Initial test do not expose any observable latency increasement, but
-history shows that I've been proven wrong before :)
-
-The lazy preemption mode is per default on, but with
-CONFIG_SCHED_DEBUG enabled it can be disabled via:
-
- # echo NO_PREEMPT_LAZY >/sys/kernel/debug/sched_features
-
-and reenabled via
-
- # echo PREEMPT_LAZY >/sys/kernel/debug/sched_features
-
-The test results so far are very machine and workload dependent, but
-there is a clear trend that it enhances the non RT workload
-performance.
-
-Signed-off-by: Thomas Gleixner
----
- include/linux/preempt.h | 54 ++++++++++++++++++++++--
- include/linux/sched.h | 37 +++++++++++++++++
- include/linux/thread_info.h | 12 +++++-
- include/linux/trace_events.h | 10 ++++-
- kernel/Kconfig.preempt | 6 +++
- kernel/sched/core.c | 79 +++++++++++++++++++++++++++++++++++-
- kernel/sched/fair.c | 16 ++++----
- kernel/sched/features.h | 3 ++
- kernel/sched/sched.h | 9 ++++
- kernel/trace/trace.c | 50 ++++++++++++++---------
- kernel/trace/trace_events.c | 1 +
- kernel/trace/trace_output.c | 18 +++++++-
- 12 files changed, 260 insertions(+), 35 deletions(-)
-
-diff --git a/include/linux/preempt.h b/include/linux/preempt.h
-index 8cfcc5d45451..9fc4c4bb320f 100644
---- a/include/linux/preempt.h
-+++ b/include/linux/preempt.h
-@@ -207,6 +207,20 @@ extern void preempt_count_sub(int val);
- #define preempt_count_inc() preempt_count_add(1)
- #define preempt_count_dec() preempt_count_sub(1)
-
-+#ifdef CONFIG_PREEMPT_LAZY
-+#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0)
-+#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0)
-+#define inc_preempt_lazy_count() add_preempt_lazy_count(1)
-+#define dec_preempt_lazy_count() sub_preempt_lazy_count(1)
-+#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count)
-+#else
-+#define add_preempt_lazy_count(val) do { } while (0)
-+#define sub_preempt_lazy_count(val) do { } while (0)
-+#define inc_preempt_lazy_count() do { } while (0)
-+#define dec_preempt_lazy_count() do { } while (0)
-+#define preempt_lazy_count() (0)
-+#endif
-+
- #ifdef CONFIG_PREEMPT_COUNT
-
- #define preempt_disable() \
-@@ -215,6 +229,12 @@ do { \
- barrier(); \
- } while (0)
-
-+#define preempt_lazy_disable() \
-+do { \
-+ inc_preempt_lazy_count(); \
-+ barrier(); \
-+} while (0)
-+
- #define sched_preempt_enable_no_resched() \
- do { \
- barrier(); \
-@@ -246,6 +266,18 @@ do { \
- __preempt_schedule(); \
- } while (0)
-
-+/*
-+ * open code preempt_check_resched() because it is not exported to modules and
-+ * used by local_unlock() or bpf_enable_instrumentation().
-+ */
-+#define preempt_lazy_enable() \
-+do { \
-+ dec_preempt_lazy_count(); \
-+ barrier(); \
-+ if (should_resched(0)) \
-+ __preempt_schedule(); \
-+} while (0)
-+
- #else /* !CONFIG_PREEMPTION */
- #define preempt_enable() \
- do { \
-@@ -253,6 +285,12 @@ do { \
- preempt_count_dec(); \
- } while (0)
-
-+#define preempt_lazy_enable() \
-+do { \
-+ dec_preempt_lazy_count(); \
-+ barrier(); \
-+} while (0)
-+
- #define preempt_enable_notrace() \
- do { \
- barrier(); \
-@@ -293,6 +331,9 @@ do { \
- #define preempt_enable_notrace() barrier()
- #define preemptible() 0
-
-+#define preempt_lazy_disable() barrier()
-+#define preempt_lazy_enable() barrier()
-+
- #endif /* CONFIG_PREEMPT_COUNT */
-
- #ifdef MODULE
-@@ -311,7 +352,7 @@ do { \
- } while (0)
- #define preempt_fold_need_resched() \
- do { \
-- if (tif_need_resched()) \
-+ if (tif_need_resched_now()) \
- set_preempt_need_resched(); \
- } while (0)
-
-@@ -427,8 +468,15 @@ extern void migrate_enable(void);
-
- #else
-
--static inline void migrate_disable(void) { }
--static inline void migrate_enable(void) { }
-+static inline void migrate_disable(void)
-+{
-+ preempt_lazy_disable();
-+}
-+
-+static inline void migrate_enable(void)
-+{
-+ preempt_lazy_enable();
-+}
-
- #endif /* CONFIG_SMP */
-
-diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 0cac69902ec5..67ec36dbfacf 100644
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -2061,6 +2061,43 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
- return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
- }
-
-+#ifdef CONFIG_PREEMPT_LAZY
-+static inline void set_tsk_need_resched_lazy(struct task_struct *tsk)
-+{
-+ set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
-+}
-+
-+static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk)
-+{
-+ clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
-+}
-+
-+static inline int test_tsk_need_resched_lazy(struct task_struct *tsk)
-+{
-+ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY));
-+}
-+
-+static inline int need_resched_lazy(void)
-+{
-+ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
-+}
-+
-+static inline int need_resched_now(void)
-+{
-+ return test_thread_flag(TIF_NEED_RESCHED);
-+}
-+
-+#else
-+static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { }
-+static inline int need_resched_lazy(void) { return 0; }
-+
-+static inline int need_resched_now(void)
-+{
-+ return test_thread_flag(TIF_NEED_RESCHED);
-+}
-+
-+#endif
-+
- /*
- * cond_resched() and cond_resched_lock(): latency reduction via
- * explicit rescheduling in places that are safe. The return
-diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
-index 9f392ec76f2b..779e0e96b9cb 100644
---- a/include/linux/thread_info.h
-+++ b/include/linux/thread_info.h
-@@ -177,7 +177,17 @@ static __always_inline unsigned long read_ti_thread_flags(struct thread_info *ti
- clear_ti_thread_flag(task_thread_info(t), TIF_##fl)
- #endif /* !CONFIG_GENERIC_ENTRY */
-
--#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
-+#ifdef CONFIG_PREEMPT_LAZY
-+#define tif_need_resched() (test_thread_flag(TIF_NEED_RESCHED) || \
-+ test_thread_flag(TIF_NEED_RESCHED_LAZY))
-+#define tif_need_resched_now() (test_thread_flag(TIF_NEED_RESCHED))
-+#define tif_need_resched_lazy() test_thread_flag(TIF_NEED_RESCHED_LAZY)
-+
-+#else
-+#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
-+#define tif_need_resched_now() test_thread_flag(TIF_NEED_RESCHED)
-+#define tif_need_resched_lazy() 0
-+#endif
-
- #ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
- static inline int arch_within_stack_frames(const void * const stack,
-diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
-index c8b5e9781d01..743b1183d184 100644
---- a/include/linux/trace_events.h
-+++ b/include/linux/trace_events.h
-@@ -70,6 +70,7 @@ struct trace_entry {
- unsigned char flags;
- unsigned char preempt_count;
- int pid;
-+ unsigned char preempt_lazy_count;
- };
-
- #define TRACE_EVENT_TYPE_MAX \
-@@ -159,9 +160,10 @@ static inline void tracing_generic_entry_update(struct trace_entry *entry,
- unsigned int trace_ctx)
- {
- entry->preempt_count = trace_ctx & 0xff;
-+ entry->preempt_lazy_count = (trace_ctx >> 16) & 0xff;
- entry->pid = current->pid;
- entry->type = type;
-- entry->flags = trace_ctx >> 16;
-+ entry->flags = trace_ctx >> 24;
- }
-
- unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status);
-@@ -172,7 +174,13 @@ enum trace_flag_type {
- TRACE_FLAG_NEED_RESCHED = 0x04,
- TRACE_FLAG_HARDIRQ = 0x08,
- TRACE_FLAG_SOFTIRQ = 0x10,
-+#ifdef CONFIG_PREEMPT_LAZY
-+ TRACE_FLAG_PREEMPT_RESCHED = 0x00,
-+ TRACE_FLAG_NEED_RESCHED_LAZY = 0x20,
-+#else
-+ TRACE_FLAG_NEED_RESCHED_LAZY = 0x00,
- TRACE_FLAG_PREEMPT_RESCHED = 0x20,
-+#endif
- TRACE_FLAG_NMI = 0x40,
- TRACE_FLAG_BH_OFF = 0x80,
- };
-diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
-index c2f1fd95a821..260c08efeb48 100644
---- a/kernel/Kconfig.preempt
-+++ b/kernel/Kconfig.preempt
-@@ -1,5 +1,11 @@
- # SPDX-License-Identifier: GPL-2.0-only
-
-+config HAVE_PREEMPT_LAZY
-+ bool
-+
-+config PREEMPT_LAZY
-+ def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT
-+
- config PREEMPT_NONE_BUILD
- bool
-
-diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index 6bd06122850a..b72fc7d336e4 100644
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -1040,6 +1040,46 @@ void resched_curr(struct rq *rq)
- trace_sched_wake_idle_without_ipi(cpu);
- }
-
-+#ifdef CONFIG_PREEMPT_LAZY
-+
-+static int tsk_is_polling(struct task_struct *p)
-+{
-+#ifdef TIF_POLLING_NRFLAG
-+ return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
-+#else
-+ return 0;
-+#endif
-+}
-+
-+void resched_curr_lazy(struct rq *rq)
-+{
-+ struct task_struct *curr = rq->curr;
-+ int cpu;
-+
-+ if (!sched_feat(PREEMPT_LAZY)) {
-+ resched_curr(rq);
-+ return;
-+ }
-+
-+ if (test_tsk_need_resched(curr))
-+ return;
-+
-+ if (test_tsk_need_resched_lazy(curr))
-+ return;
-+
-+ set_tsk_need_resched_lazy(curr);
-+
-+ cpu = cpu_of(rq);
-+ if (cpu == smp_processor_id())
-+ return;
-+
-+ /* NEED_RESCHED_LAZY must be visible before we test polling */
-+ smp_mb();
-+ if (!tsk_is_polling(curr))
-+ smp_send_reschedule(cpu);
-+}
-+#endif
-+
- void resched_cpu(int cpu)
- {
- struct rq *rq = cpu_rq(cpu);
-@@ -2224,6 +2264,7 @@ void migrate_disable(void)
- preempt_disable();
- this_rq()->nr_pinned++;
- p->migration_disabled = 1;
-+ preempt_lazy_disable();
- preempt_enable();
- }
- EXPORT_SYMBOL_GPL(migrate_disable);
-@@ -2255,6 +2296,7 @@ void migrate_enable(void)
- barrier();
- p->migration_disabled = 0;
- this_rq()->nr_pinned--;
-+ preempt_lazy_enable();
- preempt_enable();
- }
- EXPORT_SYMBOL_GPL(migrate_enable);
-@@ -4722,6 +4764,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
- p->on_cpu = 0;
- #endif
- init_task_preempt_count(p);
-+#ifdef CONFIG_HAVE_PREEMPT_LAZY
-+ task_thread_info(p)->preempt_lazy_count = 0;
-+#endif
- #ifdef CONFIG_SMP
- plist_node_init(&p->pushable_tasks, MAX_PRIO);
- RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -6592,6 +6637,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)
-
- next = pick_next_task(rq, prev, &rf);
- clear_tsk_need_resched(prev);
-+ clear_tsk_need_resched_lazy(prev);
- clear_preempt_need_resched();
- #ifdef CONFIG_SCHED_DEBUG
- rq->last_seen_need_resched_ns = 0;
-@@ -6806,6 +6852,30 @@ static void __sched notrace preempt_schedule_common(void)
- } while (need_resched());
- }
-
-+#ifdef CONFIG_PREEMPT_LAZY
-+/*
-+ * If TIF_NEED_RESCHED is then we allow to be scheduled away since this is
-+ * set by a RT task. Oterwise we try to avoid beeing scheduled out as long as
-+ * preempt_lazy_count counter >0.
-+ */
-+static __always_inline int preemptible_lazy(void)
-+{
-+ if (test_thread_flag(TIF_NEED_RESCHED))
-+ return 1;
-+ if (current_thread_info()->preempt_lazy_count)
-+ return 0;
-+ return 1;
-+}
-+
-+#else
-+
-+static inline int preemptible_lazy(void)
-+{
-+ return 1;
-+}
-+
-+#endif
-+
- #ifdef CONFIG_PREEMPTION
- /*
- * This is the entry point to schedule() from in-kernel preemption
-@@ -6819,6 +6889,8 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
- */
- if (likely(!preemptible()))
- return;
-+ if (!preemptible_lazy())
-+ return;
- preempt_schedule_common();
- }
- NOKPROBE_SYMBOL(preempt_schedule);
-@@ -6866,6 +6938,9 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
- if (likely(!preemptible()))
- return;
-
-+ if (!preemptible_lazy())
-+ return;
-+
- do {
- /*
- * Because the function tracer can trace preempt_count_sub()
-@@ -9131,7 +9206,9 @@ void __init init_idle(struct task_struct *idle, int cpu)
-
- /* Set the preempt count _outside_ the spinlocks! */
- init_idle_preempt_count(idle, cpu);
--
-+#ifdef CONFIG_HAVE_PREEMPT_LAZY
-+ task_thread_info(idle)->preempt_lazy_count = 0;
-+#endif
- /*
- * The idle tasks have their own, simple scheduling class:
- */
-diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index 2558ab9033be..2dc35af7b5a6 100644
---- a/kernel/sched/fair.c
-+++ b/kernel/sched/fair.c
-@@ -4914,7 +4914,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
- ideal_runtime = sched_slice(cfs_rq, curr);
- delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
- if (delta_exec > ideal_runtime) {
-- resched_curr(rq_of(cfs_rq));
-+ resched_curr_lazy(rq_of(cfs_rq));
- /*
- * The current task ran long enough, ensure it doesn't get
- * re-elected due to buddy favours.
-@@ -4938,7 +4938,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
- return;
-
- if (delta > ideal_runtime)
-- resched_curr(rq_of(cfs_rq));
-+ resched_curr_lazy(rq_of(cfs_rq));
- }
-
- static void
-@@ -5084,7 +5084,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
- * validating it and just reschedule.
- */
- if (queued) {
-- resched_curr(rq_of(cfs_rq));
-+ resched_curr_lazy(rq_of(cfs_rq));
- return;
- }
- /*
-@@ -5233,7 +5233,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
- * hierarchy can be throttled
- */
- if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
-- resched_curr(rq_of(cfs_rq));
-+ resched_curr_lazy(rq_of(cfs_rq));
- }
-
- static __always_inline
-@@ -5984,7 +5984,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
-
- if (delta < 0) {
- if (task_current(rq, p))
-- resched_curr(rq);
-+ resched_curr_lazy(rq);
- return;
- }
- hrtick_start(rq, delta);
-@@ -7712,7 +7712,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
- return;
-
- preempt:
-- resched_curr(rq);
-+ resched_curr_lazy(rq);
- /*
- * Only set the backward buddy when the current task is still
- * on the rq. This can happen when a wakeup gets interleaved
-@@ -11877,7 +11877,7 @@ static void task_fork_fair(struct task_struct *p)
- * 'current' within the tree based on its new key value.
- */
- swap(curr->vruntime, se->vruntime);
-- resched_curr(rq);
-+ resched_curr_lazy(rq);
- }
-
- se->vruntime -= cfs_rq->min_vruntime;
-@@ -11904,7 +11904,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
- */
- if (task_current(rq, p)) {
- if (p->prio > oldprio)
-- resched_curr(rq);
-+ resched_curr_lazy(rq);
- } else
- check_preempt_curr(rq, p, 0);
- }
-diff --git a/kernel/sched/features.h b/kernel/sched/features.h
-index ee7f23c76bd3..e13090e33f3c 100644
---- a/kernel/sched/features.h
-+++ b/kernel/sched/features.h
-@@ -48,6 +48,9 @@ SCHED_FEAT(NONTASK_CAPACITY, true)
-
- #ifdef CONFIG_PREEMPT_RT
- SCHED_FEAT(TTWU_QUEUE, false)
-+# ifdef CONFIG_PREEMPT_LAZY
-+SCHED_FEAT(PREEMPT_LAZY, true)
-+# endif
- #else
-
- /*
-diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index b62d53d7c264..f2577f511a41 100644
---- a/kernel/sched/sched.h
-+++ b/kernel/sched/sched.h
-@@ -2350,6 +2350,15 @@ extern void reweight_task(struct task_struct *p, int prio);
- extern void resched_curr(struct rq *rq);
- extern void resched_cpu(int cpu);
-
-+#ifdef CONFIG_PREEMPT_LAZY
-+extern void resched_curr_lazy(struct rq *rq);
-+#else
-+static inline void resched_curr_lazy(struct rq *rq)
-+{
-+ resched_curr(rq);
-+}
-+#endif
-+
- extern struct rt_bandwidth def_rt_bandwidth;
- extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
- extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
-diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
-index deae65af76ec..edf0407d5498 100644
---- a/kernel/trace/trace.c
-+++ b/kernel/trace/trace.c
-@@ -2630,11 +2630,19 @@ unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
- if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
- trace_flags |= TRACE_FLAG_BH_OFF;
-
-- if (tif_need_resched())
-+ if (tif_need_resched_now())
- trace_flags |= TRACE_FLAG_NEED_RESCHED;
-+#ifdef CONFIG_PREEMPT_LAZY
-+ /* Run out of bits. Share the LAZY and PREEMPT_RESCHED */
-+ if (need_resched_lazy())
-+ trace_flags |= TRACE_FLAG_NEED_RESCHED_LAZY;
-+#else
- if (test_preempt_need_resched())
- trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
-- return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
-+#endif
-+
-+ return (trace_flags << 24) | (min_t(unsigned int, pc & 0xff, 0xf)) |
-+ (preempt_lazy_count() & 0xff) << 16 |
- (min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
- }
-
-@@ -4226,15 +4234,17 @@ unsigned long trace_total_entries(struct trace_array *tr)
-
- static void print_lat_help_header(struct seq_file *m)
- {
-- seq_puts(m, "# _------=> CPU# \n"
-- "# / _-----=> irqs-off/BH-disabled\n"
-- "# | / _----=> need-resched \n"
-- "# || / _---=> hardirq/softirq \n"
-- "# ||| / _--=> preempt-depth \n"
-- "# |||| / _-=> migrate-disable \n"
-- "# ||||| / delay \n"
-- "# cmd pid |||||| time | caller \n"
-- "# \\ / |||||| \\ | / \n");
-+ seq_puts(m, "# _--------=> CPU# \n"
-+ "# / _-------=> irqs-off/BH-disabled\n"
-+ "# | / _------=> need-resched \n"
-+ "# || / _-----=> need-resched-lazy\n"
-+ "# ||| / _----=> hardirq/softirq \n"
-+ "# |||| / _---=> preempt-depth \n"
-+ "# ||||| / _--=> preempt-lazy-depth\n"
-+ "# |||||| / _-=> migrate-disable \n"
-+ "# ||||||| / delay \n"
-+ "# cmd pid |||||||| time | caller \n"
-+ "# \\ / |||||||| \\ | / \n");
- }
-
- static void print_event_info(struct array_buffer *buf, struct seq_file *m)
-@@ -4268,14 +4278,16 @@ static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file
-
- print_event_info(buf, m);
-
-- seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space);
-- seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
-- seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
-- seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
-- seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space);
-- seq_printf(m, "# %.*s|||| / delay\n", prec, space);
-- seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
-- seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | ");
-+ seq_printf(m, "# %.*s _-------=> irqs-off/BH-disabled\n", prec, space);
-+ seq_printf(m, "# %.*s / _------=> need-resched\n", prec, space);
-+ seq_printf(m, "# %.*s| / _-----=> need-resched-lazy\n", prec, space);
-+ seq_printf(m, "# %.*s|| / _----=> hardirq/softirq\n", prec, space);
-+ seq_printf(m, "# %.*s||| / _---=> preempt-depth\n", prec, space);
-+ seq_printf(m, "# %.*s|||| / _--=> preempt-lazy-depth\n", prec, space);
-+ seq_printf(m, "# %.*s||||| / _-=> migrate-disable\n", prec, space);
-+ seq_printf(m, "# %.*s|||||| / delay\n", prec, space);
-+ seq_printf(m, "# TASK-PID %.*s CPU# ||||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
-+ seq_printf(m, "# | | %.*s | ||||||| | |\n", prec, " | ");
- }
-
- void
-diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
-index a6d2f99f847d..493c3f9cf01a 100644
---- a/kernel/trace/trace_events.c
-+++ b/kernel/trace/trace_events.c
-@@ -208,6 +208,7 @@ static int trace_define_common_fields(void)
- /* Holds both preempt_count and migrate_disable */
- __common_field(unsigned char, preempt_count);
- __common_field(int, pid);
-+ __common_field(unsigned char, preempt_lazy_count);
-
- return ret;
- }
-diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
-index 5cd4fb656306..3c227e2843ae 100644
---- a/kernel/trace/trace_output.c
-+++ b/kernel/trace/trace_output.c
-@@ -442,6 +442,7 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
- {
- char hardsoft_irq;
- char need_resched;
-+ char need_resched_lazy;
- char irqs_off;
- int hardirq;
- int softirq;
-@@ -462,20 +463,27 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
-
- switch (entry->flags & (TRACE_FLAG_NEED_RESCHED |
- TRACE_FLAG_PREEMPT_RESCHED)) {
-+#ifndef CONFIG_PREEMPT_LAZY
- case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED:
- need_resched = 'N';
- break;
-+#endif
- case TRACE_FLAG_NEED_RESCHED:
- need_resched = 'n';
- break;
-+#ifndef CONFIG_PREEMPT_LAZY
- case TRACE_FLAG_PREEMPT_RESCHED:
- need_resched = 'p';
- break;
-+#endif
- default:
- need_resched = '.';
- break;
- }
-
-+ need_resched_lazy =
-+ (entry->flags & TRACE_FLAG_NEED_RESCHED_LAZY) ? 'L' : '.';
-+
- hardsoft_irq =
- (nmi && hardirq) ? 'Z' :
- nmi ? 'z' :
-@@ -484,14 +492,20 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
- softirq ? 's' :
- '.' ;
-
-- trace_seq_printf(s, "%c%c%c",
-- irqs_off, need_resched, hardsoft_irq);
-+ trace_seq_printf(s, "%c%c%c%c",
-+ irqs_off, need_resched, need_resched_lazy,
-+ hardsoft_irq);
-
- if (entry->preempt_count & 0xf)
- trace_seq_printf(s, "%x", entry->preempt_count & 0xf);
- else
- trace_seq_putc(s, '.');
-
-+ if (entry->preempt_lazy_count)
-+ trace_seq_printf(s, "%x", entry->preempt_lazy_count);
-+ else
-+ trace_seq_putc(s, '.');
-+
- if (entry->preempt_count & 0xf0)
- trace_seq_printf(s, "%x", entry->preempt_count >> 4);
- else
---
-2.43.0
-
diff --git a/buildroot-external/patches/linux/0032-serial-8250_bcm7271-Use-port-lock-wrappers.patch b/buildroot-external/patches/linux/0032-serial-8250_bcm7271-Use-port-lock-wrappers.patch
new file mode 100644
index 00000000..de2a3623
--- /dev/null
+++ b/buildroot-external/patches/linux/0032-serial-8250_bcm7271-Use-port-lock-wrappers.patch
@@ -0,0 +1,156 @@
+From 9b3ce8f4924d20af62e1958030c88c035e5664a6 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner
+Date: Thu, 14 Sep 2023 20:43:22 +0206
+Subject: [PATCH 032/195] serial: 8250_bcm7271: Use port lock wrappers
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner
+Signed-off-by: John Ogness
+Link: https://lore.kernel.org/r/20230914183831.587273-6-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman
+Signed-off-by: Sebastian Andrzej Siewior
+---
+ drivers/tty/serial/8250/8250_bcm7271.c | 28 +++++++++++++-------------
+ 1 file changed, 14 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c
+index aa5aff046756..ff0662c68725 100644
+--- a/drivers/tty/serial/8250/8250_bcm7271.c
++++ b/drivers/tty/serial/8250/8250_bcm7271.c
+@@ -567,7 +567,7 @@ static irqreturn_t brcmuart_isr(int irq, void *dev_id)
+ if (interrupts == 0)
+ return IRQ_NONE;
+
+- spin_lock_irqsave(&up->lock, flags);
++ uart_port_lock_irqsave(up, &flags);
+
+ /* Clear all interrupts */
+ udma_writel(priv, REGS_DMA_ISR, UDMA_INTR_CLEAR, interrupts);
+@@ -581,7 +581,7 @@ static irqreturn_t brcmuart_isr(int irq, void *dev_id)
+ if ((rval | tval) == 0)
+ dev_warn(dev, "Spurious interrupt: 0x%x\n", interrupts);
+
+- spin_unlock_irqrestore(&up->lock, flags);
++ uart_port_unlock_irqrestore(up, flags);
+ return IRQ_HANDLED;
+ }
+
+@@ -608,10 +608,10 @@ static int brcmuart_startup(struct uart_port *port)
+ *
+ * Synchronize UART_IER access against the console.
+ */
+- spin_lock_irq(&port->lock);
++ uart_port_lock_irq(port);
+ up->ier &= ~UART_IER_RDI;
+ serial_port_out(port, UART_IER, up->ier);
+- spin_unlock_irq(&port->lock);
++ uart_port_unlock_irq(port);
+
+ priv->tx_running = false;
+ priv->dma.rx_dma = NULL;
+@@ -629,7 +629,7 @@ static void brcmuart_shutdown(struct uart_port *port)
+ struct brcmuart_priv *priv = up->port.private_data;
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ priv->shutdown = true;
+ if (priv->dma_enabled) {
+ stop_rx_dma(up);
+@@ -645,7 +645,7 @@ static void brcmuart_shutdown(struct uart_port *port)
+ */
+ up->dma = NULL;
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ serial8250_do_shutdown(port);
+ }
+
+@@ -788,7 +788,7 @@ static int brcmuart_handle_irq(struct uart_port *p)
+ * interrupt but there is no data ready.
+ */
+ if (((iir & UART_IIR_ID) == UART_IIR_RX_TIMEOUT) && !(priv->shutdown)) {
+- spin_lock_irqsave(&p->lock, flags);
++ uart_port_lock_irqsave(p, &flags);
+ status = serial_port_in(p, UART_LSR);
+ if ((status & UART_LSR_DR) == 0) {
+
+@@ -813,7 +813,7 @@ static int brcmuart_handle_irq(struct uart_port *p)
+
+ handled = 1;
+ }
+- spin_unlock_irqrestore(&p->lock, flags);
++ uart_port_unlock_irqrestore(p, flags);
+ if (handled)
+ return 1;
+ }
+@@ -831,7 +831,7 @@ static enum hrtimer_restart brcmuart_hrtimer_func(struct hrtimer *t)
+ if (priv->shutdown)
+ return HRTIMER_NORESTART;
+
+- spin_lock_irqsave(&p->lock, flags);
++ uart_port_lock_irqsave(p, &flags);
+ status = serial_port_in(p, UART_LSR);
+
+ /*
+@@ -855,7 +855,7 @@ static enum hrtimer_restart brcmuart_hrtimer_func(struct hrtimer *t)
+ status |= UART_MCR_RTS;
+ serial_port_out(p, UART_MCR, status);
+ }
+- spin_unlock_irqrestore(&p->lock, flags);
++ uart_port_unlock_irqrestore(p, flags);
+ return HRTIMER_NORESTART;
+ }
+
+@@ -1154,10 +1154,10 @@ static int __maybe_unused brcmuart_suspend(struct device *dev)
+ * This will prevent resume from enabling RTS before the
+ * baud rate has been restored.
+ */
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ priv->saved_mctrl = port->mctrl;
+ port->mctrl &= ~TIOCM_RTS;
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ serial8250_suspend_port(priv->line);
+ clk_disable_unprepare(priv->baud_mux_clk);
+@@ -1196,10 +1196,10 @@ static int __maybe_unused brcmuart_resume(struct device *dev)
+
+ if (priv->saved_mctrl & TIOCM_RTS) {
+ /* Restore RTS */
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ port->mctrl |= TIOCM_RTS;
+ port->ops->set_mctrl(port, port->mctrl);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ return 0;
+--
+2.43.0
+
diff --git a/buildroot-external/patches/linux/0033-serial-8250-Use-port-lock-wrappers.patch b/buildroot-external/patches/linux/0033-serial-8250-Use-port-lock-wrappers.patch
new file mode 100644
index 00000000..de2ee7e9
--- /dev/null
+++ b/buildroot-external/patches/linux/0033-serial-8250-Use-port-lock-wrappers.patch
@@ -0,0 +1,472 @@
+From 580e52e63f02c9243031af81c819aa0098ba9d75 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner
+Date: Thu, 14 Sep 2023 20:43:23 +0206
+Subject: [PATCH 033/195] serial: 8250: Use port lock wrappers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner
+Reviewed-by: Ilpo Järvinen
+Signed-off-by: John Ogness
+Link: https://lore.kernel.org/r/20230914183831.587273-7-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman
+Signed-off-by: Sebastian Andrzej Siewior
+---
+ drivers/tty/serial/8250/8250_core.c | 12 ++--
+ drivers/tty/serial/8250/8250_port.c | 100 ++++++++++++++--------------
+ 2 files changed, 56 insertions(+), 56 deletions(-)
+
+diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
+index 3449f8790e46..904e319e6b4a 100644
+--- a/drivers/tty/serial/8250/8250_core.c
++++ b/drivers/tty/serial/8250/8250_core.c
+@@ -259,7 +259,7 @@ static void serial8250_backup_timeout(struct timer_list *t)
+ unsigned int iir, ier = 0, lsr;
+ unsigned long flags;
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+
+ /*
+ * Must disable interrupts or else we risk racing with the interrupt
+@@ -292,7 +292,7 @@ static void serial8250_backup_timeout(struct timer_list *t)
+ if (up->port.irq)
+ serial_out(up, UART_IER, ier);
+
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+
+ /* Standard timer interval plus 0.2s to keep the port running */
+ mod_timer(&up->timer,
+@@ -992,11 +992,11 @@ static void serial_8250_overrun_backoff_work(struct work_struct *work)
+ struct uart_port *port = &up->port;
+ unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ up->ier |= UART_IER_RLSI | UART_IER_RDI;
+ up->port.read_status_mask |= UART_LSR_DR;
+ serial_out(up, UART_IER, up->ier);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ /**
+@@ -1194,9 +1194,9 @@ void serial8250_unregister_port(int line)
+ if (uart->em485) {
+ unsigned long flags;
+
+- spin_lock_irqsave(&uart->port.lock, flags);
++ uart_port_lock_irqsave(&uart->port, &flags);
+ serial8250_em485_destroy(uart);
+- spin_unlock_irqrestore(&uart->port.lock, flags);
++ uart_port_unlock_irqrestore(&uart->port, flags);
+ }
+
+ uart_remove_one_port(&serial8250_reg, &uart->port);
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index 141627370aab..5b57254ae975 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -689,7 +689,7 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
+
+ if (p->capabilities & UART_CAP_SLEEP) {
+ /* Synchronize UART_IER access against the console. */
+- spin_lock_irq(&p->port.lock);
++ uart_port_lock_irq(&p->port);
+ if (p->capabilities & UART_CAP_EFR) {
+ lcr = serial_in(p, UART_LCR);
+ efr = serial_in(p, UART_EFR);
+@@ -703,7 +703,7 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
+ serial_out(p, UART_EFR, efr);
+ serial_out(p, UART_LCR, lcr);
+ }
+- spin_unlock_irq(&p->port.lock);
++ uart_port_unlock_irq(&p->port);
+ }
+
+ serial8250_rpm_put(p);
+@@ -746,9 +746,9 @@ static void enable_rsa(struct uart_8250_port *up)
+ {
+ if (up->port.type == PORT_RSA) {
+ if (up->port.uartclk != SERIAL_RSA_BAUD_BASE * 16) {
+- spin_lock_irq(&up->port.lock);
++ uart_port_lock_irq(&up->port);
+ __enable_rsa(up);
+- spin_unlock_irq(&up->port.lock);
++ uart_port_unlock_irq(&up->port);
+ }
+ if (up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16)
+ serial_out(up, UART_RSA_FRR, 0);
+@@ -768,7 +768,7 @@ static void disable_rsa(struct uart_8250_port *up)
+
+ if (up->port.type == PORT_RSA &&
+ up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16) {
+- spin_lock_irq(&up->port.lock);
++ uart_port_lock_irq(&up->port);
+
+ mode = serial_in(up, UART_RSA_MSR);
+ result = !(mode & UART_RSA_MSR_FIFO);
+@@ -781,7 +781,7 @@ static void disable_rsa(struct uart_8250_port *up)
+
+ if (result)
+ up->port.uartclk = SERIAL_RSA_BAUD_BASE_LO * 16;
+- spin_unlock_irq(&up->port.lock);
++ uart_port_unlock_irq(&up->port);
+ }
+ }
+ #endif /* CONFIG_SERIAL_8250_RSA */
+@@ -1172,7 +1172,7 @@ static void autoconfig(struct uart_8250_port *up)
+ *
+ * Synchronize UART_IER access against the console.
+ */
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ up->capabilities = 0;
+ up->bugs = 0;
+@@ -1211,7 +1211,7 @@ static void autoconfig(struct uart_8250_port *up)
+ /*
+ * We failed; there's nothing here
+ */
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ DEBUG_AUTOCONF("IER test failed (%02x, %02x) ",
+ scratch2, scratch3);
+ goto out;
+@@ -1235,7 +1235,7 @@ static void autoconfig(struct uart_8250_port *up)
+ status1 = serial_in(up, UART_MSR) & UART_MSR_STATUS_BITS;
+ serial8250_out_MCR(up, save_mcr);
+ if (status1 != (UART_MSR_DCD | UART_MSR_CTS)) {
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ DEBUG_AUTOCONF("LOOP test failed (%02x) ",
+ status1);
+ goto out;
+@@ -1304,7 +1304,7 @@ static void autoconfig(struct uart_8250_port *up)
+ serial8250_clear_IER(up);
+
+ out_unlock:
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ /*
+ * Check if the device is a Fintek F81216A
+@@ -1344,9 +1344,9 @@ static void autoconfig_irq(struct uart_8250_port *up)
+ probe_irq_off(probe_irq_on());
+ save_mcr = serial8250_in_MCR(up);
+ /* Synchronize UART_IER access against the console. */
+- spin_lock_irq(&port->lock);
++ uart_port_lock_irq(port);
+ save_ier = serial_in(up, UART_IER);
+- spin_unlock_irq(&port->lock);
++ uart_port_unlock_irq(port);
+ serial8250_out_MCR(up, UART_MCR_OUT1 | UART_MCR_OUT2);
+
+ irqs = probe_irq_on();
+@@ -1359,9 +1359,9 @@ static void autoconfig_irq(struct uart_8250_port *up)
+ UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2);
+ }
+ /* Synchronize UART_IER access against the console. */
+- spin_lock_irq(&port->lock);
++ uart_port_lock_irq(port);
+ serial_out(up, UART_IER, UART_IER_ALL_INTR);
+- spin_unlock_irq(&port->lock);
++ uart_port_unlock_irq(port);
+ serial_in(up, UART_LSR);
+ serial_in(up, UART_RX);
+ serial_in(up, UART_IIR);
+@@ -1372,9 +1372,9 @@ static void autoconfig_irq(struct uart_8250_port *up)
+
+ serial8250_out_MCR(up, save_mcr);
+ /* Synchronize UART_IER access against the console. */
+- spin_lock_irq(&port->lock);
++ uart_port_lock_irq(port);
+ serial_out(up, UART_IER, save_ier);
+- spin_unlock_irq(&port->lock);
++ uart_port_unlock_irq(port);
+
+ if (port->flags & UPF_FOURPORT)
+ outb_p(save_ICP, ICP);
+@@ -1442,13 +1442,13 @@ static enum hrtimer_restart serial8250_em485_handle_stop_tx(struct hrtimer *t)
+ unsigned long flags;
+
+ serial8250_rpm_get(p);
+- spin_lock_irqsave(&p->port.lock, flags);
++ uart_port_lock_irqsave(&p->port, &flags);
+ if (em485->active_timer == &em485->stop_tx_timer) {
+ p->rs485_stop_tx(p);
+ em485->active_timer = NULL;
+ em485->tx_stopped = true;
+ }
+- spin_unlock_irqrestore(&p->port.lock, flags);
++ uart_port_unlock_irqrestore(&p->port, flags);
+ serial8250_rpm_put(p);
+
+ return HRTIMER_NORESTART;
+@@ -1630,12 +1630,12 @@ static enum hrtimer_restart serial8250_em485_handle_start_tx(struct hrtimer *t)
+ struct uart_8250_port *p = em485->port;
+ unsigned long flags;
+
+- spin_lock_irqsave(&p->port.lock, flags);
++ uart_port_lock_irqsave(&p->port, &flags);
+ if (em485->active_timer == &em485->start_tx_timer) {
+ __start_tx(&p->port);
+ em485->active_timer = NULL;
+ }
+- spin_unlock_irqrestore(&p->port.lock, flags);
++ uart_port_unlock_irqrestore(&p->port, flags);
+
+ return HRTIMER_NORESTART;
+ }
+@@ -1918,7 +1918,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
+ if (iir & UART_IIR_NO_INT)
+ return 0;
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ status = serial_lsr_in(up);
+
+@@ -1988,9 +1988,9 @@ static int serial8250_tx_threshold_handle_irq(struct uart_port *port)
+ if ((iir & UART_IIR_ID) == UART_IIR_THRI) {
+ struct uart_8250_port *up = up_to_u8250p(port);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ serial8250_tx_chars(up);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ iir = serial_port_in(port, UART_IIR);
+@@ -2005,10 +2005,10 @@ static unsigned int serial8250_tx_empty(struct uart_port *port)
+
+ serial8250_rpm_get(up);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ if (!serial8250_tx_dma_running(up) && uart_lsr_tx_empty(serial_lsr_in(up)))
+ result = TIOCSER_TEMT;
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ serial8250_rpm_put(up);
+
+@@ -2070,13 +2070,13 @@ static void serial8250_break_ctl(struct uart_port *port, int break_state)
+ unsigned long flags;
+
+ serial8250_rpm_get(up);
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ if (break_state == -1)
+ up->lcr |= UART_LCR_SBC;
+ else
+ up->lcr &= ~UART_LCR_SBC;
+ serial_port_out(port, UART_LCR, up->lcr);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ serial8250_rpm_put(up);
+ }
+
+@@ -2211,7 +2211,7 @@ int serial8250_do_startup(struct uart_port *port)
+ *
+ * Synchronize UART_IER access against the console.
+ */
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ up->acr = 0;
+ serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
+ serial_port_out(port, UART_EFR, UART_EFR_ECB);
+@@ -2221,7 +2221,7 @@ int serial8250_do_startup(struct uart_port *port)
+ serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
+ serial_port_out(port, UART_EFR, UART_EFR_ECB);
+ serial_port_out(port, UART_LCR, 0);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ if (port->type == PORT_DA830) {
+@@ -2230,10 +2230,10 @@ int serial8250_do_startup(struct uart_port *port)
+ *
+ * Synchronize UART_IER access against the console.
+ */
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ serial_port_out(port, UART_IER, 0);
+ serial_port_out(port, UART_DA830_PWREMU_MGMT, 0);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ mdelay(10);
+
+ /* Enable Tx, Rx and free run mode */
+@@ -2347,7 +2347,7 @@ int serial8250_do_startup(struct uart_port *port)
+ *
+ * Synchronize UART_IER access against the console.
+ */
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ wait_for_xmitr(up, UART_LSR_THRE);
+ serial_port_out_sync(port, UART_IER, UART_IER_THRI);
+@@ -2359,7 +2359,7 @@ int serial8250_do_startup(struct uart_port *port)
+ iir = serial_port_in(port, UART_IIR);
+ serial_port_out(port, UART_IER, 0);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ if (port->irqflags & IRQF_SHARED)
+ enable_irq(port->irq);
+@@ -2382,7 +2382,7 @@ int serial8250_do_startup(struct uart_port *port)
+ */
+ serial_port_out(port, UART_LCR, UART_LCR_WLEN8);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ if (up->port.flags & UPF_FOURPORT) {
+ if (!up->port.irq)
+ up->port.mctrl |= TIOCM_OUT1;
+@@ -2428,7 +2428,7 @@ int serial8250_do_startup(struct uart_port *port)
+ }
+
+ dont_test_tx_en:
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ /*
+ * Clear the interrupt registers again for luck, and clear the
+@@ -2499,17 +2499,17 @@ void serial8250_do_shutdown(struct uart_port *port)
+ *
+ * Synchronize UART_IER access against the console.
+ */
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ up->ier = 0;
+ serial_port_out(port, UART_IER, 0);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ synchronize_irq(port->irq);
+
+ if (up->dma)
+ serial8250_release_dma(up);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ if (port->flags & UPF_FOURPORT) {
+ /* reset interrupts on the AST Fourport board */
+ inb((port->iobase & 0xfe0) | 0x1f);
+@@ -2518,7 +2518,7 @@ void serial8250_do_shutdown(struct uart_port *port)
+ port->mctrl &= ~TIOCM_OUT2;
+
+ serial8250_set_mctrl(port, port->mctrl);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ /*
+ * Disable break condition and FIFOs
+@@ -2754,14 +2754,14 @@ void serial8250_update_uartclk(struct uart_port *port, unsigned int uartclk)
+ quot = serial8250_get_divisor(port, baud, &frac);
+
+ serial8250_rpm_get(up);
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ serial8250_set_divisor(port, baud, quot, frac);
+ serial_port_out(port, UART_LCR, up->lcr);
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ serial8250_rpm_put(up);
+
+ out_unlock:
+@@ -2798,7 +2798,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
+ * Synchronize UART_IER access against the console.
+ */
+ serial8250_rpm_get(up);
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ up->lcr = cval; /* Save computed LCR */
+
+@@ -2901,7 +2901,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
+ serial_port_out(port, UART_FCR, up->fcr); /* set fcr */
+ }
+ serial8250_set_mctrl(port, port->mctrl);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ serial8250_rpm_put(up);
+
+ /* Don't rewrite B0 */
+@@ -2924,15 +2924,15 @@ void serial8250_do_set_ldisc(struct uart_port *port, struct ktermios *termios)
+ {
+ if (termios->c_line == N_PPS) {
+ port->flags |= UPF_HARDPPS_CD;
+- spin_lock_irq(&port->lock);
++ uart_port_lock_irq(port);
+ serial8250_enable_ms(port);
+- spin_unlock_irq(&port->lock);
++ uart_port_unlock_irq(port);
+ } else {
+ port->flags &= ~UPF_HARDPPS_CD;
+ if (!UART_ENABLE_MS(port, termios->c_cflag)) {
+- spin_lock_irq(&port->lock);
++ uart_port_lock_irq(port);
+ serial8250_disable_ms(port);
+- spin_unlock_irq(&port->lock);
++ uart_port_unlock_irq(port);
+ }
+ }
+ }
+@@ -3406,9 +3406,9 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
+ touch_nmi_watchdog();
+
+ if (oops_in_progress)
+- locked = spin_trylock_irqsave(&port->lock, flags);
++ locked = uart_port_trylock_irqsave(port, &flags);
+ else
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /*
+ * First save the IER then disable the interrupts
+@@ -3478,7 +3478,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
+ serial8250_modem_status(up);
+
+ if (locked)
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ static unsigned int probe_baud(struct uart_port *port)
+--
+2.43.0
+
diff --git a/buildroot-external/patches/linux/0033-x86-entry-Use-should_resched-in-idtentry_exit_cond_r.patch b/buildroot-external/patches/linux/0033-x86-entry-Use-should_resched-in-idtentry_exit_cond_r.patch
deleted file mode 100644
index ea493160..00000000
--- a/buildroot-external/patches/linux/0033-x86-entry-Use-should_resched-in-idtentry_exit_cond_r.patch
+++ /dev/null
@@ -1,35 +0,0 @@
-From 4a7f3d980fdfa78e88ce38e493b5d39384f35079 Mon Sep 17 00:00:00 2001
-From: Sebastian Andrzej Siewior
-Date: Tue, 30 Jun 2020 11:45:14 +0200
-Subject: [PATCH 33/62] x86/entry: Use should_resched() in
- idtentry_exit_cond_resched()
-
-The TIF_NEED_RESCHED bit is inlined on x86 into the preemption counter.
-By using should_resched(0) instead of need_resched() the same check can
-be performed which uses the same variable as 'preempt_count()` which was
-issued before.
-
-Use should_resched(0) instead need_resched().
-
-Signed-off-by: Sebastian Andrzej Siewior
-Signed-off-by: Thomas Gleixner
----
- kernel/entry/common.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/kernel/entry/common.c b/kernel/entry/common.c
-index be61332c66b5..97ff5faad4fb 100644
---- a/kernel/entry/common.c
-+++ b/kernel/entry/common.c
-@@ -386,7 +386,7 @@ void raw_irqentry_exit_cond_resched(void)
- rcu_irq_exit_check_preempt();
- if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
- WARN_ON_ONCE(!on_thread_stack());
-- if (need_resched())
-+ if (should_resched(0))
- preempt_schedule_irq();
- }
- }
---
-2.43.0
-
diff --git a/buildroot-external/patches/linux/0034-serial-8250_dma-Use-port-lock-wrappers.patch b/buildroot-external/patches/linux/0034-serial-8250_dma-Use-port-lock-wrappers.patch
new file mode 100644
index 00000000..1ac0eec9
--- /dev/null
+++ b/buildroot-external/patches/linux/0034-serial-8250_dma-Use-port-lock-wrappers.patch
@@ -0,0 +1,85 @@
+From 70ceea48d1051ec15b3a8d66f62386b323e12e4e Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner
+Date: Thu, 14 Sep 2023 20:43:24 +0206
+Subject: [PATCH 034/195] serial: 8250_dma: Use port lock wrappers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner
+Reviewed-by: Ilpo Järvinen
+Signed-off-by: John Ogness
+Link: https://lore.kernel.org/r/20230914183831.587273-8-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman
+Signed-off-by: Sebastian Andrzej Siewior
+---
+ drivers/tty/serial/8250/8250_dma.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
+index 7fa66501792d..8b30ca8fdd3f 100644
+--- a/drivers/tty/serial/8250/8250_dma.c
++++ b/drivers/tty/serial/8250/8250_dma.c
+@@ -22,7 +22,7 @@ static void __dma_tx_complete(void *param)
+ dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
+ UART_XMIT_SIZE, DMA_TO_DEVICE);
+
+- spin_lock_irqsave(&p->port.lock, flags);
++ uart_port_lock_irqsave(&p->port, &flags);
+
+ dma->tx_running = 0;
+
+@@ -35,7 +35,7 @@ static void __dma_tx_complete(void *param)
+ if (ret || !dma->tx_running)
+ serial8250_set_THRI(p);
+
+- spin_unlock_irqrestore(&p->port.lock, flags);
++ uart_port_unlock_irqrestore(&p->port, flags);
+ }
+
+ static void __dma_rx_complete(struct uart_8250_port *p)
+@@ -70,7 +70,7 @@ static void dma_rx_complete(void *param)
+ struct uart_8250_dma *dma = p->dma;
+ unsigned long flags;
+
+- spin_lock_irqsave(&p->port.lock, flags);
++ uart_port_lock_irqsave(&p->port, &flags);
+ if (dma->rx_running)
+ __dma_rx_complete(p);
+
+@@ -80,7 +80,7 @@ static void dma_rx_complete(void *param)
+ */
+ if (!dma->rx_running && (serial_lsr_in(p) & UART_LSR_DR))
+ p->dma->rx_dma(p);
+- spin_unlock_irqrestore(&p->port.lock, flags);
++ uart_port_unlock_irqrestore(&p->port, flags);
+ }
+
+ int serial8250_tx_dma(struct uart_8250_port *p)
+--
+2.43.0
+
diff --git a/buildroot-external/patches/linux/0034-x86-Support-for-lazy-preemption.patch b/buildroot-external/patches/linux/0034-x86-Support-for-lazy-preemption.patch
deleted file mode 100644
index 0afc312d..00000000
--- a/buildroot-external/patches/linux/0034-x86-Support-for-lazy-preemption.patch
+++ /dev/null
@@ -1,157 +0,0 @@
-From b73c9a31ab7a5c00891726ff9d5da0297243d335 Mon Sep 17 00:00:00 2001
-From: Thomas Gleixner
-Date: Thu, 1 Nov 2012 11:03:47 +0100
-Subject: [PATCH 34/62] x86: Support for lazy preemption
-
-Implement the x86 pieces for lazy preempt.
-
-Signed-off-by: Thomas Gleixner
----
- arch/x86/Kconfig | 1 +
- arch/x86/include/asm/preempt.h | 33 +++++++++++++++++++++++++++++-
- arch/x86/include/asm/thread_info.h | 7 +++++++
- include/linux/entry-common.h | 2 +-
- kernel/entry/common.c | 2 +-
- 5 files changed, 42 insertions(+), 3 deletions(-)
-
-diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index c9bed9c69423..f38bd8a5061e 100644
---- a/arch/x86/Kconfig
-+++ b/arch/x86/Kconfig
-@@ -251,6 +251,7 @@ config X86
- select HAVE_PCI
- select HAVE_PERF_REGS
- select HAVE_PERF_USER_STACK_DUMP
-+ select HAVE_PREEMPT_LAZY
- select MMU_GATHER_RCU_TABLE_FREE if PARAVIRT
- select MMU_GATHER_MERGE_VMAS
- select HAVE_POSIX_CPU_TIMERS_TASK_WORK
-diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
-index 5f6daea1ee24..cd20b4a5719a 100644
---- a/arch/x86/include/asm/preempt.h
-+++ b/arch/x86/include/asm/preempt.h
-@@ -90,17 +90,48 @@ static __always_inline void __preempt_count_sub(int val)
- * a decrement which hits zero means we have no preempt_count and should
- * reschedule.
- */
--static __always_inline bool __preempt_count_dec_and_test(void)
-+static __always_inline bool ____preempt_count_dec_and_test(void)
- {
- return GEN_UNARY_RMWcc("decl", __preempt_count, e, __percpu_arg([var]));
- }
-
-+static __always_inline bool __preempt_count_dec_and_test(void)
-+{
-+ if (____preempt_count_dec_and_test())
-+ return true;
-+#ifdef CONFIG_PREEMPT_LAZY
-+ if (preempt_count())
-+ return false;
-+ if (current_thread_info()->preempt_lazy_count)
-+ return false;
-+ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
-+#else
-+ return false;
-+#endif
-+}
-+
- /*
- * Returns true when we need to resched and can (barring IRQ state).
- */
- static __always_inline bool should_resched(int preempt_offset)
- {
-+#ifdef CONFIG_PREEMPT_LAZY
-+ u32 tmp;
-+ tmp = raw_cpu_read_4(__preempt_count);
-+ if (tmp == preempt_offset)
-+ return true;
-+
-+ /* preempt count == 0 ? */
-+ tmp &= ~PREEMPT_NEED_RESCHED;
-+ if (tmp != preempt_offset)
-+ return false;
-+ /* XXX PREEMPT_LOCK_OFFSET */
-+ if (current_thread_info()->preempt_lazy_count)
-+ return false;
-+ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
-+#else
- return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
-+#endif
- }
-
- #ifdef CONFIG_PREEMPTION
-diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
-index f0cb881c1d69..0da06a9b5f72 100644
---- a/arch/x86/include/asm/thread_info.h
-+++ b/arch/x86/include/asm/thread_info.h
-@@ -57,6 +57,8 @@ struct thread_info {
- unsigned long flags; /* low level flags */
- unsigned long syscall_work; /* SYSCALL_WORK_ flags */
- u32 status; /* thread synchronous flags */
-+ int preempt_lazy_count; /* 0 => lazy preemptable
-+ <0 => BUG */
- #ifdef CONFIG_SMP
- u32 cpu; /* current CPU */
- #endif
-@@ -65,6 +67,7 @@ struct thread_info {
- #define INIT_THREAD_INFO(tsk) \
- { \
- .flags = 0, \
-+ .preempt_lazy_count = 0, \
- }
-
- #else /* !__ASSEMBLY__ */
-@@ -92,6 +95,7 @@ struct thread_info {
- #define TIF_NOCPUID 15 /* CPUID is not accessible in userland */
- #define TIF_NOTSC 16 /* TSC is not accessible in userland */
- #define TIF_NOTIFY_SIGNAL 17 /* signal notifications exist */
-+#define TIF_NEED_RESCHED_LAZY 19 /* lazy rescheduling necessary */
- #define TIF_MEMDIE 20 /* is terminating due to OOM killer */
- #define TIF_POLLING_NRFLAG 21 /* idle is polling for TIF_NEED_RESCHED */
- #define TIF_IO_BITMAP 22 /* uses I/O bitmap */
-@@ -115,6 +119,7 @@ struct thread_info {
- #define _TIF_NOCPUID (1 << TIF_NOCPUID)
- #define _TIF_NOTSC (1 << TIF_NOTSC)
- #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
-+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
- #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
- #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
- #define _TIF_SPEC_FORCE_UPDATE (1 << TIF_SPEC_FORCE_UPDATE)
-@@ -146,6 +151,8 @@ struct thread_info {
-
- #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
-
-+#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
-+
- #define STACK_WARN (THREAD_SIZE/8)
-
- /*
-diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
-index d95ab85f96ba..93cc1ae12125 100644
---- a/include/linux/entry-common.h
-+++ b/include/linux/entry-common.h
-@@ -59,7 +59,7 @@
-
- #define EXIT_TO_USER_MODE_WORK \
- (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
-- _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \
-+ _TIF_NEED_RESCHED_MASK | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \
- ARCH_EXIT_TO_USER_MODE_WORK)
-
- /**
-diff --git a/kernel/entry/common.c b/kernel/entry/common.c
-index 97ff5faad4fb..c6301e520d47 100644
---- a/kernel/entry/common.c
-+++ b/kernel/entry/common.c
-@@ -155,7 +155,7 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
-
- local_irq_enable_exit_to_user(ti_work);
-
-- if (ti_work & _TIF_NEED_RESCHED)
-+ if (ti_work & _TIF_NEED_RESCHED_MASK)
- schedule();
-
- if (ti_work & _TIF_UPROBE)
---
-2.43.0
-
diff --git a/buildroot-external/patches/linux/0035-entry-Fix-the-preempt-lazy-fallout.patch b/buildroot-external/patches/linux/0035-entry-Fix-the-preempt-lazy-fallout.patch
deleted file mode 100644
index 512f11fe..00000000
--- a/buildroot-external/patches/linux/0035-entry-Fix-the-preempt-lazy-fallout.patch
+++ /dev/null
@@ -1,48 +0,0 @@
-From a17c5b9f7e3fef4ab8b0a87fa33e6c89f6c89cba Mon Sep 17 00:00:00 2001
-From: Thomas Gleixner
-Date: Tue, 13 Jul 2021 07:52:52 +0200
-Subject: [PATCH 35/62] entry: Fix the preempt lazy fallout
-
-Common code needs common defines....
-
-Fixes: f2f9e496208c ("x86: Support for lazy preemption")
-Reported-by: kernel test robot
-Signed-off-by: Thomas Gleixner
----
- arch/x86/include/asm/thread_info.h | 2 --
- include/linux/entry-common.h | 6 ++++++
- 2 files changed, 6 insertions(+), 2 deletions(-)
-
-diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
-index 0da06a9b5f72..fd8fb76f324f 100644
---- a/arch/x86/include/asm/thread_info.h
-+++ b/arch/x86/include/asm/thread_info.h
-@@ -151,8 +151,6 @@ struct thread_info {
-
- #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
-
--#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
--
- #define STACK_WARN (THREAD_SIZE/8)
-
- /*
-diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
-index 93cc1ae12125..3dc3704a3cdb 100644
---- a/include/linux/entry-common.h
-+++ b/include/linux/entry-common.h
-@@ -57,6 +57,12 @@
- # define ARCH_EXIT_TO_USER_MODE_WORK (0)
- #endif
-
-+#ifdef CONFIG_PREEMPT_LAZY
-+# define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
-+#else
-+# define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED)
-+#endif
-+
- #define EXIT_TO_USER_MODE_WORK \
- (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
- _TIF_NEED_RESCHED_MASK | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \
---
-2.43.0
-
diff --git a/buildroot-external/patches/linux/0035-serial-8250_dw-Use-port-lock-wrappers.patch b/buildroot-external/patches/linux/0035-serial-8250_dw-Use-port-lock-wrappers.patch
new file mode 100644
index 00000000..65107ccb
--- /dev/null
+++ b/buildroot-external/patches/linux/0035-serial-8250_dw-Use-port-lock-wrappers.patch
@@ -0,0 +1,74 @@
+From 5cca2b99cbc7d88d8eeb8087e9130e127c83d385 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner
+Date: Thu, 14 Sep 2023 20:43:25 +0206
+Subject: [PATCH 035/195] serial: 8250_dw: Use port lock wrappers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner
+Reviewed-by: Ilpo Järvinen
+Signed-off-by: John Ogness
+Link: https://lore.kernel.org/r/20230914183831.587273-9-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman
+Signed-off-by: Sebastian Andrzej Siewior
+---
+ drivers/tty/serial/8250/8250_dw.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
+index a1f2259cc9a9..53c284bb271d 100644
+--- a/drivers/tty/serial/8250/8250_dw.c
++++ b/drivers/tty/serial/8250/8250_dw.c
+@@ -263,20 +263,20 @@ static int dw8250_handle_irq(struct uart_port *p)
+ * so we limit the workaround only to non-DMA mode.
+ */
+ if (!up->dma && rx_timeout) {
+- spin_lock_irqsave(&p->lock, flags);
++ uart_port_lock_irqsave(p, &flags);
+ status = serial_lsr_in(up);
+
+ if (!(status & (UART_LSR_DR | UART_LSR_BI)))
+ (void) p->serial_in(p, UART_RX);
+
+- spin_unlock_irqrestore(&p->lock, flags);
++ uart_port_unlock_irqrestore(p, flags);
+ }
+
+ /* Manually stop the Rx DMA transfer when acting as flow controller */
+ if (quirks & DW_UART_QUIRK_IS_DMA_FC && up->dma && up->dma->rx_running && rx_timeout) {
+- spin_lock_irqsave(&p->lock, flags);
++ uart_port_lock_irqsave(p, &flags);
+ status = serial_lsr_in(up);
+- spin_unlock_irqrestore(&p->lock, flags);
++ uart_port_unlock_irqrestore(p, flags);
+
+ if (status & (UART_LSR_DR | UART_LSR_BI)) {
+ dw8250_writel_ext(p, RZN1_UART_RDMACR, 0);
+--
+2.43.0
+
diff --git a/buildroot-external/patches/linux/0036-arm-Add-support-for-lazy-preemption.patch b/buildroot-external/patches/linux/0036-arm-Add-support-for-lazy-preemption.patch
deleted file mode 100644
index 8363fd29..00000000
--- a/buildroot-external/patches/linux/0036-arm-Add-support-for-lazy-preemption.patch
+++ /dev/null
@@ -1,136 +0,0 @@
-From 93b892ee12b8eb43a72f308c981f3c68c6ae8b45 Mon Sep 17 00:00:00 2001
-From: Thomas Gleixner
-Date: Wed, 31 Oct 2012 12:04:11 +0100
-Subject: [PATCH 36/62] arm: Add support for lazy preemption
-
-Implement the arm pieces for lazy preempt.
-
-Signed-off-by: Thomas Gleixner
----
- arch/arm/Kconfig | 1 +
- arch/arm/include/asm/thread_info.h | 6 +++++-
- arch/arm/kernel/asm-offsets.c | 1 +
- arch/arm/kernel/entry-armv.S | 19 ++++++++++++++++---
- arch/arm/kernel/signal.c | 3 ++-
- 5 files changed, 25 insertions(+), 5 deletions(-)
-
-diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
-index 6d5afe2e6ba3..717e596dc13b 100644
---- a/arch/arm/Kconfig
-+++ b/arch/arm/Kconfig
-@@ -115,6 +115,7 @@ config ARM
- select HAVE_PERF_EVENTS
- select HAVE_PERF_REGS
- select HAVE_PERF_USER_STACK_DUMP
-+ select HAVE_PREEMPT_LAZY
- select MMU_GATHER_RCU_TABLE_FREE if SMP && ARM_LPAE
- select HAVE_REGS_AND_STACK_ACCESS_API
- select HAVE_RSEQ
-diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
-index 7f092cb55a41..ffcbf8ebed4b 100644
---- a/arch/arm/include/asm/thread_info.h
-+++ b/arch/arm/include/asm/thread_info.h
-@@ -62,6 +62,7 @@ struct cpu_context_save {
- struct thread_info {
- unsigned long flags; /* low level flags */
- int preempt_count; /* 0 => preemptable, <0 => bug */
-+ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
- __u32 cpu; /* cpu */
- __u32 cpu_domain; /* cpu domain */
- struct cpu_context_save cpu_context; /* cpu context */
-@@ -129,6 +130,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
- #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
- #define TIF_UPROBE 3 /* breakpointed or singlestepping */
- #define TIF_NOTIFY_SIGNAL 4 /* signal notifications exist */
-+#define TIF_NEED_RESCHED_LAZY 5
-
- #define TIF_USING_IWMMXT 17
- #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
-@@ -148,6 +150,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
- #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
- #define _TIF_SECCOMP (1 << TIF_SECCOMP)
- #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
-+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
- #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
-
- /* Checks for any syscall work in entry-common.S */
-@@ -157,7 +160,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
- /*
- * Change these and you break ASM code in entry-common.S
- */
--#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
-+#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | \
-+ _TIF_SIGPENDING | \
- _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
- _TIF_NOTIFY_SIGNAL)
-
-diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
-index 2c8d76fd7c66..c3bdec7d2df9 100644
---- a/arch/arm/kernel/asm-offsets.c
-+++ b/arch/arm/kernel/asm-offsets.c
-@@ -43,6 +43,7 @@ int main(void)
- BLANK();
- DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
- DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
-+ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
- DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
- DEFINE(TI_CPU_DOMAIN, offsetof(struct thread_info, cpu_domain));
- DEFINE(TI_CPU_SAVE, offsetof(struct thread_info, cpu_context));
-diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
-index c39303e5c234..cfb4660e9fea 100644
---- a/arch/arm/kernel/entry-armv.S
-+++ b/arch/arm/kernel/entry-armv.S
-@@ -222,11 +222,18 @@ ENDPROC(__dabt_svc)
-
- #ifdef CONFIG_PREEMPTION
- ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
-- ldr r0, [tsk, #TI_FLAGS] @ get flags
- teq r8, #0 @ if preempt count != 0
-+ bne 1f @ return from exeption
-+ ldr r0, [tsk, #TI_FLAGS] @ get flags
-+ tst r0, #_TIF_NEED_RESCHED @ if NEED_RESCHED is set
-+ blne svc_preempt @ preempt!
-+
-+ ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
-+ teq r8, #0 @ if preempt lazy count != 0
- movne r0, #0 @ force flags to 0
-- tst r0, #_TIF_NEED_RESCHED
-+ tst r0, #_TIF_NEED_RESCHED_LAZY
- blne svc_preempt
-+1:
- #endif
-
- svc_exit r5, irq = 1 @ return from exception
-@@ -241,8 +248,14 @@ ENDPROC(__irq_svc)
- 1: bl preempt_schedule_irq @ irq en/disable is done inside
- ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
- tst r0, #_TIF_NEED_RESCHED
-+ bne 1b
-+ tst r0, #_TIF_NEED_RESCHED_LAZY
- reteq r8 @ go again
-- b 1b
-+ ldr r0, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
-+ teq r0, #0 @ if preempt lazy count != 0
-+ beq 1b
-+ ret r8 @ go again
-+
- #endif
-
- __und_fault:
-diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
-index e07f359254c3..b50a3248e79f 100644
---- a/arch/arm/kernel/signal.c
-+++ b/arch/arm/kernel/signal.c
-@@ -607,7 +607,8 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
- */
- trace_hardirqs_off();
- do {
-- if (likely(thread_flags & _TIF_NEED_RESCHED)) {
-+ if (likely(thread_flags & (_TIF_NEED_RESCHED |
-+ _TIF_NEED_RESCHED_LAZY))) {
- schedule();
- } else {
- if (unlikely(!user_mode(regs)))
---
-2.43.0
-
diff --git a/buildroot-external/patches/linux/0036-serial-8250_exar-Use-port-lock-wrappers.patch b/buildroot-external/patches/linux/0036-serial-8250_exar-Use-port-lock-wrappers.patch
new file mode 100644
index 00000000..4dca6ecb
--- /dev/null
+++ b/buildroot-external/patches/linux/0036-serial-8250_exar-Use-port-lock-wrappers.patch
@@ -0,0 +1,57 @@
+From 6dcc66687c18aa95a2fc928da69a9f68f97b08c2 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner
+Date: Thu, 14 Sep 2023 20:43:26 +0206
+Subject: [PATCH 036/195] serial: 8250_exar: Use port lock wrappers
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner
+Signed-off-by: John Ogness
+Link: https://lore.kernel.org/r/20230914183831.587273-10-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman
+Signed-off-by: Sebastian Andrzej Siewior
+---
+ drivers/tty/serial/8250/8250_exar.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
+index 8385be846840..91cf690b7c71 100644
+--- a/drivers/tty/serial/8250/8250_exar.c
++++ b/drivers/tty/serial/8250/8250_exar.c
+@@ -201,9 +201,9 @@ static int xr17v35x_startup(struct uart_port *port)
+ *
+ * Synchronize UART_IER access against the console.
+ */
+- spin_lock_irq(&port->lock);
++ uart_port_lock_irq(port);
+ serial_port_out(port, UART_IER, 0);
+- spin_unlock_irq(&port->lock);
++ uart_port_unlock_irq(port);
+
+ return serial8250_do_startup(port);
+ }
+--
+2.43.0
+
diff --git a/buildroot-external/patches/linux/0037-powerpc-Add-support-for-lazy-preemption.patch b/buildroot-external/patches/linux/0037-powerpc-Add-support-for-lazy-preemption.patch
deleted file mode 100644
index 89c86189..00000000
--- a/buildroot-external/patches/linux/0037-powerpc-Add-support-for-lazy-preemption.patch
+++ /dev/null
@@ -1,117 +0,0 @@
-From 27570b59eda95f93f62c30d343f9c913a9d2a137 Mon Sep 17 00:00:00 2001
-From: Thomas Gleixner
-Date: Thu, 1 Nov 2012 10:14:11 +0100
-Subject: [PATCH 37/62] powerpc: Add support for lazy preemption
-
-Implement the powerpc pieces for lazy preempt.
-
-Signed-off-by: Thomas Gleixner
----
- arch/powerpc/Kconfig | 1 +
- arch/powerpc/include/asm/thread_info.h | 8 ++++++++
- arch/powerpc/kernel/interrupt.c | 8 ++++++--
- 3 files changed, 15 insertions(+), 2 deletions(-)
-
-diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
-index 6050e6e10d32..0eff864d6ec3 100644
---- a/arch/powerpc/Kconfig
-+++ b/arch/powerpc/Kconfig
-@@ -242,6 +242,7 @@ config PPC
- select HAVE_PERF_EVENTS_NMI if PPC64
- select HAVE_PERF_REGS
- select HAVE_PERF_USER_STACK_DUMP
-+ select HAVE_PREEMPT_LAZY
- select HAVE_REGS_AND_STACK_ACCESS_API
- select HAVE_RELIABLE_STACKTRACE
- select HAVE_RSEQ
-diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
-index af58f1ed3952..520864de8bb2 100644
---- a/arch/powerpc/include/asm/thread_info.h
-+++ b/arch/powerpc/include/asm/thread_info.h
-@@ -53,6 +53,8 @@
- struct thread_info {
- int preempt_count; /* 0 => preemptable,
- <0 => BUG */
-+ int preempt_lazy_count; /* 0 => preemptable,
-+ <0 => BUG */
- #ifdef CONFIG_SMP
- unsigned int cpu;
- #endif
-@@ -77,6 +79,7 @@ struct thread_info {
- #define INIT_THREAD_INFO(tsk) \
- { \
- .preempt_count = INIT_PREEMPT_COUNT, \
-+ .preempt_lazy_count = 0, \
- .flags = 0, \
- }
-
-@@ -102,6 +105,7 @@ void arch_setup_new_exec(void);
- #define TIF_PATCH_PENDING 6 /* pending live patching update */
- #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
- #define TIF_SINGLESTEP 8 /* singlestepping active */
-+#define TIF_NEED_RESCHED_LAZY 9 /* lazy rescheduling necessary */
- #define TIF_SECCOMP 10 /* secure computing */
- #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
- #define TIF_NOERROR 12 /* Force successful syscall return */
-@@ -117,6 +121,7 @@ void arch_setup_new_exec(void);
- #define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling TIF_NEED_RESCHED */
- #define TIF_32BIT 20 /* 32 bit binary */
-
-+
- /* as above, but as bit values */
- #define _TIF_SYSCALL_TRACE (1<msr & MSR_EE));
- again:
-- if (IS_ENABLED(CONFIG_PREEMPT)) {
-+ if (IS_ENABLED(CONFIG_PREEMPTION)) {
- /* Return to preemptible kernel context */
- if (unlikely(read_thread_flags() & _TIF_NEED_RESCHED)) {
- if (preempt_count() == 0)
- preempt_schedule_irq();
-+ } else if (unlikely(current_thread_info()->flags & _TIF_NEED_RESCHED_LAZY)) {
-+ if ((preempt_count() == 0) &&
-+ (current_thread_info()->preempt_lazy_count == 0))
-+ preempt_schedule_irq();
- }
- }
-
---
-2.43.0
-
diff --git a/buildroot-external/patches/linux/0037-serial-8250_fsl-Use-port-lock-wrappers.patch b/buildroot-external/patches/linux/0037-serial-8250_fsl-Use-port-lock-wrappers.patch
new file mode 100644
index 00000000..a8f930ce
--- /dev/null
+++ b/buildroot-external/patches/linux/0037-serial-8250_fsl-Use-port-lock-wrappers.patch
@@ -0,0 +1,68 @@
+From b7a99e7acecb90b637d07620ed35754c7152004e Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner
+Date: Thu, 14 Sep 2023 20:43:27 +0206
+Subject: [PATCH 037/195] serial: 8250_fsl: Use port lock wrappers
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner
+Signed-off-by: John Ogness
+Link: https://lore.kernel.org/r/20230914183831.587273-11-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman
+Signed-off-by: Sebastian Andrzej Siewior
+---
+ drivers/tty/serial/8250/8250_fsl.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c
+index 6af4e1c1210a..f522eb5026c9 100644
+--- a/drivers/tty/serial/8250/8250_fsl.c
++++ b/drivers/tty/serial/8250/8250_fsl.c
+@@ -30,11 +30,11 @@ int fsl8250_handle_irq(struct uart_port *port)
+ unsigned int iir;
+ struct uart_8250_port *up = up_to_u8250p(port);
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+
+ iir = port->serial_in(port, UART_IIR);
+ if (iir & UART_IIR_NO_INT) {
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+ return 0;
+ }
+
+@@ -54,7 +54,7 @@ int fsl8250_handle_irq(struct uart_port *port)
+ if (unlikely(up->lsr_saved_flags & UART_LSR_BI)) {
+ up->lsr_saved_flags &= ~UART_LSR_BI;
+ port->serial_in(port, UART_RX);
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+ return 1;
+ }
+
+--
+2.43.0
+
diff --git a/buildroot-external/patches/linux/0038-arch-arm64-Add-lazy-preempt-support.patch b/buildroot-external/patches/linux/0038-arch-arm64-Add-lazy-preempt-support.patch
deleted file mode 100644
index b4b1b588..00000000
--- a/buildroot-external/patches/linux/0038-arch-arm64-Add-lazy-preempt-support.patch
+++ /dev/null
@@ -1,145 +0,0 @@
-From 87fb6813fa0a5ecff7fd2c657b37cfe97733ae90 Mon Sep 17 00:00:00 2001
-From: Anders Roxell
-Date: Thu, 14 May 2015 17:52:17 +0200
-Subject: [PATCH 38/62] arch/arm64: Add lazy preempt support
-
-arm64 is missing support for PREEMPT_RT. The main feature which is
-lacking is support for lazy preemption. The arch-specific entry code,
-thread information structure definitions, and associated data tables
-have to be extended to provide this support. Then the Kconfig file has
-to be extended to indicate the support is available, and also to
-indicate that support for full RT preemption is now available.
-
-Signed-off-by: Anders Roxell
-Signed-off-by: Thomas Gleixner
----
- arch/arm64/Kconfig | 1 +
- arch/arm64/include/asm/preempt.h | 25 ++++++++++++++++++++++++-
- arch/arm64/include/asm/thread_info.h | 8 +++++++-
- arch/arm64/kernel/asm-offsets.c | 1 +
- arch/arm64/kernel/signal.c | 2 +-
- 5 files changed, 34 insertions(+), 3 deletions(-)
-
-diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
-index ea70eb960565..6e16670a7f43 100644
---- a/arch/arm64/Kconfig
-+++ b/arch/arm64/Kconfig
-@@ -199,6 +199,7 @@ config ARM64
- select HAVE_PERF_USER_STACK_DUMP
- select HAVE_PREEMPT_DYNAMIC_KEY
- select HAVE_REGS_AND_STACK_ACCESS_API
-+ select HAVE_PREEMPT_LAZY
- select HAVE_POSIX_CPU_TIMERS_TASK_WORK
- select HAVE_FUNCTION_ARG_ACCESS_API
- select MMU_GATHER_RCU_TABLE_FREE
-diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
-index 0159b625cc7f..a5486918e5ee 100644
---- a/arch/arm64/include/asm/preempt.h
-+++ b/arch/arm64/include/asm/preempt.h
-@@ -71,13 +71,36 @@ static inline bool __preempt_count_dec_and_test(void)
- * interrupt occurring between the non-atomic READ_ONCE/WRITE_ONCE
- * pair.
- */
-- return !pc || !READ_ONCE(ti->preempt_count);
-+ if (!pc || !READ_ONCE(ti->preempt_count))
-+ return true;
-+#ifdef CONFIG_PREEMPT_LAZY
-+ if ((pc & ~PREEMPT_NEED_RESCHED))
-+ return false;
-+ if (current_thread_info()->preempt_lazy_count)
-+ return false;
-+ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
-+#else
-+ return false;
-+#endif
- }
-
- static inline bool should_resched(int preempt_offset)
- {
-+#ifdef CONFIG_PREEMPT_LAZY
-+ u64 pc = READ_ONCE(current_thread_info()->preempt_count);
-+ if (pc == preempt_offset)
-+ return true;
-+
-+ if ((pc & ~PREEMPT_NEED_RESCHED) != preempt_offset)
-+ return false;
-+
-+ if (current_thread_info()->preempt_lazy_count)
-+ return false;
-+ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
-+#else
- u64 pc = READ_ONCE(current_thread_info()->preempt_count);
- return pc == preempt_offset;
-+#endif
- }
-
- #ifdef CONFIG_PREEMPTION
-diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
-index 848739c15de8..4b7148fd5551 100644
---- a/arch/arm64/include/asm/thread_info.h
-+++ b/arch/arm64/include/asm/thread_info.h
-@@ -26,6 +26,7 @@ struct thread_info {
- #ifdef CONFIG_ARM64_SW_TTBR0_PAN
- u64 ttbr0; /* saved TTBR0_EL1 */
- #endif
-+ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
- union {
- u64 preempt_count; /* 0 => preemptible, <0 => bug */
- struct {
-@@ -68,6 +69,7 @@ int arch_dup_task_struct(struct task_struct *dst,
- #define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */
- #define TIF_MTE_ASYNC_FAULT 5 /* MTE Asynchronous Tag Check Fault */
- #define TIF_NOTIFY_SIGNAL 6 /* signal notifications exist */
-+#define TIF_NEED_RESCHED_LAZY 7
- #define TIF_SYSCALL_TRACE 8 /* syscall trace active */
- #define TIF_SYSCALL_AUDIT 9 /* syscall auditing */
- #define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */
-@@ -100,8 +102,10 @@ int arch_dup_task_struct(struct task_struct *dst,
- #define _TIF_SVE (1 << TIF_SVE)
- #define _TIF_MTE_ASYNC_FAULT (1 << TIF_MTE_ASYNC_FAULT)
- #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
-+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
-
--#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
-+#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | \
-+ _TIF_SIGPENDING | \
- _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
- _TIF_UPROBE | _TIF_MTE_ASYNC_FAULT | \
- _TIF_NOTIFY_SIGNAL)
-@@ -110,6 +114,8 @@ int arch_dup_task_struct(struct task_struct *dst,
- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
- _TIF_SYSCALL_EMU)
-
-+#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
-+
- #ifdef CONFIG_SHADOW_CALL_STACK
- #define INIT_SCS \
- .scs_base = init_shadow_call_stack, \
-diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
-index 1197e7679882..e74c0415f67e 100644
---- a/arch/arm64/kernel/asm-offsets.c
-+++ b/arch/arm64/kernel/asm-offsets.c
-@@ -32,6 +32,7 @@ int main(void)
- DEFINE(TSK_TI_CPU, offsetof(struct task_struct, thread_info.cpu));
- DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags));
- DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count));
-+ DEFINE(TSK_TI_PREEMPT_LAZY, offsetof(struct task_struct, thread_info.preempt_lazy_count));
- #ifdef CONFIG_ARM64_SW_TTBR0_PAN
- DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0));
- #endif
-diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
-index 82f4572c8ddf..2a606c7bf025 100644
---- a/arch/arm64/kernel/signal.c
-+++ b/arch/arm64/kernel/signal.c
-@@ -1108,7 +1108,7 @@ static void do_signal(struct pt_regs *regs)
- void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
- {
- do {
-- if (thread_flags & _TIF_NEED_RESCHED) {
-+ if (thread_flags & _TIF_NEED_RESCHED_MASK) {
- /* Unmask Debug and SError for the next task */
- local_daif_restore(DAIF_PROCCTX_NOIRQ);
-
---
-2.43.0
-
diff --git a/buildroot-external/patches/linux/0038-serial-8250_mtk-Use-port-lock-wrappers.patch b/buildroot-external/patches/linux/0038-serial-8250_mtk-Use-port-lock-wrappers.patch
new file mode 100644
index 00000000..26230f00
--- /dev/null
+++ b/buildroot-external/patches/linux/0038-serial-8250_mtk-Use-port-lock-wrappers.patch
@@ -0,0 +1,82 @@
+From a91dd09d7fdf4ded721028676a9f5e44a2a754af Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner
+Date: Thu, 14 Sep 2023 20:43:28 +0206
+Subject: [PATCH 038/195] serial: 8250_mtk: Use port lock wrappers
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner
+Reviewed-by: Chen-Yu Tsai
+Signed-off-by: John Ogness
+Link: https://lore.kernel.org/r/20230914183831.587273-12-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman
+Signed-off-by: Sebastian Andrzej Siewior
+---
+ drivers/tty/serial/8250/8250_mtk.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
+index 74da5676ce67..23457daae8a1 100644
+--- a/drivers/tty/serial/8250/8250_mtk.c
++++ b/drivers/tty/serial/8250/8250_mtk.c
+@@ -102,7 +102,7 @@ static void mtk8250_dma_rx_complete(void *param)
+ if (data->rx_status == DMA_RX_SHUTDOWN)
+ return;
+
+- spin_lock_irqsave(&up->port.lock, flags);
++ uart_port_lock_irqsave(&up->port, &flags);
+
+ dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
+ total = dma->rx_size - state.residue;
+@@ -128,7 +128,7 @@ static void mtk8250_dma_rx_complete(void *param)
+
+ mtk8250_rx_dma(up);
+
+- spin_unlock_irqrestore(&up->port.lock, flags);
++ uart_port_unlock_irqrestore(&up->port, flags);
+ }
+
+ static void mtk8250_rx_dma(struct uart_8250_port *up)
+@@ -368,7 +368,7 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
+ * Ok, we're now changing the port state. Do it with
+ * interrupts disabled.
+ */
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+
+ /*
+ * Update the per-port timeout.
+@@ -416,7 +416,7 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
+ if (uart_console(port))
+ up->port.cons->cflag = termios->c_cflag;
+
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ /* Don't rewrite B0 */
+ if (tty_termios_baud_rate(termios))
+ tty_termios_encode_baud_rate(termios, baud, baud);
+--
+2.43.0
+
diff --git a/buildroot-external/patches/linux/0039-serial-8250_omap-Use-port-lock-wrappers.patch b/buildroot-external/patches/linux/0039-serial-8250_omap-Use-port-lock-wrappers.patch
new file mode 100644
index 00000000..4e93c279
--- /dev/null
+++ b/buildroot-external/patches/linux/0039-serial-8250_omap-Use-port-lock-wrappers.patch
@@ -0,0 +1,241 @@
+From 4b82e23b67db0cbe2a97110597d9c97137fd3c5c Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner
+Date: Thu, 14 Sep 2023 20:43:29 +0206
+Subject: [PATCH 039/195] serial: 8250_omap: Use port lock wrappers
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner
+Signed-off-by: John Ogness
+Link: https://lore.kernel.org/r/20230914183831.587273-13-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman
+Signed-off-by: Sebastian Andrzej Siewior
+---
+ drivers/tty/serial/8250/8250_omap.c | 52 ++++++++++++++---------------
+ 1 file changed, 26 insertions(+), 26 deletions(-)
+
+diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
+index 346167afe9e1..db5519ce0192 100644
+--- a/drivers/tty/serial/8250/8250_omap.c
++++ b/drivers/tty/serial/8250/8250_omap.c
+@@ -401,7 +401,7 @@ static void omap_8250_set_termios(struct uart_port *port,
+ * interrupts disabled.
+ */
+ pm_runtime_get_sync(port->dev);
+- spin_lock_irq(&port->lock);
++ uart_port_lock_irq(port);
+
+ /*
+ * Update the per-port timeout.
+@@ -504,7 +504,7 @@ static void omap_8250_set_termios(struct uart_port *port,
+ }
+ omap8250_restore_regs(up);
+
+- spin_unlock_irq(&up->port.lock);
++ uart_port_unlock_irq(&up->port);
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put_autosuspend(port->dev);
+
+@@ -529,7 +529,7 @@ static void omap_8250_pm(struct uart_port *port, unsigned int state,
+ pm_runtime_get_sync(port->dev);
+
+ /* Synchronize UART_IER access against the console. */
+- spin_lock_irq(&port->lock);
++ uart_port_lock_irq(port);
+
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+ efr = serial_in(up, UART_EFR);
+@@ -541,7 +541,7 @@ static void omap_8250_pm(struct uart_port *port, unsigned int state,
+ serial_out(up, UART_EFR, efr);
+ serial_out(up, UART_LCR, 0);
+
+- spin_unlock_irq(&port->lock);
++ uart_port_unlock_irq(port);
+
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put_autosuspend(port->dev);
+@@ -660,7 +660,7 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id)
+ unsigned long delay;
+
+ /* Synchronize UART_IER access against the console. */
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+ up->ier = port->serial_in(port, UART_IER);
+ if (up->ier & (UART_IER_RLSI | UART_IER_RDI)) {
+ port->ops->stop_rx(port);
+@@ -670,7 +670,7 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id)
+ */
+ cancel_delayed_work(&up->overrun_backoff);
+ }
+- spin_unlock(&port->lock);
++ uart_port_unlock(port);
+
+ delay = msecs_to_jiffies(up->overrun_backoff_time_ms);
+ schedule_delayed_work(&up->overrun_backoff, delay);
+@@ -717,10 +717,10 @@ static int omap_8250_startup(struct uart_port *port)
+ }
+
+ /* Synchronize UART_IER access against the console. */
+- spin_lock_irq(&port->lock);
++ uart_port_lock_irq(port);
+ up->ier = UART_IER_RLSI | UART_IER_RDI;
+ serial_out(up, UART_IER, up->ier);
+- spin_unlock_irq(&port->lock);
++ uart_port_unlock_irq(port);
+
+ #ifdef CONFIG_PM
+ up->capabilities |= UART_CAP_RPM;
+@@ -733,9 +733,9 @@ static int omap_8250_startup(struct uart_port *port)
+ serial_out(up, UART_OMAP_WER, priv->wer);
+
+ if (up->dma && !(priv->habit & UART_HAS_EFR2)) {
+- spin_lock_irq(&port->lock);
++ uart_port_lock_irq(port);
+ up->dma->rx_dma(up);
+- spin_unlock_irq(&port->lock);
++ uart_port_unlock_irq(port);
+ }
+
+ enable_irq(up->port.irq);
+@@ -761,10 +761,10 @@ static void omap_8250_shutdown(struct uart_port *port)
+ serial_out(up, UART_OMAP_EFR2, 0x0);
+
+ /* Synchronize UART_IER access against the console. */
+- spin_lock_irq(&port->lock);
++ uart_port_lock_irq(port);
+ up->ier = 0;
+ serial_out(up, UART_IER, 0);
+- spin_unlock_irq(&port->lock);
++ uart_port_unlock_irq(port);
+ disable_irq_nosync(up->port.irq);
+ dev_pm_clear_wake_irq(port->dev);
+
+@@ -789,10 +789,10 @@ static void omap_8250_throttle(struct uart_port *port)
+
+ pm_runtime_get_sync(port->dev);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ port->ops->stop_rx(port);
+ priv->throttled = true;
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put_autosuspend(port->dev);
+@@ -807,14 +807,14 @@ static void omap_8250_unthrottle(struct uart_port *port)
+ pm_runtime_get_sync(port->dev);
+
+ /* Synchronize UART_IER access against the console. */
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ priv->throttled = false;
+ if (up->dma)
+ up->dma->rx_dma(up);
+ up->ier |= UART_IER_RLSI | UART_IER_RDI;
+ port->read_status_mask |= UART_LSR_DR;
+ serial_out(up, UART_IER, up->ier);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put_autosuspend(port->dev);
+@@ -958,7 +958,7 @@ static void __dma_rx_complete(void *param)
+ unsigned long flags;
+
+ /* Synchronize UART_IER access against the console. */
+- spin_lock_irqsave(&p->port.lock, flags);
++ uart_port_lock_irqsave(&p->port, &flags);
+
+ /*
+ * If the tx status is not DMA_COMPLETE, then this is a delayed
+@@ -967,7 +967,7 @@ static void __dma_rx_complete(void *param)
+ */
+ if (dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state) !=
+ DMA_COMPLETE) {
+- spin_unlock_irqrestore(&p->port.lock, flags);
++ uart_port_unlock_irqrestore(&p->port, flags);
+ return;
+ }
+ __dma_rx_do_complete(p);
+@@ -978,7 +978,7 @@ static void __dma_rx_complete(void *param)
+ omap_8250_rx_dma(p);
+ }
+
+- spin_unlock_irqrestore(&p->port.lock, flags);
++ uart_port_unlock_irqrestore(&p->port, flags);
+ }
+
+ static void omap_8250_rx_dma_flush(struct uart_8250_port *p)
+@@ -1083,7 +1083,7 @@ static void omap_8250_dma_tx_complete(void *param)
+ dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
+ UART_XMIT_SIZE, DMA_TO_DEVICE);
+
+- spin_lock_irqsave(&p->port.lock, flags);
++ uart_port_lock_irqsave(&p->port, &flags);
+
+ dma->tx_running = 0;
+
+@@ -1112,7 +1112,7 @@ static void omap_8250_dma_tx_complete(void *param)
+ serial8250_set_THRI(p);
+ }
+
+- spin_unlock_irqrestore(&p->port.lock, flags);
++ uart_port_unlock_irqrestore(&p->port, flags);
+ }
+
+ static int omap_8250_tx_dma(struct uart_8250_port *p)
+@@ -1278,7 +1278,7 @@ static int omap_8250_dma_handle_irq(struct uart_port *port)
+ return IRQ_HANDLED;
+ }
+
+- spin_lock(&port->lock);
++ uart_port_lock(port);
+
+ status = serial_port_in(port, UART_LSR);
+
+@@ -1758,15 +1758,15 @@ static int omap8250_runtime_resume(struct device *dev)
+ up = serial8250_get_port(priv->line);
+
+ if (up && omap8250_lost_context(up)) {
+- spin_lock_irq(&up->port.lock);
++ uart_port_lock_irq(&up->port);
+ omap8250_restore_regs(up);
+- spin_unlock_irq(&up->port.lock);
++ uart_port_unlock_irq(&up->port);
+ }
+
+ if (up && up->dma && up->dma->rxchan && !(priv->habit & UART_HAS_EFR2)) {
+- spin_lock_irq(&up->port.lock);
++ uart_port_lock_irq(&up->port);
+ omap_8250_rx_dma(up);
+- spin_unlock_irq(&up->port.lock);
++ uart_port_unlock_irq(&up->port);
+ }
+
+ priv->latency = priv->calc_latency;
+--
+2.43.0
+
diff --git a/buildroot-external/patches/linux/0040-serial-8250_pci1xxxx-Use-port-lock-wrappers.patch b/buildroot-external/patches/linux/0040-serial-8250_pci1xxxx-Use-port-lock-wrappers.patch
new file mode 100644
index 00000000..347c8de5
--- /dev/null
+++ b/buildroot-external/patches/linux/0040-serial-8250_pci1xxxx-Use-port-lock-wrappers.patch
@@ -0,0 +1,71 @@
+From b52d5efa5d1afba0b8e8c56b3fd6af134643a292 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner
+Date: Thu, 14 Sep 2023 20:43:30 +0206
+Subject: [PATCH 040/195] serial: 8250_pci1xxxx: Use port lock wrappers
+
+When a serial port is used for kernel console output, then all
+modifications to the UART registers which are done from other contexts,
+e.g. getty, termios, are interference points for the kernel console.
+
+So far this has been ignored and the printk output is based on the
+principle of hope. The rework of the console infrastructure which aims to
+support threaded and atomic consoles, requires to mark sections which
+modify the UART registers as unsafe. This allows the atomic write function
+to make informed decisions and eventually to restore operational state. It
+also allows to prevent the regular UART code from modifying UART registers
+while printk output is in progress.
+
+All modifications of UART registers are guarded by the UART port lock,
+which provides an obvious synchronization point with the console
+infrastructure.
+
+To avoid adding this functionality to all UART drivers, wrap the
+spin_[un]lock*() invocations for uart_port::lock into helper functions
+which just contain the spin_[un]lock*() invocations for now. In a
+subsequent step these helpers will gain the console synchronization
+mechanisms.
+
+Converted with coccinelle. No functional change.
+
+Signed-off-by: Thomas Gleixner
+Signed-off-by: John Ogness
+Link: https://lore.kernel.org/r/20230914183831.587273-14-john.ogness@linutronix.de
+Signed-off-by: Greg Kroah-Hartman
+Signed-off-by: Sebastian Andrzej Siewior
+---
+ drivers/tty/serial/8250/8250_pci1xxxx.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/tty/serial/8250/8250_pci1xxxx.c b/drivers/tty/serial/8250/8250_pci1xxxx.c
+index a3b25779d921..53e238c8cc89 100644
+--- a/drivers/tty/serial/8250/8250_pci1xxxx.c
++++ b/drivers/tty/serial/8250/8250_pci1xxxx.c
+@@ -225,10 +225,10 @@ static bool pci1xxxx_port_suspend(int line)
+ if (port->suspended == 0 && port->dev) {
+ wakeup_mask = readb(up->port.membase + UART_WAKE_MASK_REG);
+
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ port->mctrl &= ~TIOCM_OUT2;
+ port->ops->set_mctrl(port, port->mctrl);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+
+ ret = (wakeup_mask & UART_WAKE_SRCS) != UART_WAKE_SRCS;
+ }
+@@ -251,10 +251,10 @@ static void pci1xxxx_port_resume(int line)
+ writeb(UART_WAKE_SRCS, port->membase + UART_WAKE_REG);
+
+ if (port->suspended == 0) {
+- spin_lock_irqsave(&port->lock, flags);
++ uart_port_lock_irqsave(port, &flags);
+ port->mctrl |= TIOCM_OUT2;
+ port->ops->set_mctrl(port, port->mctrl);
+- spin_unlock_irqrestore(&port->lock, flags);
++ uart_port_unlock_irqrestore(port, flags);
+ }
+ mutex_unlock(&tport->mutex);
+ }
+--
+2.43.0
+
diff --git a/buildroot-external/patches/linux/0041-serial-altera_jtaguart-Use-port-lock-wrappers.patch b/buildroot-external/patches/linux/0041-serial-altera_jtaguart-Use-port-lock-wrappers.patch
new file mode 100644
index 00000000..8aa15db2
--- /dev/null
+++ b/buildroot-external/patches/linux/0041-serial-altera_jtaguart-Use-port-lock-wrappers.patch
@@ -0,0 +1,138 @@
+From 174bb9e1e1fa3d4ef2c46054621435dbf7a7dc66 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner