[All] (testing) Bump kernel to 6.6.14
This commit is contained in:
parent
4fc5231981
commit
fffa51ff5f
|
@ -1 +1 @@
|
|||
Subproject commit 0d33775f295ab64bc5eda1cabc2e7c91282dc150
|
||||
Subproject commit bdebc13b116e70b77333eee454f0683afcbf5d1a
|
|
@ -29,7 +29,7 @@ BR2_ROOTFS_POST_IMAGE_SCRIPT="$(BR2_EXTERNAL)/scripts/post-image.sh"
|
|||
BR2_ROOTFS_POST_SCRIPT_ARGS="$(BR2_EXTERNAL)/board/ovos/ova"
|
||||
BR2_LINUX_KERNEL=y
|
||||
BR2_LINUX_KERNEL_CUSTOM_VERSION=y
|
||||
BR2_LINUX_KERNEL_CUSTOM_VERSION_VALUE="6.1.69"
|
||||
BR2_LINUX_KERNEL_CUSTOM_VERSION_VALUE="6.6.14"
|
||||
BR2_LINUX_KERNEL_DEFCONFIG="x86_64"
|
||||
BR2_LINUX_KERNEL_CONFIG_FRAGMENT_FILES="$(BR2_EXTERNAL)/kernel/ovos.config $(BR2_EXTERNAL)/kernel/device-drivers.config $(BR2_EXTERNAL)/kernel/docker.config $(BR2_EXTERNAL)/board/ovos/ova/kernel.config"
|
||||
BR2_LINUX_KERNEL_LZ4=y
|
||||
|
|
|
@ -30,7 +30,7 @@ BR2_ROOTFS_POST_IMAGE_SCRIPT="$(BR2_EXTERNAL)/scripts/post-image.sh"
|
|||
BR2_ROOTFS_POST_SCRIPT_ARGS="$(BR2_EXTERNAL)/board/ovos/raspberrypi/rpi3"
|
||||
BR2_LINUX_KERNEL=y
|
||||
BR2_LINUX_KERNEL_CUSTOM_TARBALL=y
|
||||
BR2_LINUX_KERNEL_CUSTOM_TARBALL_LOCATION="$(call github,raspberrypi,linux,3bb5880ab3dd31f75c07c3c33bf29c5d469b28f3)/linux-3bb5880ab3dd31f75c07c3c33bf29c5d469b28f3.tar.gz"
|
||||
BR2_LINUX_KERNEL_CUSTOM_TARBALL_LOCATION="$(call github,raspberrypi,linux,5e78d297b997dcc7a78ba747a62fb28d0b6a10d8)/linux-5e78d297b997dcc7a78ba747a62fb28d0b6a10d8.tar.gz"
|
||||
BR2_LINUX_KERNEL_DEFCONFIG="bcmrpi3"
|
||||
BR2_LINUX_KERNEL_CONFIG_FRAGMENT_FILES="$(BR2_EXTERNAL)/kernel/ovos.config $(BR2_EXTERNAL)/kernel/device-drivers.config $(BR2_EXTERNAL)/kernel/docker.config $(BR2_EXTERNAL)/board/ovos/raspberrypi/kernel.config"
|
||||
BR2_LINUX_KERNEL_LZ4=y
|
||||
|
|
|
@ -30,7 +30,7 @@ BR2_ROOTFS_POST_IMAGE_SCRIPT="$(BR2_EXTERNAL)/scripts/post-image.sh"
|
|||
BR2_ROOTFS_POST_SCRIPT_ARGS="$(BR2_EXTERNAL)/board/ovos/raspberrypi/rpi4"
|
||||
BR2_LINUX_KERNEL=y
|
||||
BR2_LINUX_KERNEL_CUSTOM_TARBALL=y
|
||||
BR2_LINUX_KERNEL_CUSTOM_TARBALL_LOCATION="$(call github,raspberrypi,linux,3bb5880ab3dd31f75c07c3c33bf29c5d469b28f3)/linux-3bb5880ab3dd31f75c07c3c33bf29c5d469b28f3.tar.gz"
|
||||
BR2_LINUX_KERNEL_CUSTOM_TARBALL_LOCATION="$(call github,raspberrypi,linux,5e78d297b997dcc7a78ba747a62fb28d0b6a10d8)/linux-5e78d297b997dcc7a78ba747a62fb28d0b6a10d8.tar.gz"
|
||||
BR2_LINUX_KERNEL_DEFCONFIG="bcm2711"
|
||||
BR2_LINUX_KERNEL_CONFIG_FRAGMENT_FILES="$(BR2_EXTERNAL)/kernel/ovos.config $(BR2_EXTERNAL)/kernel/device-drivers.config $(BR2_EXTERNAL)/kernel/docker.config $(BR2_EXTERNAL)/board/ovos/raspberrypi/kernel.config"
|
||||
BR2_LINUX_KERNEL_LZ4=y
|
||||
|
|
|
@ -29,7 +29,7 @@ BR2_ROOTFS_POST_IMAGE_SCRIPT="$(BR2_EXTERNAL)/scripts/post-image.sh"
|
|||
BR2_ROOTFS_POST_SCRIPT_ARGS="$(BR2_EXTERNAL)/board/ovos/pc"
|
||||
BR2_LINUX_KERNEL=y
|
||||
BR2_LINUX_KERNEL_CUSTOM_VERSION=y
|
||||
BR2_LINUX_KERNEL_CUSTOM_VERSION_VALUE="6.1.73"
|
||||
BR2_LINUX_KERNEL_CUSTOM_VERSION_VALUE="6.6.14"
|
||||
BR2_LINUX_KERNEL_DEFCONFIG="x86_64"
|
||||
BR2_LINUX_KERNEL_CONFIG_FRAGMENT_FILES="$(BR2_EXTERNAL)/kernel/ovos.config $(BR2_EXTERNAL)/kernel/device-drivers.config $(BR2_EXTERNAL)/kernel/docker.config $(BR2_EXTERNAL)/board/ovos/pc/kernel.config"
|
||||
BR2_LINUX_KERNEL_LZ4=y
|
||||
|
|
|
@ -0,0 +1,54 @@
|
|||
From 71103fe2c85e989e7b5374cead80c0d75425f1de Mon Sep 17 00:00:00 2001
|
||||
From: Peter Zijlstra <peterz@infradead.org>
|
||||
Date: Fri, 8 Sep 2023 18:22:48 +0200
|
||||
Subject: [PATCH 001/195] sched: Constrain locks in sched_submit_work()
|
||||
|
||||
Even though sched_submit_work() is ran from preemptible context,
|
||||
it is discouraged to have it use blocking locks due to the recursion
|
||||
potential.
|
||||
|
||||
Enforce this.
|
||||
|
||||
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
|
||||
Link: https://lkml.kernel.org/r/20230908162254.999499-2-bigeasy@linutronix.de
|
||||
---
|
||||
kernel/sched/core.c | 9 +++++++++
|
||||
1 file changed, 9 insertions(+)
|
||||
|
||||
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
|
||||
index a854b71836dd..a9bf40d18cec 100644
|
||||
--- a/kernel/sched/core.c
|
||||
+++ b/kernel/sched/core.c
|
||||
@@ -6721,11 +6721,18 @@ void __noreturn do_task_dead(void)
|
||||
|
||||
static inline void sched_submit_work(struct task_struct *tsk)
|
||||
{
|
||||
+ static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG);
|
||||
unsigned int task_flags;
|
||||
|
||||
if (task_is_running(tsk))
|
||||
return;
|
||||
|
||||
+ /*
|
||||
+ * Establish LD_WAIT_CONFIG context to ensure none of the code called
|
||||
+ * will use a blocking primitive -- which would lead to recursion.
|
||||
+ */
|
||||
+ lock_map_acquire_try(&sched_map);
|
||||
+
|
||||
task_flags = tsk->flags;
|
||||
/*
|
||||
* If a worker goes to sleep, notify and ask workqueue whether it
|
||||
@@ -6750,6 +6757,8 @@ static inline void sched_submit_work(struct task_struct *tsk)
|
||||
* make sure to submit it to avoid deadlocks.
|
||||
*/
|
||||
blk_flush_plug(tsk->plug, true);
|
||||
+
|
||||
+ lock_map_release(&sched_map);
|
||||
}
|
||||
|
||||
static void sched_update_worker(struct task_struct *tsk)
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -1,32 +0,0 @@
|
|||
From fad2ed9bfa2fce870133fadd15b4d12a26213096 Mon Sep 17 00:00:00 2001
|
||||
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Date: Tue, 16 Aug 2022 09:45:22 +0200
|
||||
Subject: [PATCH 01/62] vduse: Remove include of rwlock.h
|
||||
|
||||
rwlock.h should not be included directly. Instead linux/splinlock.h
|
||||
should be included. Including it directly will break the RT build.
|
||||
|
||||
Remove the rwlock.h include.
|
||||
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Acked-by: Michael S. Tsirkin <mst@redhat.com>
|
||||
Link: https://lkml.kernel.org/r/20221026134407.711768-1-bigeasy@linutronix.de
|
||||
---
|
||||
drivers/vdpa/vdpa_user/iova_domain.h | 1 -
|
||||
1 file changed, 1 deletion(-)
|
||||
|
||||
diff --git a/drivers/vdpa/vdpa_user/iova_domain.h b/drivers/vdpa/vdpa_user/iova_domain.h
|
||||
index 4e0e50e7ac15..173e979b84a9 100644
|
||||
--- a/drivers/vdpa/vdpa_user/iova_domain.h
|
||||
+++ b/drivers/vdpa/vdpa_user/iova_domain.h
|
||||
@@ -14,7 +14,6 @@
|
||||
#include <linux/iova.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/vhost_iotlb.h>
|
||||
-#include <linux/rwlock.h>
|
||||
|
||||
#define IOVA_START_PFN 1
|
||||
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,86 @@
|
|||
From 30c0fabb5665235a7e6dd945be69945e9c6dc069 Mon Sep 17 00:00:00 2001
|
||||
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Date: Fri, 8 Sep 2023 18:22:49 +0200
|
||||
Subject: [PATCH 002/195] locking/rtmutex: Avoid unconditional slowpath for
|
||||
DEBUG_RT_MUTEXES
|
||||
|
||||
With DEBUG_RT_MUTEXES enabled the fast-path rt_mutex_cmpxchg_acquire()
|
||||
always fails and all lock operations take the slow path.
|
||||
|
||||
Provide a new helper inline rt_mutex_try_acquire() which maps to
|
||||
rt_mutex_cmpxchg_acquire() in the non-debug case. For the debug case
|
||||
it invokes rt_mutex_slowtrylock() which can acquire a non-contended
|
||||
rtmutex under full debug coverage.
|
||||
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
|
||||
Link: https://lkml.kernel.org/r/20230908162254.999499-3-bigeasy@linutronix.de
|
||||
---
|
||||
kernel/locking/rtmutex.c | 21 ++++++++++++++++++++-
|
||||
kernel/locking/ww_rt_mutex.c | 2 +-
|
||||
2 files changed, 21 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
|
||||
index 21db0df0eb00..bcec0533a0cc 100644
|
||||
--- a/kernel/locking/rtmutex.c
|
||||
+++ b/kernel/locking/rtmutex.c
|
||||
@@ -218,6 +218,11 @@ static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock,
|
||||
return try_cmpxchg_acquire(&lock->owner, &old, new);
|
||||
}
|
||||
|
||||
+static __always_inline bool rt_mutex_try_acquire(struct rt_mutex_base *lock)
|
||||
+{
|
||||
+ return rt_mutex_cmpxchg_acquire(lock, NULL, current);
|
||||
+}
|
||||
+
|
||||
static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
|
||||
struct task_struct *old,
|
||||
struct task_struct *new)
|
||||
@@ -297,6 +302,20 @@ static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock,
|
||||
|
||||
}
|
||||
|
||||
+static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock);
|
||||
+
|
||||
+static __always_inline bool rt_mutex_try_acquire(struct rt_mutex_base *lock)
|
||||
+{
|
||||
+ /*
|
||||
+ * With debug enabled rt_mutex_cmpxchg trylock() will always fail.
|
||||
+ *
|
||||
+ * Avoid unconditionally taking the slow path by using
|
||||
+ * rt_mutex_slow_trylock() which is covered by the debug code and can
|
||||
+ * acquire a non-contended rtmutex.
|
||||
+ */
|
||||
+ return rt_mutex_slowtrylock(lock);
|
||||
+}
|
||||
+
|
||||
static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
|
||||
struct task_struct *old,
|
||||
struct task_struct *new)
|
||||
@@ -1755,7 +1774,7 @@ static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
|
||||
static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
|
||||
unsigned int state)
|
||||
{
|
||||
- if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
|
||||
+ if (likely(rt_mutex_try_acquire(lock)))
|
||||
return 0;
|
||||
|
||||
return rt_mutex_slowlock(lock, NULL, state);
|
||||
diff --git a/kernel/locking/ww_rt_mutex.c b/kernel/locking/ww_rt_mutex.c
|
||||
index d1473c624105..c7196de838ed 100644
|
||||
--- a/kernel/locking/ww_rt_mutex.c
|
||||
+++ b/kernel/locking/ww_rt_mutex.c
|
||||
@@ -62,7 +62,7 @@ __ww_rt_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx,
|
||||
}
|
||||
mutex_acquire_nest(&rtm->dep_map, 0, 0, nest_lock, ip);
|
||||
|
||||
- if (likely(rt_mutex_cmpxchg_acquire(&rtm->rtmutex, NULL, current))) {
|
||||
+ if (likely(rt_mutex_try_acquire(&rtm->rtmutex))) {
|
||||
if (ww_ctx)
|
||||
ww_mutex_set_context_fastpath(lock, ww_ctx);
|
||||
return 0;
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -1,65 +0,0 @@
|
|||
From 2defd6085e9803ba06f6b56f6f901309462761a6 Mon Sep 17 00:00:00 2001
|
||||
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Date: Wed, 22 Jun 2022 11:36:17 +0200
|
||||
Subject: [PATCH 02/62] signal: Don't disable preemption in ptrace_stop() on
|
||||
PREEMPT_RT.
|
||||
|
||||
Commit
|
||||
53da1d9456fe7 ("fix ptrace slowness")
|
||||
|
||||
is just band aid around the problem.
|
||||
The invocation of do_notify_parent_cldstop() wakes the parent and makes
|
||||
it runnable. The scheduler then wants to replace this still running task
|
||||
with the parent. With the read_lock() acquired this is not possible
|
||||
because preemption is disabled and so this is deferred until read_unlock().
|
||||
This scheduling point is undesired and is avoided by disabling preemption
|
||||
around the unlock operation enabled again before the schedule() invocation
|
||||
without a preemption point.
|
||||
This is only undesired because the parent sleeps a cycle in
|
||||
wait_task_inactive() until the traced task leaves the run-queue in
|
||||
schedule(). It is not a correctness issue, it is just band aid to avoid the
|
||||
visbile delay which sums up over multiple invocations.
|
||||
The task can still be preempted if an interrupt occurs between
|
||||
preempt_enable_no_resched() and freezable_schedule() because on the IRQ-exit
|
||||
path of the interrupt scheduling _will_ happen. This is ignored since it does
|
||||
not happen very often.
|
||||
|
||||
On PREEMPT_RT keeping preemption disabled during the invocation of
|
||||
cgroup_enter_frozen() becomes a problem because the function acquires
|
||||
css_set_lock which is a sleeping lock on PREEMPT_RT and must not be
|
||||
acquired with disabled preemption.
|
||||
|
||||
Don't disable preemption on PREEMPT_RT. Remove the TODO regarding adding
|
||||
read_unlock_no_resched() as there is no need for it and will cause harm.
|
||||
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Link: https://lkml.kernel.org/r/20220720154435.232749-2-bigeasy@linutronix.de
|
||||
---
|
||||
kernel/signal.c | 8 ++++----
|
||||
1 file changed, 4 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/kernel/signal.c b/kernel/signal.c
|
||||
index 5d45f5da2b36..58e919c7c936 100644
|
||||
--- a/kernel/signal.c
|
||||
+++ b/kernel/signal.c
|
||||
@@ -2302,13 +2302,13 @@ static int ptrace_stop(int exit_code, int why, unsigned long message,
|
||||
/*
|
||||
* Don't want to allow preemption here, because
|
||||
* sys_ptrace() needs this task to be inactive.
|
||||
- *
|
||||
- * XXX: implement read_unlock_no_resched().
|
||||
*/
|
||||
- preempt_disable();
|
||||
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
|
||||
+ preempt_disable();
|
||||
read_unlock(&tasklist_lock);
|
||||
cgroup_enter_frozen();
|
||||
- preempt_enable_no_resched();
|
||||
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
|
||||
+ preempt_enable_no_resched();
|
||||
schedule();
|
||||
cgroup_leave_frozen(true);
|
||||
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -1,151 +0,0 @@
|
|||
From af0232b0cb6d5afed7dab020d7dca4759ad93757 Mon Sep 17 00:00:00 2001
|
||||
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Date: Wed, 22 Jun 2022 12:27:05 +0200
|
||||
Subject: [PATCH 03/62] sched: Consider task_struct::saved_state in
|
||||
wait_task_inactive().
|
||||
|
||||
Ptrace is using wait_task_inactive() to wait for the tracee to reach a
|
||||
certain task state. On PREEMPT_RT that state may be stored in
|
||||
task_struct::saved_state while the tracee blocks on a sleeping lock and
|
||||
task_struct::__state is set to TASK_RTLOCK_WAIT.
|
||||
It is not possible to check only for TASK_RTLOCK_WAIT to be sure that the task
|
||||
is blocked on a sleeping lock because during wake up (after the sleeping lock
|
||||
has been acquired) the task state is set TASK_RUNNING. After the task in on CPU
|
||||
and acquired the pi_lock it will reset the state accordingly but until then
|
||||
TASK_RUNNING will be observed (with the desired state saved in saved_state).
|
||||
|
||||
Check also for task_struct::saved_state if the desired match was not found in
|
||||
task_struct::__state on PREEMPT_RT. If the state was found in saved_state, wait
|
||||
until the task is idle and state is visible in task_struct::__state.
|
||||
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Reviewed-by: Valentin Schneider <vschneid@redhat.com>
|
||||
Link: https://lkml.kernel.org/r/Yt%2FpQAFQ1xKNK0RY@linutronix.de
|
||||
---
|
||||
kernel/sched/core.c | 81 ++++++++++++++++++++++++++++++++++++++++++---
|
||||
1 file changed, 76 insertions(+), 5 deletions(-)
|
||||
|
||||
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
|
||||
index 18a4f8f28a25..6bd06122850a 100644
|
||||
--- a/kernel/sched/core.c
|
||||
+++ b/kernel/sched/core.c
|
||||
@@ -3281,6 +3281,76 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p,
|
||||
}
|
||||
#endif /* CONFIG_NUMA_BALANCING */
|
||||
|
||||
+#ifdef CONFIG_PREEMPT_RT
|
||||
+
|
||||
+/*
|
||||
+ * Consider:
|
||||
+ *
|
||||
+ * set_special_state(X);
|
||||
+ *
|
||||
+ * do_things()
|
||||
+ * // Somewhere in there is an rtlock that can be contended:
|
||||
+ * current_save_and_set_rtlock_wait_state();
|
||||
+ * [...]
|
||||
+ * schedule_rtlock(); (A)
|
||||
+ * [...]
|
||||
+ * current_restore_rtlock_saved_state();
|
||||
+ *
|
||||
+ * schedule(); (B)
|
||||
+ *
|
||||
+ * If p->saved_state is anything else than TASK_RUNNING, then p blocked on an
|
||||
+ * rtlock (A) *before* voluntarily calling into schedule() (B) after setting its
|
||||
+ * state to X. For things like ptrace (X=TASK_TRACED), the task could have more
|
||||
+ * work to do upon acquiring the lock in do_things() before whoever called
|
||||
+ * wait_task_inactive() should return. IOW, we have to wait for:
|
||||
+ *
|
||||
+ * p.saved_state = TASK_RUNNING
|
||||
+ * p.__state = X
|
||||
+ *
|
||||
+ * which implies the task isn't blocked on an RT lock and got to schedule() (B).
|
||||
+ *
|
||||
+ * Also see comments in ttwu_state_match().
|
||||
+ */
|
||||
+
|
||||
+static __always_inline bool state_mismatch(struct task_struct *p, unsigned int match_state)
|
||||
+{
|
||||
+ unsigned long flags;
|
||||
+ bool mismatch;
|
||||
+
|
||||
+ raw_spin_lock_irqsave(&p->pi_lock, flags);
|
||||
+ if (READ_ONCE(p->__state) & match_state)
|
||||
+ mismatch = false;
|
||||
+ else if (READ_ONCE(p->saved_state) & match_state)
|
||||
+ mismatch = false;
|
||||
+ else
|
||||
+ mismatch = true;
|
||||
+
|
||||
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
|
||||
+ return mismatch;
|
||||
+}
|
||||
+static __always_inline bool state_match(struct task_struct *p, unsigned int match_state,
|
||||
+ bool *wait)
|
||||
+{
|
||||
+ if (READ_ONCE(p->__state) & match_state)
|
||||
+ return true;
|
||||
+ if (READ_ONCE(p->saved_state) & match_state) {
|
||||
+ *wait = true;
|
||||
+ return true;
|
||||
+ }
|
||||
+ return false;
|
||||
+}
|
||||
+#else
|
||||
+static __always_inline bool state_mismatch(struct task_struct *p, unsigned int match_state)
|
||||
+{
|
||||
+ return !(READ_ONCE(p->__state) & match_state);
|
||||
+}
|
||||
+static __always_inline bool state_match(struct task_struct *p, unsigned int match_state,
|
||||
+ bool *wait)
|
||||
+{
|
||||
+ return (READ_ONCE(p->__state) & match_state);
|
||||
+}
|
||||
+#endif
|
||||
+
|
||||
/*
|
||||
* wait_task_inactive - wait for a thread to unschedule.
|
||||
*
|
||||
@@ -3299,7 +3369,7 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p,
|
||||
*/
|
||||
unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
|
||||
{
|
||||
- int running, queued;
|
||||
+ bool running, wait;
|
||||
struct rq_flags rf;
|
||||
unsigned long ncsw;
|
||||
struct rq *rq;
|
||||
@@ -3325,7 +3395,7 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state
|
||||
* is actually now running somewhere else!
|
||||
*/
|
||||
while (task_on_cpu(rq, p)) {
|
||||
- if (!(READ_ONCE(p->__state) & match_state))
|
||||
+ if (state_mismatch(p, match_state))
|
||||
return 0;
|
||||
cpu_relax();
|
||||
}
|
||||
@@ -3338,9 +3408,10 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state
|
||||
rq = task_rq_lock(p, &rf);
|
||||
trace_sched_wait_task(p);
|
||||
running = task_on_cpu(rq, p);
|
||||
- queued = task_on_rq_queued(p);
|
||||
+ wait = task_on_rq_queued(p);
|
||||
ncsw = 0;
|
||||
- if (READ_ONCE(p->__state) & match_state)
|
||||
+
|
||||
+ if (state_match(p, match_state, &wait))
|
||||
ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
|
||||
task_rq_unlock(rq, p, &rf);
|
||||
|
||||
@@ -3370,7 +3441,7 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state
|
||||
* running right now), it's preempted, and we should
|
||||
* yield - it could be a while.
|
||||
*/
|
||||
- if (unlikely(queued)) {
|
||||
+ if (unlikely(wait)) {
|
||||
ktime_t to = NSEC_PER_SEC / HZ;
|
||||
|
||||
set_current_state(TASK_UNINTERRUPTIBLE);
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,63 @@
|
|||
From 67e70cee63df0dcf79656f4902fb1a563a9bd28f Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Fri, 8 Sep 2023 18:22:50 +0200
|
||||
Subject: [PATCH 003/195] sched: Extract __schedule_loop()
|
||||
|
||||
There are currently two implementations of this basic __schedule()
|
||||
loop, and there is soon to be a third.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
|
||||
Link: https://lkml.kernel.org/r/20230908162254.999499-4-bigeasy@linutronix.de
|
||||
---
|
||||
kernel/sched/core.c | 21 +++++++++++----------
|
||||
1 file changed, 11 insertions(+), 10 deletions(-)
|
||||
|
||||
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
|
||||
index a9bf40d18cec..ed5f5e3f6239 100644
|
||||
--- a/kernel/sched/core.c
|
||||
+++ b/kernel/sched/core.c
|
||||
@@ -6771,16 +6771,21 @@ static void sched_update_worker(struct task_struct *tsk)
|
||||
}
|
||||
}
|
||||
|
||||
-asmlinkage __visible void __sched schedule(void)
|
||||
+static __always_inline void __schedule_loop(unsigned int sched_mode)
|
||||
{
|
||||
- struct task_struct *tsk = current;
|
||||
-
|
||||
- sched_submit_work(tsk);
|
||||
do {
|
||||
preempt_disable();
|
||||
- __schedule(SM_NONE);
|
||||
+ __schedule(sched_mode);
|
||||
sched_preempt_enable_no_resched();
|
||||
} while (need_resched());
|
||||
+}
|
||||
+
|
||||
+asmlinkage __visible void __sched schedule(void)
|
||||
+{
|
||||
+ struct task_struct *tsk = current;
|
||||
+
|
||||
+ sched_submit_work(tsk);
|
||||
+ __schedule_loop(SM_NONE);
|
||||
sched_update_worker(tsk);
|
||||
}
|
||||
EXPORT_SYMBOL(schedule);
|
||||
@@ -6844,11 +6849,7 @@ void __sched schedule_preempt_disabled(void)
|
||||
#ifdef CONFIG_PREEMPT_RT
|
||||
void __sched notrace schedule_rtlock(void)
|
||||
{
|
||||
- do {
|
||||
- preempt_disable();
|
||||
- __schedule(SM_RTLOCK_WAIT);
|
||||
- sched_preempt_enable_no_resched();
|
||||
- } while (need_resched());
|
||||
+ __schedule_loop(SM_RTLOCK_WAIT);
|
||||
}
|
||||
NOKPROBE_SYMBOL(schedule_rtlock);
|
||||
#endif
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,137 @@
|
|||
From 9e47eb505e7f3ed5408d3d66e91ebb6982768023 Mon Sep 17 00:00:00 2001
|
||||
From: Peter Zijlstra <peterz@infradead.org>
|
||||
Date: Fri, 8 Sep 2023 18:22:51 +0200
|
||||
Subject: [PATCH 004/195] sched: Provide rt_mutex specific scheduler helpers
|
||||
|
||||
With PREEMPT_RT there is a rt_mutex recursion problem where
|
||||
sched_submit_work() can use an rtlock (aka spinlock_t). More
|
||||
specifically what happens is:
|
||||
|
||||
mutex_lock() /* really rt_mutex */
|
||||
...
|
||||
__rt_mutex_slowlock_locked()
|
||||
task_blocks_on_rt_mutex()
|
||||
// enqueue current task as waiter
|
||||
// do PI chain walk
|
||||
rt_mutex_slowlock_block()
|
||||
schedule()
|
||||
sched_submit_work()
|
||||
...
|
||||
spin_lock() /* really rtlock */
|
||||
...
|
||||
__rt_mutex_slowlock_locked()
|
||||
task_blocks_on_rt_mutex()
|
||||
// enqueue current task as waiter *AGAIN*
|
||||
// *CONFUSION*
|
||||
|
||||
Fix this by making rt_mutex do the sched_submit_work() early, before
|
||||
it enqueues itself as a waiter -- before it even knows *if* it will
|
||||
wait.
|
||||
|
||||
[[ basically Thomas' patch but with different naming and a few asserts
|
||||
added ]]
|
||||
|
||||
Originally-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
|
||||
Link: https://lkml.kernel.org/r/20230908162254.999499-5-bigeasy@linutronix.de
|
||||
---
|
||||
include/linux/sched.h | 3 +++
|
||||
include/linux/sched/rt.h | 4 ++++
|
||||
kernel/sched/core.c | 36 ++++++++++++++++++++++++++++++++----
|
||||
3 files changed, 39 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/include/linux/sched.h b/include/linux/sched.h
|
||||
index 77f01ac385f7..67623ffd4a8e 100644
|
||||
--- a/include/linux/sched.h
|
||||
+++ b/include/linux/sched.h
|
||||
@@ -911,6 +911,9 @@ struct task_struct {
|
||||
* ->sched_remote_wakeup gets used, so it can be in this word.
|
||||
*/
|
||||
unsigned sched_remote_wakeup:1;
|
||||
+#ifdef CONFIG_RT_MUTEXES
|
||||
+ unsigned sched_rt_mutex:1;
|
||||
+#endif
|
||||
|
||||
/* Bit to tell LSMs we're in execve(): */
|
||||
unsigned in_execve:1;
|
||||
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
|
||||
index 994c25640e15..b2b9e6eb9683 100644
|
||||
--- a/include/linux/sched/rt.h
|
||||
+++ b/include/linux/sched/rt.h
|
||||
@@ -30,6 +30,10 @@ static inline bool task_is_realtime(struct task_struct *tsk)
|
||||
}
|
||||
|
||||
#ifdef CONFIG_RT_MUTEXES
|
||||
+extern void rt_mutex_pre_schedule(void);
|
||||
+extern void rt_mutex_schedule(void);
|
||||
+extern void rt_mutex_post_schedule(void);
|
||||
+
|
||||
/*
|
||||
* Must hold either p->pi_lock or task_rq(p)->lock.
|
||||
*/
|
||||
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
|
||||
index ed5f5e3f6239..90f9124ac027 100644
|
||||
--- a/kernel/sched/core.c
|
||||
+++ b/kernel/sched/core.c
|
||||
@@ -6724,9 +6724,6 @@ static inline void sched_submit_work(struct task_struct *tsk)
|
||||
static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG);
|
||||
unsigned int task_flags;
|
||||
|
||||
- if (task_is_running(tsk))
|
||||
- return;
|
||||
-
|
||||
/*
|
||||
* Establish LD_WAIT_CONFIG context to ensure none of the code called
|
||||
* will use a blocking primitive -- which would lead to recursion.
|
||||
@@ -6784,7 +6781,12 @@ asmlinkage __visible void __sched schedule(void)
|
||||
{
|
||||
struct task_struct *tsk = current;
|
||||
|
||||
- sched_submit_work(tsk);
|
||||
+#ifdef CONFIG_RT_MUTEXES
|
||||
+ lockdep_assert(!tsk->sched_rt_mutex);
|
||||
+#endif
|
||||
+
|
||||
+ if (!task_is_running(tsk))
|
||||
+ sched_submit_work(tsk);
|
||||
__schedule_loop(SM_NONE);
|
||||
sched_update_worker(tsk);
|
||||
}
|
||||
@@ -7045,6 +7047,32 @@ static void __setscheduler_prio(struct task_struct *p, int prio)
|
||||
|
||||
#ifdef CONFIG_RT_MUTEXES
|
||||
|
||||
+/*
|
||||
+ * Would be more useful with typeof()/auto_type but they don't mix with
|
||||
+ * bit-fields. Since it's a local thing, use int. Keep the generic sounding
|
||||
+ * name such that if someone were to implement this function we get to compare
|
||||
+ * notes.
|
||||
+ */
|
||||
+#define fetch_and_set(x, v) ({ int _x = (x); (x) = (v); _x; })
|
||||
+
|
||||
+void rt_mutex_pre_schedule(void)
|
||||
+{
|
||||
+ lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1));
|
||||
+ sched_submit_work(current);
|
||||
+}
|
||||
+
|
||||
+void rt_mutex_schedule(void)
|
||||
+{
|
||||
+ lockdep_assert(current->sched_rt_mutex);
|
||||
+ __schedule_loop(SM_NONE);
|
||||
+}
|
||||
+
|
||||
+void rt_mutex_post_schedule(void)
|
||||
+{
|
||||
+ sched_update_worker(current);
|
||||
+ lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0));
|
||||
+}
|
||||
+
|
||||
static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
|
||||
{
|
||||
if (pi_task)
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -1,39 +0,0 @@
|
|||
From 501df2b62fce3d3cea107bfc7c8c28283a62dc01 Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 25 Aug 2022 16:15:32 +0200
|
||||
Subject: [PATCH 04/62] spi: Remove the obsolte u64_stats_fetch_*_irq() users.
|
||||
|
||||
Now that the 32bit UP oddity is gone and 32bit uses always a sequence
|
||||
count, there is no need for the fetch_irq() variants anymore.
|
||||
|
||||
Convert to the regular interface.
|
||||
|
||||
Cc: Mark Brown <broonie@kernel.org>
|
||||
Cc: linux-spi@vger.kernel.org
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
|
||||
---
|
||||
drivers/spi/spi.c | 4 ++--
|
||||
1 file changed, 2 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
|
||||
index 22d227878bc4..75cc8bbc2b14 100644
|
||||
--- a/drivers/spi/spi.c
|
||||
+++ b/drivers/spi/spi.c
|
||||
@@ -127,10 +127,10 @@ do { \
|
||||
unsigned int start; \
|
||||
pcpu_stats = per_cpu_ptr(in, i); \
|
||||
do { \
|
||||
- start = u64_stats_fetch_begin_irq( \
|
||||
+ start = u64_stats_fetch_begin( \
|
||||
&pcpu_stats->syncp); \
|
||||
inc = u64_stats_read(&pcpu_stats->field); \
|
||||
- } while (u64_stats_fetch_retry_irq( \
|
||||
+ } while (u64_stats_fetch_retry( \
|
||||
&pcpu_stats->syncp, start)); \
|
||||
ret += inc; \
|
||||
} \
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,191 @@
|
|||
From be4c4dd6d8b7daa71457f2d02eead010dc5b77b5 Mon Sep 17 00:00:00 2001
|
||||
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Date: Fri, 8 Sep 2023 18:22:52 +0200
|
||||
Subject: [PATCH 005/195] locking/rtmutex: Use rt_mutex specific scheduler
|
||||
helpers
|
||||
|
||||
Have rt_mutex use the rt_mutex specific scheduler helpers to avoid
|
||||
recursion vs rtlock on the PI state.
|
||||
|
||||
[[ peterz: adapted to new names ]]
|
||||
|
||||
Reported-by: Crystal Wood <swood@redhat.com>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
|
||||
Link: https://lkml.kernel.org/r/20230908162254.999499-6-bigeasy@linutronix.de
|
||||
---
|
||||
kernel/futex/pi.c | 11 +++++++++++
|
||||
kernel/locking/rtmutex.c | 14 ++++++++++++--
|
||||
kernel/locking/rwbase_rt.c | 6 ++++++
|
||||
kernel/locking/rwsem.c | 8 +++++++-
|
||||
kernel/locking/spinlock_rt.c | 4 ++++
|
||||
5 files changed, 40 insertions(+), 3 deletions(-)
|
||||
|
||||
diff --git a/kernel/futex/pi.c b/kernel/futex/pi.c
|
||||
index ce2889f12375..f8e65b27d9d6 100644
|
||||
--- a/kernel/futex/pi.c
|
||||
+++ b/kernel/futex/pi.c
|
||||
@@ -1,6 +1,7 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
#include <linux/slab.h>
|
||||
+#include <linux/sched/rt.h>
|
||||
#include <linux/sched/task.h>
|
||||
|
||||
#include "futex.h"
|
||||
@@ -1002,6 +1003,12 @@ int futex_lock_pi(u32 __user *uaddr, unsigned int flags, ktime_t *time, int tryl
|
||||
goto no_block;
|
||||
}
|
||||
|
||||
+ /*
|
||||
+ * Must be done before we enqueue the waiter, here is unfortunately
|
||||
+ * under the hb lock, but that *should* work because it does nothing.
|
||||
+ */
|
||||
+ rt_mutex_pre_schedule();
|
||||
+
|
||||
rt_mutex_init_waiter(&rt_waiter);
|
||||
|
||||
/*
|
||||
@@ -1052,6 +1059,10 @@ int futex_lock_pi(u32 __user *uaddr, unsigned int flags, ktime_t *time, int tryl
|
||||
if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
|
||||
ret = 0;
|
||||
|
||||
+ /*
|
||||
+ * Waiter is unqueued.
|
||||
+ */
|
||||
+ rt_mutex_post_schedule();
|
||||
no_block:
|
||||
/*
|
||||
* Fixup the pi_state owner and possibly acquire the lock if we
|
||||
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
|
||||
index bcec0533a0cc..a3fe05dfd0d8 100644
|
||||
--- a/kernel/locking/rtmutex.c
|
||||
+++ b/kernel/locking/rtmutex.c
|
||||
@@ -1632,7 +1632,7 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
|
||||
raw_spin_unlock_irq(&lock->wait_lock);
|
||||
|
||||
if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
|
||||
- schedule();
|
||||
+ rt_mutex_schedule();
|
||||
|
||||
raw_spin_lock_irq(&lock->wait_lock);
|
||||
set_current_state(state);
|
||||
@@ -1661,7 +1661,7 @@ static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock,
|
||||
WARN(1, "rtmutex deadlock detected\n");
|
||||
while (1) {
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
- schedule();
|
||||
+ rt_mutex_schedule();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1756,6 +1756,15 @@ static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
|
||||
unsigned long flags;
|
||||
int ret;
|
||||
|
||||
+ /*
|
||||
+ * Do all pre-schedule work here, before we queue a waiter and invoke
|
||||
+ * PI -- any such work that trips on rtlock (PREEMPT_RT spinlock) would
|
||||
+ * otherwise recurse back into task_blocks_on_rt_mutex() through
|
||||
+ * rtlock_slowlock() and will then enqueue a second waiter for this
|
||||
+ * same task and things get really confusing real fast.
|
||||
+ */
|
||||
+ rt_mutex_pre_schedule();
|
||||
+
|
||||
/*
|
||||
* Technically we could use raw_spin_[un]lock_irq() here, but this can
|
||||
* be called in early boot if the cmpxchg() fast path is disabled
|
||||
@@ -1767,6 +1776,7 @@ static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
|
||||
raw_spin_lock_irqsave(&lock->wait_lock, flags);
|
||||
ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state);
|
||||
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
|
||||
+ rt_mutex_post_schedule();
|
||||
|
||||
return ret;
|
||||
}
|
||||
diff --git a/kernel/locking/rwbase_rt.c b/kernel/locking/rwbase_rt.c
|
||||
index 25ec0239477c..c7258cb32d91 100644
|
||||
--- a/kernel/locking/rwbase_rt.c
|
||||
+++ b/kernel/locking/rwbase_rt.c
|
||||
@@ -71,6 +71,7 @@ static int __sched __rwbase_read_lock(struct rwbase_rt *rwb,
|
||||
struct rt_mutex_base *rtm = &rwb->rtmutex;
|
||||
int ret;
|
||||
|
||||
+ rwbase_pre_schedule();
|
||||
raw_spin_lock_irq(&rtm->wait_lock);
|
||||
|
||||
/*
|
||||
@@ -125,6 +126,7 @@ static int __sched __rwbase_read_lock(struct rwbase_rt *rwb,
|
||||
rwbase_rtmutex_unlock(rtm);
|
||||
|
||||
trace_contention_end(rwb, ret);
|
||||
+ rwbase_post_schedule();
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -237,6 +239,8 @@ static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
|
||||
/* Force readers into slow path */
|
||||
atomic_sub(READER_BIAS, &rwb->readers);
|
||||
|
||||
+ rwbase_pre_schedule();
|
||||
+
|
||||
raw_spin_lock_irqsave(&rtm->wait_lock, flags);
|
||||
if (__rwbase_write_trylock(rwb))
|
||||
goto out_unlock;
|
||||
@@ -248,6 +252,7 @@ static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
|
||||
if (rwbase_signal_pending_state(state, current)) {
|
||||
rwbase_restore_current_state();
|
||||
__rwbase_write_unlock(rwb, 0, flags);
|
||||
+ rwbase_post_schedule();
|
||||
trace_contention_end(rwb, -EINTR);
|
||||
return -EINTR;
|
||||
}
|
||||
@@ -266,6 +271,7 @@ static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
|
||||
|
||||
out_unlock:
|
||||
raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
|
||||
+ rwbase_post_schedule();
|
||||
return 0;
|
||||
}
|
||||
|
||||
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
|
||||
index 9eabd585ce7a..2340b6d90ec6 100644
|
||||
--- a/kernel/locking/rwsem.c
|
||||
+++ b/kernel/locking/rwsem.c
|
||||
@@ -1427,8 +1427,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
|
||||
#define rwbase_signal_pending_state(state, current) \
|
||||
signal_pending_state(state, current)
|
||||
|
||||
+#define rwbase_pre_schedule() \
|
||||
+ rt_mutex_pre_schedule()
|
||||
+
|
||||
#define rwbase_schedule() \
|
||||
- schedule()
|
||||
+ rt_mutex_schedule()
|
||||
+
|
||||
+#define rwbase_post_schedule() \
|
||||
+ rt_mutex_post_schedule()
|
||||
|
||||
#include "rwbase_rt.c"
|
||||
|
||||
diff --git a/kernel/locking/spinlock_rt.c b/kernel/locking/spinlock_rt.c
|
||||
index 48a19ed8486d..842037b2ba54 100644
|
||||
--- a/kernel/locking/spinlock_rt.c
|
||||
+++ b/kernel/locking/spinlock_rt.c
|
||||
@@ -184,9 +184,13 @@ static __always_inline int rwbase_rtmutex_trylock(struct rt_mutex_base *rtm)
|
||||
|
||||
#define rwbase_signal_pending_state(state, current) (0)
|
||||
|
||||
+#define rwbase_pre_schedule()
|
||||
+
|
||||
#define rwbase_schedule() \
|
||||
schedule_rtlock()
|
||||
|
||||
+#define rwbase_post_schedule()
|
||||
+
|
||||
#include "rwbase_rt.c"
|
||||
/*
|
||||
* The common functions which get wrapped into the rwlock API.
|
||||
--
|
||||
2.43.0
|
||||
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,66 @@
|
|||
From beef31a7b4f8b038bfd4490f654df84668b806dc Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Fri, 8 Sep 2023 18:22:53 +0200
|
||||
Subject: [PATCH 006/195] locking/rtmutex: Add a lockdep assert to catch
|
||||
potential nested blocking
|
||||
|
||||
There used to be a BUG_ON(current->pi_blocked_on) in the lock acquisition
|
||||
functions, but that vanished in one of the rtmutex overhauls.
|
||||
|
||||
Bring it back in form of a lockdep assert to catch code paths which take
|
||||
rtmutex based locks with current::pi_blocked_on != NULL.
|
||||
|
||||
Reported-by: Crystal Wood <swood@redhat.com>
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: "Peter Zijlstra (Intel)" <peterz@infradead.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
|
||||
Link: https://lkml.kernel.org/r/20230908162254.999499-7-bigeasy@linutronix.de
|
||||
---
|
||||
kernel/locking/rtmutex.c | 2 ++
|
||||
kernel/locking/rwbase_rt.c | 2 ++
|
||||
kernel/locking/spinlock_rt.c | 2 ++
|
||||
3 files changed, 6 insertions(+)
|
||||
|
||||
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
|
||||
index a3fe05dfd0d8..4a10e8c16fd2 100644
|
||||
--- a/kernel/locking/rtmutex.c
|
||||
+++ b/kernel/locking/rtmutex.c
|
||||
@@ -1784,6 +1784,8 @@ static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
|
||||
static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
|
||||
unsigned int state)
|
||||
{
|
||||
+ lockdep_assert(!current->pi_blocked_on);
|
||||
+
|
||||
if (likely(rt_mutex_try_acquire(lock)))
|
||||
return 0;
|
||||
|
||||
diff --git a/kernel/locking/rwbase_rt.c b/kernel/locking/rwbase_rt.c
|
||||
index c7258cb32d91..34a59569db6b 100644
|
||||
--- a/kernel/locking/rwbase_rt.c
|
||||
+++ b/kernel/locking/rwbase_rt.c
|
||||
@@ -133,6 +133,8 @@ static int __sched __rwbase_read_lock(struct rwbase_rt *rwb,
|
||||
static __always_inline int rwbase_read_lock(struct rwbase_rt *rwb,
|
||||
unsigned int state)
|
||||
{
|
||||
+ lockdep_assert(!current->pi_blocked_on);
|
||||
+
|
||||
if (rwbase_read_trylock(rwb))
|
||||
return 0;
|
||||
|
||||
diff --git a/kernel/locking/spinlock_rt.c b/kernel/locking/spinlock_rt.c
|
||||
index 842037b2ba54..38e292454fcc 100644
|
||||
--- a/kernel/locking/spinlock_rt.c
|
||||
+++ b/kernel/locking/spinlock_rt.c
|
||||
@@ -37,6 +37,8 @@
|
||||
|
||||
static __always_inline void rtlock_lock(struct rt_mutex_base *rtm)
|
||||
{
|
||||
+ lockdep_assert(!current->pi_blocked_on);
|
||||
+
|
||||
if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
|
||||
rtlock_slowlock(rtm);
|
||||
}
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -1,392 +0,0 @@
|
|||
From 62269129168ade34b47e64d0c82c16ea665e2bfe Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 25 Aug 2022 16:17:37 +0200
|
||||
Subject: [PATCH 06/62] net: Remove the obsolte u64_stats_fetch_*_irq() users
|
||||
(net).
|
||||
|
||||
Now that the 32bit UP oddity is gone and 32bit uses always a sequence
|
||||
count, there is no need for the fetch_irq() variants anymore.
|
||||
|
||||
Convert to the regular interface.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
|
||||
---
|
||||
net/8021q/vlan_dev.c | 4 ++--
|
||||
net/bridge/br_multicast.c | 4 ++--
|
||||
net/bridge/br_vlan.c | 4 ++--
|
||||
net/core/dev.c | 4 ++--
|
||||
net/core/drop_monitor.c | 8 ++++----
|
||||
net/core/gen_stats.c | 16 ++++++++--------
|
||||
net/devlink/leftover.c | 4 ++--
|
||||
net/dsa/slave.c | 4 ++--
|
||||
net/ipv4/af_inet.c | 4 ++--
|
||||
net/ipv6/seg6_local.c | 4 ++--
|
||||
net/mac80211/sta_info.c | 8 ++++----
|
||||
net/mpls/af_mpls.c | 4 ++--
|
||||
net/netfilter/ipvs/ip_vs_ctl.c | 4 ++--
|
||||
net/netfilter/nf_tables_api.c | 4 ++--
|
||||
net/openvswitch/datapath.c | 4 ++--
|
||||
net/openvswitch/flow_table.c | 9 ++++-----
|
||||
16 files changed, 44 insertions(+), 45 deletions(-)
|
||||
|
||||
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
|
||||
index d3e511e1eba8..0fa52bcc296b 100644
|
||||
--- a/net/8021q/vlan_dev.c
|
||||
+++ b/net/8021q/vlan_dev.c
|
||||
@@ -712,13 +712,13 @@ static void vlan_dev_get_stats64(struct net_device *dev,
|
||||
|
||||
p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
|
||||
do {
|
||||
- start = u64_stats_fetch_begin_irq(&p->syncp);
|
||||
+ start = u64_stats_fetch_begin(&p->syncp);
|
||||
rxpackets = u64_stats_read(&p->rx_packets);
|
||||
rxbytes = u64_stats_read(&p->rx_bytes);
|
||||
rxmulticast = u64_stats_read(&p->rx_multicast);
|
||||
txpackets = u64_stats_read(&p->tx_packets);
|
||||
txbytes = u64_stats_read(&p->tx_bytes);
|
||||
- } while (u64_stats_fetch_retry_irq(&p->syncp, start));
|
||||
+ } while (u64_stats_fetch_retry(&p->syncp, start));
|
||||
|
||||
stats->rx_packets += rxpackets;
|
||||
stats->rx_bytes += rxbytes;
|
||||
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
|
||||
index db4f2641d1cd..7e2a9fb5786c 100644
|
||||
--- a/net/bridge/br_multicast.c
|
||||
+++ b/net/bridge/br_multicast.c
|
||||
@@ -4899,9 +4899,9 @@ void br_multicast_get_stats(const struct net_bridge *br,
|
||||
unsigned int start;
|
||||
|
||||
do {
|
||||
- start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
|
||||
+ start = u64_stats_fetch_begin(&cpu_stats->syncp);
|
||||
memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
|
||||
- } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
|
||||
+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
|
||||
|
||||
mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
|
||||
mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
|
||||
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
|
||||
index 9ffd40b8270c..bc75fa1e4666 100644
|
||||
--- a/net/bridge/br_vlan.c
|
||||
+++ b/net/bridge/br_vlan.c
|
||||
@@ -1389,12 +1389,12 @@ void br_vlan_get_stats(const struct net_bridge_vlan *v,
|
||||
|
||||
cpu_stats = per_cpu_ptr(v->stats, i);
|
||||
do {
|
||||
- start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
|
||||
+ start = u64_stats_fetch_begin(&cpu_stats->syncp);
|
||||
rxpackets = u64_stats_read(&cpu_stats->rx_packets);
|
||||
rxbytes = u64_stats_read(&cpu_stats->rx_bytes);
|
||||
txbytes = u64_stats_read(&cpu_stats->tx_bytes);
|
||||
txpackets = u64_stats_read(&cpu_stats->tx_packets);
|
||||
- } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
|
||||
+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
|
||||
|
||||
u64_stats_add(&stats->rx_packets, rxpackets);
|
||||
u64_stats_add(&stats->rx_bytes, rxbytes);
|
||||
diff --git a/net/core/dev.c b/net/core/dev.c
|
||||
index 0a5566b6f8a2..29ae6265a408 100644
|
||||
--- a/net/core/dev.c
|
||||
+++ b/net/core/dev.c
|
||||
@@ -10508,12 +10508,12 @@ void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
|
||||
|
||||
stats = per_cpu_ptr(netstats, cpu);
|
||||
do {
|
||||
- start = u64_stats_fetch_begin_irq(&stats->syncp);
|
||||
+ start = u64_stats_fetch_begin(&stats->syncp);
|
||||
rx_packets = u64_stats_read(&stats->rx_packets);
|
||||
rx_bytes = u64_stats_read(&stats->rx_bytes);
|
||||
tx_packets = u64_stats_read(&stats->tx_packets);
|
||||
tx_bytes = u64_stats_read(&stats->tx_bytes);
|
||||
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
|
||||
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
|
||||
|
||||
s->rx_packets += rx_packets;
|
||||
s->rx_bytes += rx_bytes;
|
||||
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
|
||||
index 8e0a90b45df2..4d5e8b317c47 100644
|
||||
--- a/net/core/drop_monitor.c
|
||||
+++ b/net/core/drop_monitor.c
|
||||
@@ -1432,9 +1432,9 @@ static void net_dm_stats_read(struct net_dm_stats *stats)
|
||||
u64 dropped;
|
||||
|
||||
do {
|
||||
- start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
|
||||
+ start = u64_stats_fetch_begin(&cpu_stats->syncp);
|
||||
dropped = u64_stats_read(&cpu_stats->dropped);
|
||||
- } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
|
||||
+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
|
||||
|
||||
u64_stats_add(&stats->dropped, dropped);
|
||||
}
|
||||
@@ -1476,9 +1476,9 @@ static void net_dm_hw_stats_read(struct net_dm_stats *stats)
|
||||
u64 dropped;
|
||||
|
||||
do {
|
||||
- start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
|
||||
+ start = u64_stats_fetch_begin(&cpu_stats->syncp);
|
||||
dropped = u64_stats_read(&cpu_stats->dropped);
|
||||
- } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
|
||||
+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
|
||||
|
||||
u64_stats_add(&stats->dropped, dropped);
|
||||
}
|
||||
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
|
||||
index c8d137ef5980..b71ccaec0991 100644
|
||||
--- a/net/core/gen_stats.c
|
||||
+++ b/net/core/gen_stats.c
|
||||
@@ -135,10 +135,10 @@ static void gnet_stats_add_basic_cpu(struct gnet_stats_basic_sync *bstats,
|
||||
u64 bytes, packets;
|
||||
|
||||
do {
|
||||
- start = u64_stats_fetch_begin_irq(&bcpu->syncp);
|
||||
+ start = u64_stats_fetch_begin(&bcpu->syncp);
|
||||
bytes = u64_stats_read(&bcpu->bytes);
|
||||
packets = u64_stats_read(&bcpu->packets);
|
||||
- } while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));
|
||||
+ } while (u64_stats_fetch_retry(&bcpu->syncp, start));
|
||||
|
||||
t_bytes += bytes;
|
||||
t_packets += packets;
|
||||
@@ -162,10 +162,10 @@ void gnet_stats_add_basic(struct gnet_stats_basic_sync *bstats,
|
||||
}
|
||||
do {
|
||||
if (running)
|
||||
- start = u64_stats_fetch_begin_irq(&b->syncp);
|
||||
+ start = u64_stats_fetch_begin(&b->syncp);
|
||||
bytes = u64_stats_read(&b->bytes);
|
||||
packets = u64_stats_read(&b->packets);
|
||||
- } while (running && u64_stats_fetch_retry_irq(&b->syncp, start));
|
||||
+ } while (running && u64_stats_fetch_retry(&b->syncp, start));
|
||||
|
||||
_bstats_update(bstats, bytes, packets);
|
||||
}
|
||||
@@ -187,10 +187,10 @@ static void gnet_stats_read_basic(u64 *ret_bytes, u64 *ret_packets,
|
||||
u64 bytes, packets;
|
||||
|
||||
do {
|
||||
- start = u64_stats_fetch_begin_irq(&bcpu->syncp);
|
||||
+ start = u64_stats_fetch_begin(&bcpu->syncp);
|
||||
bytes = u64_stats_read(&bcpu->bytes);
|
||||
packets = u64_stats_read(&bcpu->packets);
|
||||
- } while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));
|
||||
+ } while (u64_stats_fetch_retry(&bcpu->syncp, start));
|
||||
|
||||
t_bytes += bytes;
|
||||
t_packets += packets;
|
||||
@@ -201,10 +201,10 @@ static void gnet_stats_read_basic(u64 *ret_bytes, u64 *ret_packets,
|
||||
}
|
||||
do {
|
||||
if (running)
|
||||
- start = u64_stats_fetch_begin_irq(&b->syncp);
|
||||
+ start = u64_stats_fetch_begin(&b->syncp);
|
||||
*ret_bytes = u64_stats_read(&b->bytes);
|
||||
*ret_packets = u64_stats_read(&b->packets);
|
||||
- } while (running && u64_stats_fetch_retry_irq(&b->syncp, start));
|
||||
+ } while (running && u64_stats_fetch_retry(&b->syncp, start));
|
||||
}
|
||||
|
||||
static int
|
||||
diff --git a/net/devlink/leftover.c b/net/devlink/leftover.c
|
||||
index 032c7af065cd..94e8cc3de330 100644
|
||||
--- a/net/devlink/leftover.c
|
||||
+++ b/net/devlink/leftover.c
|
||||
@@ -8307,10 +8307,10 @@ static void devlink_trap_stats_read(struct devlink_stats __percpu *trap_stats,
|
||||
|
||||
cpu_stats = per_cpu_ptr(trap_stats, i);
|
||||
do {
|
||||
- start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
|
||||
+ start = u64_stats_fetch_begin(&cpu_stats->syncp);
|
||||
rx_packets = u64_stats_read(&cpu_stats->rx_packets);
|
||||
rx_bytes = u64_stats_read(&cpu_stats->rx_bytes);
|
||||
- } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
|
||||
+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
|
||||
|
||||
u64_stats_add(&stats->rx_packets, rx_packets);
|
||||
u64_stats_add(&stats->rx_bytes, rx_bytes);
|
||||
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
|
||||
index 5fe075bf479e..28ee63ec1d1d 100644
|
||||
--- a/net/dsa/slave.c
|
||||
+++ b/net/dsa/slave.c
|
||||
@@ -976,12 +976,12 @@ static void dsa_slave_get_ethtool_stats(struct net_device *dev,
|
||||
|
||||
s = per_cpu_ptr(dev->tstats, i);
|
||||
do {
|
||||
- start = u64_stats_fetch_begin_irq(&s->syncp);
|
||||
+ start = u64_stats_fetch_begin(&s->syncp);
|
||||
tx_packets = u64_stats_read(&s->tx_packets);
|
||||
tx_bytes = u64_stats_read(&s->tx_bytes);
|
||||
rx_packets = u64_stats_read(&s->rx_packets);
|
||||
rx_bytes = u64_stats_read(&s->rx_bytes);
|
||||
- } while (u64_stats_fetch_retry_irq(&s->syncp, start));
|
||||
+ } while (u64_stats_fetch_retry(&s->syncp, start));
|
||||
data[0] += tx_packets;
|
||||
data[1] += tx_bytes;
|
||||
data[2] += rx_packets;
|
||||
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
|
||||
index 347c3768df6e..bc3d36463e32 100644
|
||||
--- a/net/ipv4/af_inet.c
|
||||
+++ b/net/ipv4/af_inet.c
|
||||
@@ -1726,9 +1726,9 @@ u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offt,
|
||||
bhptr = per_cpu_ptr(mib, cpu);
|
||||
syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
|
||||
do {
|
||||
- start = u64_stats_fetch_begin_irq(syncp);
|
||||
+ start = u64_stats_fetch_begin(syncp);
|
||||
v = *(((u64 *)bhptr) + offt);
|
||||
- } while (u64_stats_fetch_retry_irq(syncp, start));
|
||||
+ } while (u64_stats_fetch_retry(syncp, start));
|
||||
|
||||
return v;
|
||||
}
|
||||
diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
|
||||
index 8370726ae7bf..487f8e98deaa 100644
|
||||
--- a/net/ipv6/seg6_local.c
|
||||
+++ b/net/ipv6/seg6_local.c
|
||||
@@ -1644,13 +1644,13 @@ static int put_nla_counters(struct sk_buff *skb, struct seg6_local_lwt *slwt)
|
||||
|
||||
pcounters = per_cpu_ptr(slwt->pcpu_counters, i);
|
||||
do {
|
||||
- start = u64_stats_fetch_begin_irq(&pcounters->syncp);
|
||||
+ start = u64_stats_fetch_begin(&pcounters->syncp);
|
||||
|
||||
packets = u64_stats_read(&pcounters->packets);
|
||||
bytes = u64_stats_read(&pcounters->bytes);
|
||||
errors = u64_stats_read(&pcounters->errors);
|
||||
|
||||
- } while (u64_stats_fetch_retry_irq(&pcounters->syncp, start));
|
||||
+ } while (u64_stats_fetch_retry(&pcounters->syncp, start));
|
||||
|
||||
counters.packets += packets;
|
||||
counters.bytes += bytes;
|
||||
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
|
||||
index 49b71453dec3..c462e20ccc8d 100644
|
||||
--- a/net/mac80211/sta_info.c
|
||||
+++ b/net/mac80211/sta_info.c
|
||||
@@ -2397,9 +2397,9 @@ static inline u64 sta_get_tidstats_msdu(struct ieee80211_sta_rx_stats *rxstats,
|
||||
u64 value;
|
||||
|
||||
do {
|
||||
- start = u64_stats_fetch_begin_irq(&rxstats->syncp);
|
||||
+ start = u64_stats_fetch_begin(&rxstats->syncp);
|
||||
value = rxstats->msdu[tid];
|
||||
- } while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));
|
||||
+ } while (u64_stats_fetch_retry(&rxstats->syncp, start));
|
||||
|
||||
return value;
|
||||
}
|
||||
@@ -2465,9 +2465,9 @@ static inline u64 sta_get_stats_bytes(struct ieee80211_sta_rx_stats *rxstats)
|
||||
u64 value;
|
||||
|
||||
do {
|
||||
- start = u64_stats_fetch_begin_irq(&rxstats->syncp);
|
||||
+ start = u64_stats_fetch_begin(&rxstats->syncp);
|
||||
value = rxstats->bytes;
|
||||
- } while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));
|
||||
+ } while (u64_stats_fetch_retry(&rxstats->syncp, start));
|
||||
|
||||
return value;
|
||||
}
|
||||
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
|
||||
index f1f43894efb8..dc5165d3eec4 100644
|
||||
--- a/net/mpls/af_mpls.c
|
||||
+++ b/net/mpls/af_mpls.c
|
||||
@@ -1079,9 +1079,9 @@ static void mpls_get_stats(struct mpls_dev *mdev,
|
||||
|
||||
p = per_cpu_ptr(mdev->stats, i);
|
||||
do {
|
||||
- start = u64_stats_fetch_begin_irq(&p->syncp);
|
||||
+ start = u64_stats_fetch_begin(&p->syncp);
|
||||
local = p->stats;
|
||||
- } while (u64_stats_fetch_retry_irq(&p->syncp, start));
|
||||
+ } while (u64_stats_fetch_retry(&p->syncp, start));
|
||||
|
||||
stats->rx_packets += local.rx_packets;
|
||||
stats->rx_bytes += local.rx_bytes;
|
||||
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
|
||||
index 17a1b731a76b..2be696513629 100644
|
||||
--- a/net/netfilter/ipvs/ip_vs_ctl.c
|
||||
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
|
||||
@@ -2299,13 +2299,13 @@ static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v)
|
||||
u64 conns, inpkts, outpkts, inbytes, outbytes;
|
||||
|
||||
do {
|
||||
- start = u64_stats_fetch_begin_irq(&u->syncp);
|
||||
+ start = u64_stats_fetch_begin(&u->syncp);
|
||||
conns = u64_stats_read(&u->cnt.conns);
|
||||
inpkts = u64_stats_read(&u->cnt.inpkts);
|
||||
outpkts = u64_stats_read(&u->cnt.outpkts);
|
||||
inbytes = u64_stats_read(&u->cnt.inbytes);
|
||||
outbytes = u64_stats_read(&u->cnt.outbytes);
|
||||
- } while (u64_stats_fetch_retry_irq(&u->syncp, start));
|
||||
+ } while (u64_stats_fetch_retry(&u->syncp, start));
|
||||
|
||||
seq_printf(seq, "%3X %8LX %8LX %8LX %16LX %16LX\n",
|
||||
i, (u64)conns, (u64)inpkts,
|
||||
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
|
||||
index 3d6ebb9877a4..0e237f0356ae 100644
|
||||
--- a/net/netfilter/nf_tables_api.c
|
||||
+++ b/net/netfilter/nf_tables_api.c
|
||||
@@ -1692,10 +1692,10 @@ static int nft_dump_stats(struct sk_buff *skb, struct nft_stats __percpu *stats)
|
||||
for_each_possible_cpu(cpu) {
|
||||
cpu_stats = per_cpu_ptr(stats, cpu);
|
||||
do {
|
||||
- seq = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
|
||||
+ seq = u64_stats_fetch_begin(&cpu_stats->syncp);
|
||||
pkts = cpu_stats->pkts;
|
||||
bytes = cpu_stats->bytes;
|
||||
- } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq));
|
||||
+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, seq));
|
||||
total.pkts += pkts;
|
||||
total.bytes += bytes;
|
||||
}
|
||||
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
|
||||
index 3c7b24535409..0953f531f984 100644
|
||||
--- a/net/openvswitch/datapath.c
|
||||
+++ b/net/openvswitch/datapath.c
|
||||
@@ -716,9 +716,9 @@ static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
|
||||
percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
|
||||
|
||||
do {
|
||||
- start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
|
||||
+ start = u64_stats_fetch_begin(&percpu_stats->syncp);
|
||||
local_stats = *percpu_stats;
|
||||
- } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
|
||||
+ } while (u64_stats_fetch_retry(&percpu_stats->syncp, start));
|
||||
|
||||
stats->n_hit += local_stats.n_hit;
|
||||
stats->n_missed += local_stats.n_missed;
|
||||
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
|
||||
index d4a2db0b2299..0a0e4c283f02 100644
|
||||
--- a/net/openvswitch/flow_table.c
|
||||
+++ b/net/openvswitch/flow_table.c
|
||||
@@ -205,9 +205,9 @@ static void tbl_mask_array_reset_counters(struct mask_array *ma)
|
||||
|
||||
stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
|
||||
do {
|
||||
- start = u64_stats_fetch_begin_irq(&stats->syncp);
|
||||
+ start = u64_stats_fetch_begin(&stats->syncp);
|
||||
counter = stats->usage_cntrs[i];
|
||||
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
|
||||
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
|
||||
|
||||
ma->masks_usage_zero_cntr[i] += counter;
|
||||
}
|
||||
@@ -1136,10 +1136,9 @@ void ovs_flow_masks_rebalance(struct flow_table *table)
|
||||
|
||||
stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
|
||||
do {
|
||||
- start = u64_stats_fetch_begin_irq(&stats->syncp);
|
||||
+ start = u64_stats_fetch_begin(&stats->syncp);
|
||||
counter = stats->usage_cntrs[i];
|
||||
- } while (u64_stats_fetch_retry_irq(&stats->syncp,
|
||||
- start));
|
||||
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
|
||||
|
||||
masks_and_count[i].counter += counter;
|
||||
}
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -1,50 +0,0 @@
|
|||
From 9350167d72e9692f289b2569b6f24da0b894aac6 Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 25 Aug 2022 16:17:57 +0200
|
||||
Subject: [PATCH 07/62] bpf: Remove the obsolte u64_stats_fetch_*_irq() users.
|
||||
|
||||
Now that the 32bit UP oddity is gone and 32bit uses always a sequence
|
||||
count, there is no need for the fetch_irq() variants anymore.
|
||||
|
||||
Convert to the regular interface.
|
||||
|
||||
Cc: Alexei Starovoitov <ast@kernel.org>
|
||||
Cc: Andrii Nakryiko <andrii@kernel.org>
|
||||
Cc: Daniel Borkmann <daniel@iogearbox.net>
|
||||
Cc: Hao Luo <haoluo@google.com>
|
||||
Cc: Jiri Olsa <jolsa@kernel.org>
|
||||
Cc: John Fastabend <john.fastabend@gmail.com>
|
||||
Cc: KP Singh <kpsingh@kernel.org>
|
||||
Cc: Martin KaFai Lau <martin.lau@linux.dev>
|
||||
Cc: Song Liu <song@kernel.org>
|
||||
Cc: Stanislav Fomichev <sdf@google.com>
|
||||
Cc: Yonghong Song <yhs@fb.com>
|
||||
Cc: bpf@vger.kernel.org
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
|
||||
---
|
||||
kernel/bpf/syscall.c | 4 ++--
|
||||
1 file changed, 2 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
|
||||
index 0c8b7733573e..c0915e2424f1 100644
|
||||
--- a/kernel/bpf/syscall.c
|
||||
+++ b/kernel/bpf/syscall.c
|
||||
@@ -2115,11 +2115,11 @@ static void bpf_prog_get_stats(const struct bpf_prog *prog,
|
||||
|
||||
st = per_cpu_ptr(prog->stats, cpu);
|
||||
do {
|
||||
- start = u64_stats_fetch_begin_irq(&st->syncp);
|
||||
+ start = u64_stats_fetch_begin(&st->syncp);
|
||||
tnsecs = u64_stats_read(&st->nsecs);
|
||||
tcnt = u64_stats_read(&st->cnt);
|
||||
tmisses = u64_stats_read(&st->misses);
|
||||
- } while (u64_stats_fetch_retry_irq(&st->syncp, start));
|
||||
+ } while (u64_stats_fetch_retry(&st->syncp, start));
|
||||
nsecs += tnsecs;
|
||||
cnt += tcnt;
|
||||
misses += tmisses;
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,204 @@
|
|||
From 067017a07e06c6ab7a99d0a9da097d3cdbc07d74 Mon Sep 17 00:00:00 2001
|
||||
From: Peter Zijlstra <peterz@infradead.org>
|
||||
Date: Fri, 15 Sep 2023 17:19:44 +0200
|
||||
Subject: [PATCH 007/195] futex/pi: Fix recursive rt_mutex waiter state
|
||||
|
||||
Some new assertions pointed out that the existing code has nested rt_mutex wait
|
||||
state in the futex code.
|
||||
|
||||
Specifically, the futex_lock_pi() cancel case uses spin_lock() while there
|
||||
still is a rt_waiter enqueued for this task, resulting in a state where there
|
||||
are two waiters for the same task (and task_struct::pi_blocked_on gets
|
||||
scrambled).
|
||||
|
||||
The reason to take hb->lock at this point is to avoid the wake_futex_pi()
|
||||
EAGAIN case.
|
||||
|
||||
This happens when futex_top_waiter() and rt_mutex_top_waiter() state becomes
|
||||
inconsistent. The current rules are such that this inconsistency will not be
|
||||
observed.
|
||||
|
||||
Notably the case that needs to be avoided is where futex_lock_pi() and
|
||||
futex_unlock_pi() interleave such that unlock will fail to observe a new
|
||||
waiter.
|
||||
|
||||
*However* the case at hand is where a waiter is leaving, in this case the race
|
||||
means a waiter that is going away is not observed -- which is harmless,
|
||||
provided this race is explicitly handled.
|
||||
|
||||
This is a somewhat dangerous proposition because the converse race is not
|
||||
observing a new waiter, which must absolutely not happen. But since the race is
|
||||
valid this cannot be asserted.
|
||||
|
||||
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
|
||||
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Link: https://lkml.kernel.org/r/20230915151943.GD6743@noisy.programming.kicks-ass.net
|
||||
---
|
||||
kernel/futex/pi.c | 76 ++++++++++++++++++++++++++----------------
|
||||
kernel/futex/requeue.c | 6 ++--
|
||||
2 files changed, 52 insertions(+), 30 deletions(-)
|
||||
|
||||
diff --git a/kernel/futex/pi.c b/kernel/futex/pi.c
|
||||
index f8e65b27d9d6..d636a1bbd7d0 100644
|
||||
--- a/kernel/futex/pi.c
|
||||
+++ b/kernel/futex/pi.c
|
||||
@@ -611,29 +611,16 @@ int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
|
||||
/*
|
||||
* Caller must hold a reference on @pi_state.
|
||||
*/
|
||||
-static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state)
|
||||
+static int wake_futex_pi(u32 __user *uaddr, u32 uval,
|
||||
+ struct futex_pi_state *pi_state,
|
||||
+ struct rt_mutex_waiter *top_waiter)
|
||||
{
|
||||
- struct rt_mutex_waiter *top_waiter;
|
||||
struct task_struct *new_owner;
|
||||
bool postunlock = false;
|
||||
DEFINE_RT_WAKE_Q(wqh);
|
||||
u32 curval, newval;
|
||||
int ret = 0;
|
||||
|
||||
- top_waiter = rt_mutex_top_waiter(&pi_state->pi_mutex);
|
||||
- if (WARN_ON_ONCE(!top_waiter)) {
|
||||
- /*
|
||||
- * As per the comment in futex_unlock_pi() this should not happen.
|
||||
- *
|
||||
- * When this happens, give up our locks and try again, giving
|
||||
- * the futex_lock_pi() instance time to complete, either by
|
||||
- * waiting on the rtmutex or removing itself from the futex
|
||||
- * queue.
|
||||
- */
|
||||
- ret = -EAGAIN;
|
||||
- goto out_unlock;
|
||||
- }
|
||||
-
|
||||
new_owner = top_waiter->task;
|
||||
|
||||
/*
|
||||
@@ -1046,19 +1033,33 @@ int futex_lock_pi(u32 __user *uaddr, unsigned int flags, ktime_t *time, int tryl
|
||||
ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
|
||||
|
||||
cleanup:
|
||||
- spin_lock(q.lock_ptr);
|
||||
/*
|
||||
* If we failed to acquire the lock (deadlock/signal/timeout), we must
|
||||
- * first acquire the hb->lock before removing the lock from the
|
||||
- * rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait
|
||||
- * lists consistent.
|
||||
+ * must unwind the above, however we canont lock hb->lock because
|
||||
+ * rt_mutex already has a waiter enqueued and hb->lock can itself try
|
||||
+ * and enqueue an rt_waiter through rtlock.
|
||||
+ *
|
||||
+ * Doing the cleanup without holding hb->lock can cause inconsistent
|
||||
+ * state between hb and pi_state, but only in the direction of not
|
||||
+ * seeing a waiter that is leaving.
|
||||
+ *
|
||||
+ * See futex_unlock_pi(), it deals with this inconsistency.
|
||||
*
|
||||
- * In particular; it is important that futex_unlock_pi() can not
|
||||
- * observe this inconsistency.
|
||||
+ * There be dragons here, since we must deal with the inconsistency on
|
||||
+ * the way out (here), it is impossible to detect/warn about the race
|
||||
+ * the other way around (missing an incoming waiter).
|
||||
+ *
|
||||
+ * What could possibly go wrong...
|
||||
*/
|
||||
if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
|
||||
ret = 0;
|
||||
|
||||
+ /*
|
||||
+ * Now that the rt_waiter has been dequeued, it is safe to use
|
||||
+ * spinlock/rtlock (which might enqueue its own rt_waiter) and fix up
|
||||
+ * the
|
||||
+ */
|
||||
+ spin_lock(q.lock_ptr);
|
||||
/*
|
||||
* Waiter is unqueued.
|
||||
*/
|
||||
@@ -1143,6 +1144,7 @@ int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
|
||||
top_waiter = futex_top_waiter(hb, &key);
|
||||
if (top_waiter) {
|
||||
struct futex_pi_state *pi_state = top_waiter->pi_state;
|
||||
+ struct rt_mutex_waiter *rt_waiter;
|
||||
|
||||
ret = -EINVAL;
|
||||
if (!pi_state)
|
||||
@@ -1155,22 +1157,39 @@ int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
|
||||
if (pi_state->owner != current)
|
||||
goto out_unlock;
|
||||
|
||||
- get_pi_state(pi_state);
|
||||
/*
|
||||
* By taking wait_lock while still holding hb->lock, we ensure
|
||||
- * there is no point where we hold neither; and therefore
|
||||
- * wake_futex_p() must observe a state consistent with what we
|
||||
- * observed.
|
||||
+ * there is no point where we hold neither; and thereby
|
||||
+ * wake_futex_pi() must observe any new waiters.
|
||||
+ *
|
||||
+ * Since the cleanup: case in futex_lock_pi() removes the
|
||||
+ * rt_waiter without holding hb->lock, it is possible for
|
||||
+ * wake_futex_pi() to not find a waiter while the above does,
|
||||
+ * in this case the waiter is on the way out and it can be
|
||||
+ * ignored.
|
||||
*
|
||||
* In particular; this forces __rt_mutex_start_proxy() to
|
||||
* complete such that we're guaranteed to observe the
|
||||
- * rt_waiter. Also see the WARN in wake_futex_pi().
|
||||
+ * rt_waiter.
|
||||
*/
|
||||
raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
|
||||
+
|
||||
+ /*
|
||||
+ * Futex vs rt_mutex waiter state -- if there are no rt_mutex
|
||||
+ * waiters even though futex thinks there are, then the waiter
|
||||
+ * is leaving and the uncontended path is safe to take.
|
||||
+ */
|
||||
+ rt_waiter = rt_mutex_top_waiter(&pi_state->pi_mutex);
|
||||
+ if (!rt_waiter) {
|
||||
+ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
|
||||
+ goto do_uncontended;
|
||||
+ }
|
||||
+
|
||||
+ get_pi_state(pi_state);
|
||||
spin_unlock(&hb->lock);
|
||||
|
||||
/* drops pi_state->pi_mutex.wait_lock */
|
||||
- ret = wake_futex_pi(uaddr, uval, pi_state);
|
||||
+ ret = wake_futex_pi(uaddr, uval, pi_state, rt_waiter);
|
||||
|
||||
put_pi_state(pi_state);
|
||||
|
||||
@@ -1198,6 +1217,7 @@ int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
|
||||
return ret;
|
||||
}
|
||||
|
||||
+do_uncontended:
|
||||
/*
|
||||
* We have no kernel internal state, i.e. no waiters in the
|
||||
* kernel. Waiters which are about to queue themselves are stuck
|
||||
diff --git a/kernel/futex/requeue.c b/kernel/futex/requeue.c
|
||||
index cba8b1a6a4cc..4c73e0b81acc 100644
|
||||
--- a/kernel/futex/requeue.c
|
||||
+++ b/kernel/futex/requeue.c
|
||||
@@ -850,11 +850,13 @@ int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
|
||||
pi_mutex = &q.pi_state->pi_mutex;
|
||||
ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
|
||||
|
||||
- /* Current is not longer pi_blocked_on */
|
||||
- spin_lock(q.lock_ptr);
|
||||
+ /*
|
||||
+ * See futex_unlock_pi()'s cleanup: comment.
|
||||
+ */
|
||||
if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
|
||||
ret = 0;
|
||||
|
||||
+ spin_lock(q.lock_ptr);
|
||||
debug_rt_mutex_free_waiter(&rt_waiter);
|
||||
/*
|
||||
* Fixup the pi_state owner and possibly acquire the lock if we
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,52 @@
|
|||
From 854cc4a6294ebc631fceda57138d1419977e9c87 Mon Sep 17 00:00:00 2001
|
||||
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Date: Thu, 3 Aug 2023 12:09:31 +0200
|
||||
Subject: [PATCH 008/195] signal: Add proper comment about the preempt-disable
|
||||
in ptrace_stop().
|
||||
|
||||
Commit 53da1d9456fe7 ("fix ptrace slowness") added a preempt-disable section
|
||||
between read_unlock() and the following schedule() invocation without
|
||||
explaining why it is needed.
|
||||
|
||||
Replace the comment with an explanation why this is needed. Clarify that
|
||||
it is needed for correctness but for performance reasons.
|
||||
|
||||
Acked-by: Oleg Nesterov <oleg@redhat.com>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230803100932.325870-2-bigeasy@linutronix.de
|
||||
---
|
||||
kernel/signal.c | 17 ++++++++++++++---
|
||||
1 file changed, 14 insertions(+), 3 deletions(-)
|
||||
|
||||
diff --git a/kernel/signal.c b/kernel/signal.c
|
||||
index 09019017d669..051ed8114cd4 100644
|
||||
--- a/kernel/signal.c
|
||||
+++ b/kernel/signal.c
|
||||
@@ -2329,10 +2329,21 @@ static int ptrace_stop(int exit_code, int why, unsigned long message,
|
||||
do_notify_parent_cldstop(current, false, why);
|
||||
|
||||
/*
|
||||
- * Don't want to allow preemption here, because
|
||||
- * sys_ptrace() needs this task to be inactive.
|
||||
+ * The previous do_notify_parent_cldstop() invocation woke ptracer.
|
||||
+ * One a PREEMPTION kernel this can result in preemption requirement
|
||||
+ * which will be fulfilled after read_unlock() and the ptracer will be
|
||||
+ * put on the CPU.
|
||||
+ * The ptracer is in wait_task_inactive(, __TASK_TRACED) waiting for
|
||||
+ * this task wait in schedule(). If this task gets preempted then it
|
||||
+ * remains enqueued on the runqueue. The ptracer will observe this and
|
||||
+ * then sleep for a delay of one HZ tick. In the meantime this task
|
||||
+ * gets scheduled, enters schedule() and will wait for the ptracer.
|
||||
*
|
||||
- * XXX: implement read_unlock_no_resched().
|
||||
+ * This preemption point is not bad from correctness point of view but
|
||||
+ * extends the runtime by one HZ tick time due to the ptracer's sleep.
|
||||
+ * The preempt-disable section ensures that there will be no preemption
|
||||
+ * between unlock and schedule() and so improving the performance since
|
||||
+ * the ptracer has no reason to sleep.
|
||||
*/
|
||||
preempt_disable();
|
||||
read_unlock(&tasklist_lock);
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -1,41 +0,0 @@
|
|||
From cf6d7ad80beb2d90140c212f8ccd32197e0eccdb Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 25 Aug 2022 16:43:46 +0200
|
||||
Subject: [PATCH 08/62] u64_stat: Remove the obsolete fetch_irq() variants.
|
||||
|
||||
Now that the 32bit UP oddity is gone and 32bit uses always a sequence
|
||||
count, there is no need for the fetch_irq() variants anymore.
|
||||
|
||||
Delete the obsolete interfaces.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
|
||||
---
|
||||
include/linux/u64_stats_sync.h | 12 ------------
|
||||
1 file changed, 12 deletions(-)
|
||||
|
||||
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
|
||||
index 46040d66334a..ffe48e69b3f3 100644
|
||||
--- a/include/linux/u64_stats_sync.h
|
||||
+++ b/include/linux/u64_stats_sync.h
|
||||
@@ -213,16 +213,4 @@ static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
|
||||
return __u64_stats_fetch_retry(syncp, start);
|
||||
}
|
||||
|
||||
-/* Obsolete interfaces */
|
||||
-static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
|
||||
-{
|
||||
- return u64_stats_fetch_begin(syncp);
|
||||
-}
|
||||
-
|
||||
-static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
|
||||
- unsigned int start)
|
||||
-{
|
||||
- return u64_stats_fetch_retry(syncp, start);
|
||||
-}
|
||||
-
|
||||
#endif /* _LINUX_U64_STATS_SYNC_H */
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,53 @@
|
|||
From 819c398c8fc580efa6af71d8880e57d762c7b53a Mon Sep 17 00:00:00 2001
|
||||
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Date: Thu, 3 Aug 2023 12:09:32 +0200
|
||||
Subject: [PATCH 009/195] signal: Don't disable preemption in ptrace_stop() on
|
||||
PREEMPT_RT.
|
||||
|
||||
On PREEMPT_RT keeping preemption disabled during the invocation of
|
||||
cgroup_enter_frozen() is a problem because the function acquires css_set_lock
|
||||
which is a sleeping lock on PREEMPT_RT and must not be acquired with disabled
|
||||
preemption.
|
||||
The preempt-disabled section is only for performance optimisation
|
||||
reasons and can be avoided.
|
||||
|
||||
Extend the comment and don't disable preemption before scheduling on
|
||||
PREEMPT_RT.
|
||||
|
||||
Acked-by: Oleg Nesterov <oleg@redhat.com>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230803100932.325870-3-bigeasy@linutronix.de
|
||||
---
|
||||
kernel/signal.c | 13 +++++++++++--
|
||||
1 file changed, 11 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/kernel/signal.c b/kernel/signal.c
|
||||
index 051ed8114cd4..b71026341056 100644
|
||||
--- a/kernel/signal.c
|
||||
+++ b/kernel/signal.c
|
||||
@@ -2344,11 +2344,20 @@ static int ptrace_stop(int exit_code, int why, unsigned long message,
|
||||
* The preempt-disable section ensures that there will be no preemption
|
||||
* between unlock and schedule() and so improving the performance since
|
||||
* the ptracer has no reason to sleep.
|
||||
+ *
|
||||
+ * On PREEMPT_RT locking tasklist_lock does not disable preemption.
|
||||
+ * Therefore the task can be preempted (after
|
||||
+ * do_notify_parent_cldstop()) before unlocking tasklist_lock so there
|
||||
+ * is no benefit in doing this. The optimisation is harmful on
|
||||
+ * PEEMPT_RT because the spinlock_t (in cgroup_enter_frozen()) must not
|
||||
+ * be acquired with disabled preemption.
|
||||
*/
|
||||
- preempt_disable();
|
||||
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
|
||||
+ preempt_disable();
|
||||
read_unlock(&tasklist_lock);
|
||||
cgroup_enter_frozen();
|
||||
- preempt_enable_no_resched();
|
||||
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
|
||||
+ preempt_enable_no_resched();
|
||||
schedule();
|
||||
cgroup_leave_frozen(true);
|
||||
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,91 @@
|
|||
From beec053e6c4b3efcfb0c71e5594de0df4651d6f8 Mon Sep 17 00:00:00 2001
|
||||
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Date: Thu, 21 Sep 2023 16:15:12 +0200
|
||||
Subject: [PATCH 010/195] drm/amd/display: Remove migrate_en/dis from
|
||||
dc_fpu_begin().
|
||||
|
||||
This is a revert of the commit mentioned below while it is not wrong, as
|
||||
in the kernel will explode, having migrate_disable() here it is
|
||||
complete waste of resources.
|
||||
|
||||
Additionally commit message is plain wrong the review tag does not make
|
||||
it any better. The migrate_disable() interface has a fat comment
|
||||
describing it and it includes the word "undesired" in the headline which
|
||||
should tickle people to read it before using it.
|
||||
Initially I assumed it is worded too harsh but now I beg to differ.
|
||||
|
||||
The reviewer of the original commit, even not understanding what
|
||||
migrate_disable() does should ask the following:
|
||||
|
||||
- migrate_disable() is added only to the CONFIG_X86 block and it claims
|
||||
to protect fpu_recursion_depth. Why are the other the architectures
|
||||
excluded?
|
||||
|
||||
- migrate_disable() is added after fpu_recursion_depth was modified.
|
||||
Shouldn't it be added before the modification or referencing takes
|
||||
place?
|
||||
|
||||
Moving on.
|
||||
Disabling preemption DOES prevent CPU migration. A task, that can not be
|
||||
pushed away from the CPU by the scheduler (due to disabled preemption)
|
||||
can not be pushed or migrated to another CPU.
|
||||
|
||||
Disabling migration DOES NOT ensure consistency of per-CPU variables. It
|
||||
only ensures that the task acts always on the same per-CPU variable. The
|
||||
task remains preemptible meaning multiple tasks can access the same
|
||||
per-CPU variable. This in turn leads to inconsistency for the statement
|
||||
|
||||
*pcpu -= 1;
|
||||
|
||||
with two tasks on one CPU and a preemption point during the RMW
|
||||
operation:
|
||||
|
||||
Task A Task B
|
||||
read pcpu to reg # 0
|
||||
inc reg # 0 -> 1
|
||||
read pcpu to reg # 0
|
||||
inc reg # 0 -> 1
|
||||
write reg to pcpu # 1
|
||||
write reg to pcpu # 1
|
||||
|
||||
At the end pcpu reads 1 but should read 2 instead. Boom.
|
||||
|
||||
get_cpu_ptr() already contains a preempt_disable() statement. That means
|
||||
that the per-CPU variable can only be referenced by a single task which
|
||||
is currently running. The only inconsistency that can occur if the
|
||||
variable is additionally accessed from an interrupt.
|
||||
|
||||
Remove migrate_disable/enable() from dc_fpu_begin/end().
|
||||
|
||||
Cc: Tianci Yin <tianci.yin@amd.com>
|
||||
Cc: Aurabindo Pillai <aurabindo.pillai@amd.com>
|
||||
Fixes: 0c316556d1249 ("drm/amd/display: Disable migration to ensure consistency of per-CPU variable")
|
||||
Link: https://lore.kernel.org/r/20230921141516.520471-2-bigeasy@linutronix.de
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c | 2 --
|
||||
1 file changed, 2 deletions(-)
|
||||
|
||||
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
|
||||
index 172aa10a8800..86f4c0e04654 100644
|
||||
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
|
||||
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
|
||||
@@ -91,7 +91,6 @@ void dc_fpu_begin(const char *function_name, const int line)
|
||||
|
||||
if (*pcpu == 1) {
|
||||
#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
|
||||
- migrate_disable();
|
||||
kernel_fpu_begin();
|
||||
#elif defined(CONFIG_PPC64)
|
||||
if (cpu_has_feature(CPU_FTR_VSX_COMP)) {
|
||||
@@ -132,7 +131,6 @@ void dc_fpu_end(const char *function_name, const int line)
|
||||
if (*pcpu <= 0) {
|
||||
#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
|
||||
kernel_fpu_end();
|
||||
- migrate_enable();
|
||||
#elif defined(CONFIG_PPC64)
|
||||
if (cpu_has_feature(CPU_FTR_VSX_COMP)) {
|
||||
disable_kernel_vsx();
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,132 @@
|
|||
From 13345b6e1dccf7f0404111eb4081834eaf628563 Mon Sep 17 00:00:00 2001
|
||||
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Date: Thu, 21 Sep 2023 16:15:13 +0200
|
||||
Subject: [PATCH 011/195] drm/amd/display: Simplify the per-CPU usage.
|
||||
|
||||
The fpu_recursion_depth counter is used to ensure that dc_fpu_begin()
|
||||
can be invoked multiple times while the FPU-disable function itself is
|
||||
only invoked once. Also the counter part (dc_fpu_end()) is ballanced
|
||||
properly.
|
||||
|
||||
Instead of using the get_cpu_ptr() dance around the inc it is simpler to
|
||||
increment the per-CPU variable directly. Also the per-CPU variable has
|
||||
to be incremented and decremented on the same CPU. This is ensured by
|
||||
the inner-part which disables preemption. This is kind of not obvious,
|
||||
works and the preempt-counter is touched a few times for no reason.
|
||||
|
||||
Disable preemption before incrementing fpu_recursion_depth for the first
|
||||
time. Keep preemption disabled until dc_fpu_end() where the counter is
|
||||
decremented making it obvious that the preemption has to stay disabled
|
||||
while the counter is non-zero.
|
||||
Use simple inc/dec functions.
|
||||
Remove the nested preempt_disable/enable functions which are now not
|
||||
needed.
|
||||
|
||||
Link: https://lore.kernel.org/r/20230921141516.520471-3-bigeasy@linutronix.de
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
.../gpu/drm/amd/display/amdgpu_dm/dc_fpu.c | 50 ++++++++-----------
|
||||
1 file changed, 20 insertions(+), 30 deletions(-)
|
||||
|
||||
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
|
||||
index 86f4c0e04654..8bd5926b47e0 100644
|
||||
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
|
||||
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
|
||||
@@ -60,11 +60,9 @@ static DEFINE_PER_CPU(int, fpu_recursion_depth);
|
||||
*/
|
||||
inline void dc_assert_fp_enabled(void)
|
||||
{
|
||||
- int *pcpu, depth = 0;
|
||||
+ int depth;
|
||||
|
||||
- pcpu = get_cpu_ptr(&fpu_recursion_depth);
|
||||
- depth = *pcpu;
|
||||
- put_cpu_ptr(&fpu_recursion_depth);
|
||||
+ depth = __this_cpu_read(fpu_recursion_depth);
|
||||
|
||||
ASSERT(depth >= 1);
|
||||
}
|
||||
@@ -84,32 +82,27 @@ inline void dc_assert_fp_enabled(void)
|
||||
*/
|
||||
void dc_fpu_begin(const char *function_name, const int line)
|
||||
{
|
||||
- int *pcpu;
|
||||
+ int depth;
|
||||
|
||||
- pcpu = get_cpu_ptr(&fpu_recursion_depth);
|
||||
- *pcpu += 1;
|
||||
+ preempt_disable();
|
||||
+ depth = __this_cpu_inc_return(fpu_recursion_depth);
|
||||
|
||||
- if (*pcpu == 1) {
|
||||
+ if (depth == 1) {
|
||||
#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
|
||||
kernel_fpu_begin();
|
||||
#elif defined(CONFIG_PPC64)
|
||||
- if (cpu_has_feature(CPU_FTR_VSX_COMP)) {
|
||||
- preempt_disable();
|
||||
+ if (cpu_has_feature(CPU_FTR_VSX_COMP))
|
||||
enable_kernel_vsx();
|
||||
- } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) {
|
||||
- preempt_disable();
|
||||
+ else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP))
|
||||
enable_kernel_altivec();
|
||||
- } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) {
|
||||
- preempt_disable();
|
||||
+ else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE))
|
||||
enable_kernel_fp();
|
||||
- }
|
||||
#elif defined(CONFIG_ARM64)
|
||||
kernel_neon_begin();
|
||||
#endif
|
||||
}
|
||||
|
||||
- TRACE_DCN_FPU(true, function_name, line, *pcpu);
|
||||
- put_cpu_ptr(&fpu_recursion_depth);
|
||||
+ TRACE_DCN_FPU(true, function_name, line, depth);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -124,29 +117,26 @@ void dc_fpu_begin(const char *function_name, const int line)
|
||||
*/
|
||||
void dc_fpu_end(const char *function_name, const int line)
|
||||
{
|
||||
- int *pcpu;
|
||||
+ int depth;
|
||||
|
||||
- pcpu = get_cpu_ptr(&fpu_recursion_depth);
|
||||
- *pcpu -= 1;
|
||||
- if (*pcpu <= 0) {
|
||||
+ depth = __this_cpu_dec_return(fpu_recursion_depth);
|
||||
+ if (depth == 0) {
|
||||
#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
|
||||
kernel_fpu_end();
|
||||
#elif defined(CONFIG_PPC64)
|
||||
- if (cpu_has_feature(CPU_FTR_VSX_COMP)) {
|
||||
+ if (cpu_has_feature(CPU_FTR_VSX_COMP))
|
||||
disable_kernel_vsx();
|
||||
- preempt_enable();
|
||||
- } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) {
|
||||
+ else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP))
|
||||
disable_kernel_altivec();
|
||||
- preempt_enable();
|
||||
- } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) {
|
||||
+ else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE))
|
||||
disable_kernel_fp();
|
||||
- preempt_enable();
|
||||
- }
|
||||
#elif defined(CONFIG_ARM64)
|
||||
kernel_neon_end();
|
||||
#endif
|
||||
+ } else {
|
||||
+ WARN_ON_ONCE(depth < 0);
|
||||
}
|
||||
|
||||
- TRACE_DCN_FPU(false, function_name, line, *pcpu);
|
||||
- put_cpu_ptr(&fpu_recursion_depth);
|
||||
+ TRACE_DCN_FPU(false, function_name, line, depth);
|
||||
+ preempt_enable();
|
||||
}
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,31 @@
|
|||
From 533bd452afe45e25b387a11ab7748f9633995fee Mon Sep 17 00:00:00 2001
|
||||
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Date: Thu, 21 Sep 2023 16:15:14 +0200
|
||||
Subject: [PATCH 012/195] drm/amd/display: Add a warning if the FPU is used
|
||||
outside from task context.
|
||||
|
||||
Add a warning if the FPU is used from any context other than task
|
||||
context. This is only precaution since the code is not able to be used
|
||||
from softirq while the API allows it on x86 for instance.
|
||||
|
||||
Link: https://lore.kernel.org/r/20230921141516.520471-4-bigeasy@linutronix.de
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c | 1 +
|
||||
1 file changed, 1 insertion(+)
|
||||
|
||||
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
|
||||
index 8bd5926b47e0..4ae4720535a5 100644
|
||||
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
|
||||
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
|
||||
@@ -84,6 +84,7 @@ void dc_fpu_begin(const char *function_name, const int line)
|
||||
{
|
||||
int depth;
|
||||
|
||||
+ WARN_ON_ONCE(!in_task());
|
||||
preempt_disable();
|
||||
depth = __this_cpu_inc_return(fpu_recursion_depth);
|
||||
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,96 @@
|
|||
From 7c34575c85d40e203f3b18ccb63e526d978b1c78 Mon Sep 17 00:00:00 2001
|
||||
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Date: Thu, 21 Sep 2023 16:15:15 +0200
|
||||
Subject: [PATCH 013/195] drm/amd/display: Move the memory allocation out of
|
||||
dcn21_validate_bandwidth_fp().
|
||||
|
||||
dcn21_validate_bandwidth_fp() is invoked while FPU access has been
|
||||
enabled. FPU access requires disabling preemption even on PREEMPT_RT.
|
||||
It is not possible to allocate memory with disabled preemption even with
|
||||
GFP_ATOMIC on PREEMPT_RT.
|
||||
|
||||
Move the memory allocation before FPU access is enabled.
|
||||
|
||||
Link: https://bugzilla.kernel.org/show_bug.cgi?id=217928
|
||||
Link: https://lore.kernel.org/r/20230921141516.520471-5-bigeasy@linutronix.de
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 10 +++++++++-
|
||||
drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 7 ++-----
|
||||
drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h | 5 ++---
|
||||
3 files changed, 13 insertions(+), 9 deletions(-)
|
||||
|
||||
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
|
||||
index d1a25fe6c44f..5674c3450fc3 100644
|
||||
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
|
||||
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
|
||||
@@ -953,9 +953,17 @@ static bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
|
||||
bool fast_validate)
|
||||
{
|
||||
bool voltage_supported;
|
||||
+ display_e2e_pipe_params_st *pipes;
|
||||
+
|
||||
+ pipes = kcalloc(dc->res_pool->pipe_count, sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
|
||||
+ if (!pipes)
|
||||
+ return false;
|
||||
+
|
||||
DC_FP_START();
|
||||
- voltage_supported = dcn21_validate_bandwidth_fp(dc, context, fast_validate);
|
||||
+ voltage_supported = dcn21_validate_bandwidth_fp(dc, context, fast_validate, pipes);
|
||||
DC_FP_END();
|
||||
+
|
||||
+ kfree(pipes);
|
||||
return voltage_supported;
|
||||
}
|
||||
|
||||
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
|
||||
index 8a5a038fd855..89d4e969cfd8 100644
|
||||
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
|
||||
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
|
||||
@@ -2311,9 +2311,8 @@ static void dcn21_calculate_wm(struct dc *dc, struct dc_state *context,
|
||||
&context->bw_ctx.dml, pipes, pipe_cnt);
|
||||
}
|
||||
|
||||
-bool dcn21_validate_bandwidth_fp(struct dc *dc,
|
||||
- struct dc_state *context,
|
||||
- bool fast_validate)
|
||||
+bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
|
||||
+ bool fast_validate, display_e2e_pipe_params_st *pipes)
|
||||
{
|
||||
bool out = false;
|
||||
|
||||
@@ -2322,7 +2321,6 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc,
|
||||
int vlevel = 0;
|
||||
int pipe_split_from[MAX_PIPES];
|
||||
int pipe_cnt = 0;
|
||||
- display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
|
||||
DC_LOGGER_INIT(dc->ctx->logger);
|
||||
|
||||
BW_VAL_TRACE_COUNT();
|
||||
@@ -2362,7 +2360,6 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc,
|
||||
out = false;
|
||||
|
||||
validate_out:
|
||||
- kfree(pipes);
|
||||
|
||||
BW_VAL_TRACE_FINISH();
|
||||
|
||||
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
|
||||
index c51badf7b68a..a81a0b9e6884 100644
|
||||
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
|
||||
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
|
||||
@@ -77,9 +77,8 @@ int dcn21_populate_dml_pipes_from_context(struct dc *dc,
|
||||
struct dc_state *context,
|
||||
display_e2e_pipe_params_st *pipes,
|
||||
bool fast_validate);
|
||||
-bool dcn21_validate_bandwidth_fp(struct dc *dc,
|
||||
- struct dc_state *context,
|
||||
- bool fast_validate);
|
||||
+bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, bool
|
||||
+ fast_validate, display_e2e_pipe_params_st *pipes);
|
||||
void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
|
||||
|
||||
void dcn21_clk_mgr_set_bw_params_wm_table(struct clk_bw_params *bw_params);
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,130 @@
|
|||
From bf3d7bb0ed627ca816eb490b7ac2b26ec02e63d5 Mon Sep 17 00:00:00 2001
|
||||
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Date: Thu, 21 Sep 2023 16:15:16 +0200
|
||||
Subject: [PATCH 014/195] drm/amd/display: Move the memory allocation out of
|
||||
dcn20_validate_bandwidth_fp().
|
||||
|
||||
dcn20_validate_bandwidth_fp() is invoked while FPU access has been
|
||||
enabled. FPU access requires disabling preemption even on PREEMPT_RT.
|
||||
It is not possible to allocate memory with disabled preemption even with
|
||||
GFP_ATOMIC on PREEMPT_RT.
|
||||
|
||||
Move the memory allocation before FPU access is enabled.
|
||||
To preserve previous "clean" state of "pipes" add a memset() before the
|
||||
second invocation of dcn20_validate_bandwidth_internal() where the
|
||||
variable is used.
|
||||
|
||||
Link: https://lore.kernel.org/r/20230921141516.520471-6-bigeasy@linutronix.de
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
.../drm/amd/display/dc/dcn20/dcn20_resource.c | 10 +++++++++-
|
||||
.../gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 16 +++++++---------
|
||||
.../gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h | 5 ++---
|
||||
3 files changed, 18 insertions(+), 13 deletions(-)
|
||||
|
||||
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
|
||||
index d587f807dfd7..5036a3e60832 100644
|
||||
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
|
||||
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
|
||||
@@ -2141,9 +2141,17 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
|
||||
bool fast_validate)
|
||||
{
|
||||
bool voltage_supported;
|
||||
+ display_e2e_pipe_params_st *pipes;
|
||||
+
|
||||
+ pipes = kcalloc(dc->res_pool->pipe_count, sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
|
||||
+ if (!pipes)
|
||||
+ return false;
|
||||
+
|
||||
DC_FP_START();
|
||||
- voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate);
|
||||
+ voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate, pipes);
|
||||
DC_FP_END();
|
||||
+
|
||||
+ kfree(pipes);
|
||||
return voltage_supported;
|
||||
}
|
||||
|
||||
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
|
||||
index 89d4e969cfd8..68970d6cf031 100644
|
||||
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
|
||||
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
|
||||
@@ -2018,7 +2018,7 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st
|
||||
}
|
||||
|
||||
static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *context,
|
||||
- bool fast_validate)
|
||||
+ bool fast_validate, display_e2e_pipe_params_st *pipes)
|
||||
{
|
||||
bool out = false;
|
||||
|
||||
@@ -2027,7 +2027,6 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
|
||||
int vlevel = 0;
|
||||
int pipe_split_from[MAX_PIPES];
|
||||
int pipe_cnt = 0;
|
||||
- display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
|
||||
DC_LOGGER_INIT(dc->ctx->logger);
|
||||
|
||||
BW_VAL_TRACE_COUNT();
|
||||
@@ -2062,16 +2061,14 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
|
||||
out = false;
|
||||
|
||||
validate_out:
|
||||
- kfree(pipes);
|
||||
|
||||
BW_VAL_TRACE_FINISH();
|
||||
|
||||
return out;
|
||||
}
|
||||
|
||||
-bool dcn20_validate_bandwidth_fp(struct dc *dc,
|
||||
- struct dc_state *context,
|
||||
- bool fast_validate)
|
||||
+bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
|
||||
+ bool fast_validate, display_e2e_pipe_params_st *pipes)
|
||||
{
|
||||
bool voltage_supported = false;
|
||||
bool full_pstate_supported = false;
|
||||
@@ -2090,11 +2087,11 @@ bool dcn20_validate_bandwidth_fp(struct dc *dc,
|
||||
ASSERT(context != dc->current_state);
|
||||
|
||||
if (fast_validate) {
|
||||
- return dcn20_validate_bandwidth_internal(dc, context, true);
|
||||
+ return dcn20_validate_bandwidth_internal(dc, context, true, pipes);
|
||||
}
|
||||
|
||||
// Best case, we support full UCLK switch latency
|
||||
- voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
|
||||
+ voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false, pipes);
|
||||
full_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
|
||||
|
||||
if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 ||
|
||||
@@ -2106,7 +2103,8 @@ bool dcn20_validate_bandwidth_fp(struct dc *dc,
|
||||
// Fallback: Try to only support G6 temperature read latency
|
||||
context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us;
|
||||
|
||||
- voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
|
||||
+ memset(pipes, 0, dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st));
|
||||
+ voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false, pipes);
|
||||
dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
|
||||
|
||||
if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) {
|
||||
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
|
||||
index a81a0b9e6884..b6c34198ddc8 100644
|
||||
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
|
||||
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
|
||||
@@ -61,9 +61,8 @@ void dcn20_update_bounding_box(struct dc *dc,
|
||||
unsigned int num_states);
|
||||
void dcn20_patch_bounding_box(struct dc *dc,
|
||||
struct _vcs_dpi_soc_bounding_box_st *bb);
|
||||
-bool dcn20_validate_bandwidth_fp(struct dc *dc,
|
||||
- struct dc_state *context,
|
||||
- bool fast_validate);
|
||||
+bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
|
||||
+ bool fast_validate, display_e2e_pipe_params_st *pipes);
|
||||
void dcn20_fpu_set_wm_ranges(int i,
|
||||
struct pp_smu_wm_range_sets *ranges,
|
||||
struct _vcs_dpi_soc_bounding_box_st *loaded_bb);
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -1,7 +1,7 @@
|
|||
From 37205f0ce3bef04671d958f03e1460dcaeed41b7 Mon Sep 17 00:00:00 2001
|
||||
From 30de1da993f600f4771b3f3724ce0667f66ac69b Mon Sep 17 00:00:00 2001
|
||||
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Date: Mon, 15 Aug 2022 17:29:50 +0200
|
||||
Subject: [PATCH 09/62] net: Avoid the IPI to free the
|
||||
Subject: [PATCH 015/195] net: Avoid the IPI to free the
|
||||
|
||||
skb_attempt_defer_free() collects a skbs, which was allocated on a
|
||||
remote CPU, on a per-CPU list. These skbs are either freed on that
|
||||
|
@ -19,15 +19,15 @@ To void all this, schedule the deferred clean up from a worker.
|
|||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
include/linux/netdevice.h | 4 ++++
|
||||
net/core/dev.c | 37 ++++++++++++++++++++++++++++---------
|
||||
net/core/dev.c | 39 ++++++++++++++++++++++++++++++---------
|
||||
net/core/skbuff.c | 7 ++++++-
|
||||
3 files changed, 38 insertions(+), 10 deletions(-)
|
||||
3 files changed, 40 insertions(+), 10 deletions(-)
|
||||
|
||||
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
|
||||
index 0373e0935990..55d698367883 100644
|
||||
index b8e60a20416b..ffa5248a90e2 100644
|
||||
--- a/include/linux/netdevice.h
|
||||
+++ b/include/linux/netdevice.h
|
||||
@@ -3169,7 +3169,11 @@ struct softnet_data {
|
||||
@@ -3258,7 +3258,11 @@ struct softnet_data {
|
||||
int defer_count;
|
||||
int defer_ipi_scheduled;
|
||||
struct sk_buff *defer_list;
|
||||
|
@ -40,10 +40,10 @@ index 0373e0935990..55d698367883 100644
|
|||
|
||||
static inline void input_queue_head_incr(struct softnet_data *sd)
|
||||
diff --git a/net/core/dev.c b/net/core/dev.c
|
||||
index 29ae6265a408..6a13043ff0f3 100644
|
||||
index e480afb50d4c..227338b8cda8 100644
|
||||
--- a/net/core/dev.c
|
||||
+++ b/net/core/dev.c
|
||||
@@ -4621,15 +4621,6 @@ static void rps_trigger_softirq(void *data)
|
||||
@@ -4705,15 +4705,6 @@ static void rps_trigger_softirq(void *data)
|
||||
|
||||
#endif /* CONFIG_RPS */
|
||||
|
||||
|
@ -57,13 +57,14 @@ index 29ae6265a408..6a13043ff0f3 100644
|
|||
-}
|
||||
-
|
||||
/*
|
||||
* Check if this softnet_data structure is another cpu one
|
||||
* If yes, queue it to our IPI list and return 1
|
||||
@@ -6687,6 +6678,30 @@ static void skb_defer_free_flush(struct softnet_data *sd)
|
||||
* After we queued a packet into sd->input_pkt_queue,
|
||||
* we need to make sure this queue is serviced soon.
|
||||
@@ -6682,6 +6673,32 @@ static void skb_defer_free_flush(struct softnet_data *sd)
|
||||
}
|
||||
}
|
||||
|
||||
+#ifndef CONFIG_PREEMPT_RT
|
||||
+
|
||||
+/* Called from hardirq (IPI) context */
|
||||
+static void trigger_rx_softirq(void *data)
|
||||
+{
|
||||
|
@ -85,12 +86,13 @@ index 29ae6265a408..6a13043ff0f3 100644
|
|||
+ skb_defer_free_flush(sd);
|
||||
+ local_bh_enable();
|
||||
+}
|
||||
+
|
||||
+#endif
|
||||
+
|
||||
static __latent_entropy void net_rx_action(struct softirq_action *h)
|
||||
static int napi_threaded_poll(void *data)
|
||||
{
|
||||
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
|
||||
@@ -11438,7 +11453,11 @@ static int __init net_dev_init(void)
|
||||
struct napi_struct *napi = data;
|
||||
@@ -11606,7 +11623,11 @@ static int __init net_dev_init(void)
|
||||
INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
|
||||
sd->cpu = i;
|
||||
#endif
|
||||
|
@ -103,10 +105,10 @@ index 29ae6265a408..6a13043ff0f3 100644
|
|||
|
||||
init_gro_hash(&sd->backlog);
|
||||
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
|
||||
index 8a819d0a7bfb..424ad963fa0c 100644
|
||||
index 011d69029112..9a9fbe18bf2f 100644
|
||||
--- a/net/core/skbuff.c
|
||||
+++ b/net/core/skbuff.c
|
||||
@@ -6682,6 +6682,11 @@ nodefer: __kfree_skb(skb);
|
||||
@@ -6844,8 +6844,13 @@ nodefer: __kfree_skb(skb);
|
||||
/* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
|
||||
* if we are unlucky enough (this seems very unlikely).
|
||||
*/
|
||||
|
@ -119,6 +121,8 @@ index 8a819d0a7bfb..424ad963fa0c 100644
|
|||
+#endif
|
||||
+ }
|
||||
}
|
||||
|
||||
static void skb_splice_csum_page(struct sk_buff *skb, struct page *page,
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -1,81 +0,0 @@
|
|||
From a61678645efc9d4aa757b038d91bbef571c3ba17 Mon Sep 17 00:00:00 2001
|
||||
From: Haris Okanovic <haris.okanovic@ni.com>
|
||||
Date: Tue, 15 Aug 2017 15:13:08 -0500
|
||||
Subject: [PATCH 16/62] tpm_tis: fix stall after iowrite*()s
|
||||
|
||||
ioread8() operations to TPM MMIO addresses can stall the cpu when
|
||||
immediately following a sequence of iowrite*()'s to the same region.
|
||||
|
||||
For example, cyclitest measures ~400us latency spikes when a non-RT
|
||||
usermode application communicates with an SPI-based TPM chip (Intel Atom
|
||||
E3940 system, PREEMPT_RT kernel). The spikes are caused by a
|
||||
stalling ioread8() operation following a sequence of 30+ iowrite8()s to
|
||||
the same address. I believe this happens because the write sequence is
|
||||
buffered (in cpu or somewhere along the bus), and gets flushed on the
|
||||
first LOAD instruction (ioread*()) that follows.
|
||||
|
||||
The enclosed change appears to fix this issue: read the TPM chip's
|
||||
access register (status code) after every iowrite*() operation to
|
||||
amortize the cost of flushing data to chip across multiple instructions.
|
||||
|
||||
Signed-off-by: Haris Okanovic <haris.okanovic@ni.com>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
drivers/char/tpm/tpm_tis.c | 29 +++++++++++++++++++++++++++--
|
||||
1 file changed, 27 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
|
||||
index 0d084d6652c4..5d620322bdc2 100644
|
||||
--- a/drivers/char/tpm/tpm_tis.c
|
||||
+++ b/drivers/char/tpm/tpm_tis.c
|
||||
@@ -50,6 +50,31 @@ static inline struct tpm_tis_tcg_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *da
|
||||
return container_of(data, struct tpm_tis_tcg_phy, priv);
|
||||
}
|
||||
|
||||
+#ifdef CONFIG_PREEMPT_RT
|
||||
+/*
|
||||
+ * Flushes previous write operations to chip so that a subsequent
|
||||
+ * ioread*()s won't stall a cpu.
|
||||
+ */
|
||||
+static inline void tpm_tis_flush(void __iomem *iobase)
|
||||
+{
|
||||
+ ioread8(iobase + TPM_ACCESS(0));
|
||||
+}
|
||||
+#else
|
||||
+#define tpm_tis_flush(iobase) do { } while (0)
|
||||
+#endif
|
||||
+
|
||||
+static inline void tpm_tis_iowrite8(u8 b, void __iomem *iobase, u32 addr)
|
||||
+{
|
||||
+ iowrite8(b, iobase + addr);
|
||||
+ tpm_tis_flush(iobase);
|
||||
+}
|
||||
+
|
||||
+static inline void tpm_tis_iowrite32(u32 b, void __iomem *iobase, u32 addr)
|
||||
+{
|
||||
+ iowrite32(b, iobase + addr);
|
||||
+ tpm_tis_flush(iobase);
|
||||
+}
|
||||
+
|
||||
static int interrupts = -1;
|
||||
module_param(interrupts, int, 0444);
|
||||
MODULE_PARM_DESC(interrupts, "Enable interrupts");
|
||||
@@ -202,12 +227,12 @@ static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
|
||||
switch (io_mode) {
|
||||
case TPM_TIS_PHYS_8:
|
||||
while (len--)
|
||||
- iowrite8(*value++, phy->iobase + addr);
|
||||
+ tpm_tis_iowrite8(*value++, phy->iobase, addr);
|
||||
break;
|
||||
case TPM_TIS_PHYS_16:
|
||||
return -EINVAL;
|
||||
case TPM_TIS_PHYS_32:
|
||||
- iowrite32(le32_to_cpu(*((__le32 *)value)), phy->iobase + addr);
|
||||
+ tpm_tis_iowrite32(le32_to_cpu(*((__le32 *)value)), phy->iobase, addr);
|
||||
break;
|
||||
}
|
||||
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -1,7 +1,7 @@
|
|||
From bd3f264eeb33c0602b3f9d66a603e5a2e8f9d9ee Mon Sep 17 00:00:00 2001
|
||||
From c17f2c393b160199e64c464189661283345d2e73 Mon Sep 17 00:00:00 2001
|
||||
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Date: Wed, 7 Aug 2019 18:15:38 +0200
|
||||
Subject: [PATCH 10/62] x86: Allow to enable RT
|
||||
Subject: [PATCH 016/195] x86: Allow to enable RT
|
||||
|
||||
Allow to select RT.
|
||||
|
||||
|
@ -12,13 +12,13 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
|||
1 file changed, 1 insertion(+)
|
||||
|
||||
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
|
||||
index 4c9bfc4be58d..f7f81e3012cc 100644
|
||||
index fe3292e310d4..984068efd48b 100644
|
||||
--- a/arch/x86/Kconfig
|
||||
+++ b/arch/x86/Kconfig
|
||||
@@ -27,6 +27,7 @@ config X86_64
|
||||
# Options that are inherently 64-bit kernel only:
|
||||
@@ -28,6 +28,7 @@ config X86_64
|
||||
select ARCH_HAS_GIGANTIC_PAGE
|
||||
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
|
||||
select ARCH_SUPPORTS_PER_VMA_LOCK
|
||||
+ select ARCH_SUPPORTS_RT
|
||||
select ARCH_USE_CMPXCHG_LOCKREF
|
||||
select HAVE_ARCH_SOFT_DIRTY
|
|
@ -1,7 +1,7 @@
|
|||
From 4c22c4a6e79603a30f6b875c8fd65efdad35ac8f Mon Sep 17 00:00:00 2001
|
||||
From 44e5f60ede7f2407fe12fa477b09f1a156e0634b Mon Sep 17 00:00:00 2001
|
||||
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Date: Thu, 7 Nov 2019 17:49:20 +0100
|
||||
Subject: [PATCH 11/62] x86: Enable RT also on 32bit
|
||||
Subject: [PATCH 017/195] x86: Enable RT also on 32bit
|
||||
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
|
@ -10,18 +10,18 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
|||
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||
|
||||
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
|
||||
index f7f81e3012cc..c9bed9c69423 100644
|
||||
index 984068efd48b..1b445e289190 100644
|
||||
--- a/arch/x86/Kconfig
|
||||
+++ b/arch/x86/Kconfig
|
||||
@@ -27,7 +27,6 @@ config X86_64
|
||||
# Options that are inherently 64-bit kernel only:
|
||||
@@ -28,7 +28,6 @@ config X86_64
|
||||
select ARCH_HAS_GIGANTIC_PAGE
|
||||
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
|
||||
select ARCH_SUPPORTS_PER_VMA_LOCK
|
||||
- select ARCH_SUPPORTS_RT
|
||||
select ARCH_USE_CMPXCHG_LOCKREF
|
||||
select HAVE_ARCH_SOFT_DIRTY
|
||||
select MODULES_USE_ELF_RELA
|
||||
@@ -114,6 +113,7 @@ config X86
|
||||
@@ -118,6 +117,7 @@ config X86
|
||||
select ARCH_USES_CFI_TRAPS if X86_64 && CFI_CLANG
|
||||
select ARCH_SUPPORTS_LTO_CLANG
|
||||
select ARCH_SUPPORTS_LTO_CLANG_THIN
|
|
@ -1,33 +0,0 @@
|
|||
From 0b833bd5bb0c55f681cf80dc6eaf59ab1cea7b51 Mon Sep 17 00:00:00 2001
|
||||
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Date: Fri, 11 Mar 2022 17:44:57 +0100
|
||||
Subject: [PATCH 18/62] locking/lockdep: Remove lockdep_init_map_crosslock.
|
||||
|
||||
The cross-release bits have been removed, lockdep_init_map_crosslock() is
|
||||
a leftover.
|
||||
|
||||
Remove lockdep_init_map_crosslock.
|
||||
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Reviewed-by: Waiman Long <longman@redhat.com>
|
||||
Link: https://lore.kernel.org/r/20220311164457.46461-1-bigeasy@linutronix.de
|
||||
Link: https://lore.kernel.org/r/YqITgY+2aPITu96z@linutronix.de
|
||||
---
|
||||
include/linux/lockdep.h | 1 -
|
||||
1 file changed, 1 deletion(-)
|
||||
|
||||
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
|
||||
index 1f1099dac3f0..1023f349af71 100644
|
||||
--- a/include/linux/lockdep.h
|
||||
+++ b/include/linux/lockdep.h
|
||||
@@ -435,7 +435,6 @@ enum xhlock_context_t {
|
||||
XHLOCK_CTX_NR,
|
||||
};
|
||||
|
||||
-#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
|
||||
/*
|
||||
* To initialize a lockdep_map statically use this macro.
|
||||
* Note that _name must not be NULL.
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,63 @@
|
|||
From f4ac796216884ec5942d1d74fde86d1ed5f1f695 Mon Sep 17 00:00:00 2001
|
||||
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Date: Tue, 1 Aug 2023 17:26:48 +0200
|
||||
Subject: [PATCH 018/195] sched/rt: Don't try push tasks if there are none.
|
||||
|
||||
I have a RT task X at a high priority and cyclictest on each CPU with
|
||||
lower priority than X's. If X is active and each CPU wakes their own
|
||||
cylictest thread then it ends in a longer rto_push storm.
|
||||
A random CPU determines via balance_rt() that the CPU on which X is
|
||||
running needs to push tasks. X has the highest priority, cyclictest is
|
||||
next in line so there is nothing that can be done since the task with
|
||||
the higher priority is not touched.
|
||||
|
||||
tell_cpu_to_push() increments rto_loop_next and schedules
|
||||
rto_push_irq_work_func() on X's CPU. The other CPUs also increment the
|
||||
loop counter and do the same. Once rto_push_irq_work_func() is active it
|
||||
does nothing because it has _no_ pushable tasks on its runqueue. Then
|
||||
checks rto_next_cpu() and decides to queue irq_work on the local CPU
|
||||
because another CPU requested a push by incrementing the counter.
|
||||
|
||||
I have traces where ~30 CPUs request this ~3 times each before it
|
||||
finally ends. This greatly increases X's runtime while X isn't making
|
||||
much progress.
|
||||
|
||||
Teach rto_next_cpu() to only return CPUs which also have tasks on their
|
||||
runqueue which can be pushed away. This does not reduce the
|
||||
tell_cpu_to_push() invocations (rto_loop_next counter increments) but
|
||||
reduces the amount of issued rto_push_irq_work_func() if nothing can be
|
||||
done. As the result the overloaded CPU is blocked less often.
|
||||
|
||||
There are still cases where the "same job" is repeated several times
|
||||
(for instance the current CPU needs to resched but didn't yet because
|
||||
the irq-work is repeated a few times and so the old task remains on the
|
||||
CPU) but the majority of request end in tell_cpu_to_push() before an IPI
|
||||
is issued.
|
||||
|
||||
Reviewed-by: "Steven Rostedt (Google)" <rostedt@goodmis.org>
|
||||
Link: https://lore.kernel.org/r/20230801152648._y603AS_@linutronix.de
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
kernel/sched/rt.c | 5 ++++-
|
||||
1 file changed, 4 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
|
||||
index 904dd8534597..563161845e79 100644
|
||||
--- a/kernel/sched/rt.c
|
||||
+++ b/kernel/sched/rt.c
|
||||
@@ -2249,8 +2249,11 @@ static int rto_next_cpu(struct root_domain *rd)
|
||||
|
||||
rd->rto_cpu = cpu;
|
||||
|
||||
- if (cpu < nr_cpu_ids)
|
||||
+ if (cpu < nr_cpu_ids) {
|
||||
+ if (!has_pushable_tasks(cpu_rq(cpu)))
|
||||
+ continue;
|
||||
return cpu;
|
||||
+ }
|
||||
|
||||
rd->rto_cpu = -1;
|
||||
|
||||
--
|
||||
2.43.0
|
||||
|
File diff suppressed because it is too large
Load Diff
|
@ -1,7 +1,7 @@
|
|||
From d2864e2d892720940d5a3acb606abcc10089bd73 Mon Sep 17 00:00:00 2001
|
||||
From ddbe9f49b78c945704d68bd7333b354b13527703 Mon Sep 17 00:00:00 2001
|
||||
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Date: Wed, 1 Dec 2021 17:41:09 +0100
|
||||
Subject: [PATCH 12/62] softirq: Use a dedicated thread for timer wakeups.
|
||||
Subject: [PATCH 019/195] softirq: Use a dedicated thread for timer wakeups.
|
||||
|
||||
A timer/hrtimer softirq is raised in-IRQ context. With threaded
|
||||
interrupts enabled or on PREEMPT_RT this leads to waking the ksoftirqd
|
||||
|
@ -72,10 +72,10 @@ index 4a1dc88ddbff..0efba74a835c 100644
|
|||
|
||||
static inline struct task_struct *this_cpu_ksoftirqd(void)
|
||||
diff --git a/kernel/softirq.c b/kernel/softirq.c
|
||||
index c8a6913c067d..ed6d7c41aa17 100644
|
||||
index 210cf5f8d92c..c29c30106eb8 100644
|
||||
--- a/kernel/softirq.c
|
||||
+++ b/kernel/softirq.c
|
||||
@@ -637,6 +637,29 @@ static inline void tick_irq_exit(void)
|
||||
@@ -619,6 +619,29 @@ static inline void tick_irq_exit(void)
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -105,7 +105,7 @@ index c8a6913c067d..ed6d7c41aa17 100644
|
|||
static inline void __irq_exit_rcu(void)
|
||||
{
|
||||
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
|
||||
@@ -646,8 +669,13 @@ static inline void __irq_exit_rcu(void)
|
||||
@@ -628,8 +651,13 @@ static inline void __irq_exit_rcu(void)
|
||||
#endif
|
||||
account_hardirq_exit(current);
|
||||
preempt_count_sub(HARDIRQ_OFFSET);
|
||||
|
@ -121,7 +121,7 @@ index c8a6913c067d..ed6d7c41aa17 100644
|
|||
|
||||
tick_irq_exit();
|
||||
}
|
||||
@@ -976,12 +1004,70 @@ static struct smp_hotplug_thread softirq_threads = {
|
||||
@@ -963,12 +991,70 @@ static struct smp_hotplug_thread softirq_threads = {
|
||||
.thread_comm = "ksoftirqd/%u",
|
||||
};
|
||||
|
||||
|
@ -194,10 +194,10 @@ index c8a6913c067d..ed6d7c41aa17 100644
|
|||
}
|
||||
early_initcall(spawn_ksoftirqd);
|
||||
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
|
||||
index 5561dabc9b22..c5d480d5da15 100644
|
||||
index 760793998cdd..9f4d7ab03e39 100644
|
||||
--- a/kernel/time/hrtimer.c
|
||||
+++ b/kernel/time/hrtimer.c
|
||||
@@ -1805,7 +1805,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
|
||||
@@ -1808,7 +1808,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
|
||||
if (!ktime_before(now, cpu_base->softirq_expires_next)) {
|
||||
cpu_base->softirq_expires_next = KTIME_MAX;
|
||||
cpu_base->softirq_activated = 1;
|
||||
|
@ -206,7 +206,7 @@ index 5561dabc9b22..c5d480d5da15 100644
|
|||
}
|
||||
|
||||
__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
|
||||
@@ -1918,7 +1918,7 @@ void hrtimer_run_queues(void)
|
||||
@@ -1921,7 +1921,7 @@ void hrtimer_run_queues(void)
|
||||
if (!ktime_before(now, cpu_base->softirq_expires_next)) {
|
||||
cpu_base->softirq_expires_next = KTIME_MAX;
|
||||
cpu_base->softirq_activated = 1;
|
||||
|
@ -216,10 +216,10 @@ index 5561dabc9b22..c5d480d5da15 100644
|
|||
|
||||
__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
|
||||
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
|
||||
index 717fcb9fb14a..e6219da89933 100644
|
||||
index 63a8ce7177dd..7cad6fe3c035 100644
|
||||
--- a/kernel/time/timer.c
|
||||
+++ b/kernel/time/timer.c
|
||||
@@ -1822,7 +1822,7 @@ static void run_local_timers(void)
|
||||
@@ -2054,7 +2054,7 @@ static void run_local_timers(void)
|
||||
if (time_before(jiffies, base->next_expiry))
|
||||
return;
|
||||
}
|
|
@ -1,607 +0,0 @@
|
|||
From 30f2ae30ddb074556a0f9a9a6bcc4bb071d7ea12 Mon Sep 17 00:00:00 2001
|
||||
From: John Ogness <john.ogness@linutronix.de>
|
||||
Date: Fri, 4 Feb 2022 16:01:17 +0106
|
||||
Subject: [PATCH 20/62] printk: add infrastucture for atomic consoles
|
||||
|
||||
Many times it is not possible to see the console output on
|
||||
panic because printing threads cannot be scheduled and/or the
|
||||
console is already taken and forcibly overtaking/busting the
|
||||
locks does provide the hoped results.
|
||||
|
||||
Introduce a new infrastructure to support "atomic consoles".
|
||||
A new optional callback in struct console, write_atomic(), is
|
||||
available for consoles to provide an implemention for writing
|
||||
console messages. The implementation must be NMI safe if they
|
||||
can run on an architecture where NMIs exist.
|
||||
|
||||
Console drivers implementing the write_atomic() callback must
|
||||
also select CONFIG_HAVE_ATOMIC_CONSOLE in order to enable the
|
||||
atomic console code within the printk subsystem.
|
||||
|
||||
If atomic consoles are available, panic() will flush the kernel
|
||||
log only to the atomic consoles (before busting spinlocks).
|
||||
Afterwards, panic() will continue as before, which includes
|
||||
attempting to flush the other (non-atomic) consoles.
|
||||
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
include/linux/console.h | 16 ++-
|
||||
init/Kconfig | 4 +
|
||||
kernel/panic.c | 6 +-
|
||||
kernel/printk/printk.c | 293 ++++++++++++++++++++++++++++++++++++----
|
||||
4 files changed, 290 insertions(+), 29 deletions(-)
|
||||
|
||||
diff --git a/include/linux/console.h b/include/linux/console.h
|
||||
index 143653090c48..8a813cbaf928 100644
|
||||
--- a/include/linux/console.h
|
||||
+++ b/include/linux/console.h
|
||||
@@ -138,9 +138,19 @@ static inline int con_debug_leave(void)
|
||||
#define CON_BRL (32) /* Used for a braille device */
|
||||
#define CON_EXTENDED (64) /* Use the extended output format a la /dev/kmsg */
|
||||
|
||||
+#ifdef CONFIG_HAVE_ATOMIC_CONSOLE
|
||||
+struct console_atomic_data {
|
||||
+ u64 seq;
|
||||
+ char *text;
|
||||
+ char *ext_text;
|
||||
+ char *dropped_text;
|
||||
+};
|
||||
+#endif
|
||||
+
|
||||
struct console {
|
||||
char name[16];
|
||||
void (*write)(struct console *, const char *, unsigned);
|
||||
+ void (*write_atomic)(struct console *, const char *, unsigned);
|
||||
int (*read)(struct console *, char *, unsigned);
|
||||
struct tty_driver *(*device)(struct console *, int *);
|
||||
void (*unblank)(void);
|
||||
@@ -153,7 +163,10 @@ struct console {
|
||||
uint ispeed;
|
||||
uint ospeed;
|
||||
u64 seq;
|
||||
- unsigned long dropped;
|
||||
+ atomic_long_t dropped;
|
||||
+#ifdef CONFIG_HAVE_ATOMIC_CONSOLE
|
||||
+ struct console_atomic_data *atomic_data;
|
||||
+#endif
|
||||
struct task_struct *thread;
|
||||
bool blocked;
|
||||
|
||||
@@ -184,6 +197,7 @@ extern int console_set_on_cmdline;
|
||||
extern struct console *early_console;
|
||||
|
||||
enum con_flush_mode {
|
||||
+ CONSOLE_ATOMIC_FLUSH_PENDING,
|
||||
CONSOLE_FLUSH_PENDING,
|
||||
CONSOLE_REPLAY_ALL,
|
||||
};
|
||||
diff --git a/init/Kconfig b/init/Kconfig
|
||||
index de255842f5d0..d45312780b3a 100644
|
||||
--- a/init/Kconfig
|
||||
+++ b/init/Kconfig
|
||||
@@ -1582,6 +1582,10 @@ config PRINTK
|
||||
very difficult to diagnose system problems, saying N here is
|
||||
strongly discouraged.
|
||||
|
||||
+config HAVE_ATOMIC_CONSOLE
|
||||
+ bool
|
||||
+ default n
|
||||
+
|
||||
config BUG
|
||||
bool "BUG() support" if EXPERT
|
||||
default y
|
||||
diff --git a/kernel/panic.c b/kernel/panic.c
|
||||
index 88cd873c7c30..97cc495d95f8 100644
|
||||
--- a/kernel/panic.c
|
||||
+++ b/kernel/panic.c
|
||||
@@ -322,7 +322,6 @@ void panic(const char *fmt, ...)
|
||||
panic_smp_self_stop();
|
||||
|
||||
console_verbose();
|
||||
- bust_spinlocks(1);
|
||||
va_start(args, fmt);
|
||||
len = vscnprintf(buf, sizeof(buf), fmt, args);
|
||||
va_end(args);
|
||||
@@ -339,6 +338,11 @@ void panic(const char *fmt, ...)
|
||||
dump_stack();
|
||||
#endif
|
||||
|
||||
+ /* If atomic consoles are available, flush the kernel log. */
|
||||
+ console_flush_on_panic(CONSOLE_ATOMIC_FLUSH_PENDING);
|
||||
+
|
||||
+ bust_spinlocks(1);
|
||||
+
|
||||
/*
|
||||
* If kgdb is enabled, give it a chance to run before we stop all
|
||||
* the other CPUs or else we won't be able to debug processes left
|
||||
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
|
||||
index e9f9b66608a0..73b1727087c7 100644
|
||||
--- a/kernel/printk/printk.c
|
||||
+++ b/kernel/printk/printk.c
|
||||
@@ -44,6 +44,7 @@
|
||||
#include <linux/irq_work.h>
|
||||
#include <linux/ctype.h>
|
||||
#include <linux/uio.h>
|
||||
+#include <linux/clocksource.h>
|
||||
#include <linux/sched/clock.h>
|
||||
#include <linux/sched/debug.h>
|
||||
#include <linux/sched/task_stack.h>
|
||||
@@ -2060,19 +2061,28 @@ static int console_trylock_spinning(void)
|
||||
* dropped, a dropped message will be written out first.
|
||||
*/
|
||||
static void call_console_driver(struct console *con, const char *text, size_t len,
|
||||
- char *dropped_text)
|
||||
+ char *dropped_text, bool atomic_printing)
|
||||
{
|
||||
+ unsigned long dropped = 0;
|
||||
size_t dropped_len;
|
||||
|
||||
- if (con->dropped && dropped_text) {
|
||||
+ if (dropped_text)
|
||||
+ dropped = atomic_long_xchg_relaxed(&con->dropped, 0);
|
||||
+
|
||||
+ if (dropped) {
|
||||
dropped_len = snprintf(dropped_text, DROPPED_TEXT_MAX,
|
||||
"** %lu printk messages dropped **\n",
|
||||
- con->dropped);
|
||||
- con->dropped = 0;
|
||||
- con->write(con, dropped_text, dropped_len);
|
||||
+ dropped);
|
||||
+ if (atomic_printing)
|
||||
+ con->write_atomic(con, dropped_text, dropped_len);
|
||||
+ else
|
||||
+ con->write(con, dropped_text, dropped_len);
|
||||
}
|
||||
|
||||
- con->write(con, text, len);
|
||||
+ if (atomic_printing)
|
||||
+ con->write_atomic(con, text, len);
|
||||
+ else
|
||||
+ con->write(con, text, len);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -2430,6 +2440,76 @@ asmlinkage __visible int _printk(const char *fmt, ...)
|
||||
}
|
||||
EXPORT_SYMBOL(_printk);
|
||||
|
||||
+#ifdef CONFIG_HAVE_ATOMIC_CONSOLE
|
||||
+static void __free_atomic_data(struct console_atomic_data *d)
|
||||
+{
|
||||
+ kfree(d->text);
|
||||
+ kfree(d->ext_text);
|
||||
+ kfree(d->dropped_text);
|
||||
+}
|
||||
+
|
||||
+static void free_atomic_data(struct console_atomic_data *d)
|
||||
+{
|
||||
+ int count = 1;
|
||||
+ int i;
|
||||
+
|
||||
+ if (!d)
|
||||
+ return;
|
||||
+
|
||||
+#ifdef CONFIG_HAVE_NMI
|
||||
+ count = 2;
|
||||
+#endif
|
||||
+
|
||||
+ for (i = 0; i < count; i++)
|
||||
+ __free_atomic_data(&d[i]);
|
||||
+ kfree(d);
|
||||
+}
|
||||
+
|
||||
+static int __alloc_atomic_data(struct console_atomic_data *d, short flags)
|
||||
+{
|
||||
+ d->text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL);
|
||||
+ if (!d->text)
|
||||
+ return -1;
|
||||
+
|
||||
+ if (flags & CON_EXTENDED) {
|
||||
+ d->ext_text = kmalloc(CONSOLE_EXT_LOG_MAX, GFP_KERNEL);
|
||||
+ if (!d->ext_text)
|
||||
+ return -1;
|
||||
+ } else {
|
||||
+ d->dropped_text = kmalloc(DROPPED_TEXT_MAX, GFP_KERNEL);
|
||||
+ if (!d->dropped_text)
|
||||
+ return -1;
|
||||
+ }
|
||||
+
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+static struct console_atomic_data *alloc_atomic_data(short flags)
|
||||
+{
|
||||
+ struct console_atomic_data *d;
|
||||
+ int count = 1;
|
||||
+ int i;
|
||||
+
|
||||
+#ifdef CONFIG_HAVE_NMI
|
||||
+ count = 2;
|
||||
+#endif
|
||||
+
|
||||
+ d = kzalloc(sizeof(*d) * count, GFP_KERNEL);
|
||||
+ if (!d)
|
||||
+ goto err_out;
|
||||
+
|
||||
+ for (i = 0; i < count; i++) {
|
||||
+ if (__alloc_atomic_data(&d[i], flags) != 0)
|
||||
+ goto err_out;
|
||||
+ }
|
||||
+
|
||||
+ return d;
|
||||
+err_out:
|
||||
+ free_atomic_data(d);
|
||||
+ return NULL;
|
||||
+}
|
||||
+#endif /* CONFIG_HAVE_ATOMIC_CONSOLE */
|
||||
+
|
||||
static bool pr_flush(int timeout_ms, bool reset_on_progress);
|
||||
static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress);
|
||||
|
||||
@@ -2445,6 +2525,8 @@ static void printk_start_kthread(struct console *con);
|
||||
#define prb_first_valid_seq(rb) 0
|
||||
#define prb_next_seq(rb) 0
|
||||
|
||||
+#define free_atomic_data(d)
|
||||
+
|
||||
static u64 syslog_seq;
|
||||
|
||||
static size_t record_print_text(const struct printk_record *r,
|
||||
@@ -2463,7 +2545,7 @@ static ssize_t msg_print_ext_body(char *buf, size_t size,
|
||||
static void console_lock_spinning_enable(void) { }
|
||||
static int console_lock_spinning_disable_and_check(void) { return 0; }
|
||||
static void call_console_driver(struct console *con, const char *text, size_t len,
|
||||
- char *dropped_text)
|
||||
+ char *dropped_text, bool atomic_printing)
|
||||
{
|
||||
}
|
||||
static bool suppress_message_printing(int level) { return false; }
|
||||
@@ -2819,10 +2901,20 @@ static inline bool __console_is_usable(short flags)
|
||||
*
|
||||
* Requires holding the console_lock.
|
||||
*/
|
||||
-static inline bool console_is_usable(struct console *con)
|
||||
+static inline bool console_is_usable(struct console *con, bool atomic_printing)
|
||||
{
|
||||
- if (!con->write)
|
||||
+ if (atomic_printing) {
|
||||
+#ifdef CONFIG_HAVE_ATOMIC_CONSOLE
|
||||
+ if (!con->write_atomic)
|
||||
+ return false;
|
||||
+ if (!con->atomic_data)
|
||||
+ return false;
|
||||
+#else
|
||||
+ return false;
|
||||
+#endif
|
||||
+ } else if (!con->write) {
|
||||
return false;
|
||||
+ }
|
||||
|
||||
return __console_is_usable(con->flags);
|
||||
}
|
||||
@@ -2847,6 +2939,66 @@ static void __console_unlock(void)
|
||||
up_console_sem();
|
||||
}
|
||||
|
||||
+static u64 read_console_seq(struct console *con)
|
||||
+{
|
||||
+#ifdef CONFIG_HAVE_ATOMIC_CONSOLE
|
||||
+ unsigned long flags;
|
||||
+ u64 seq2;
|
||||
+ u64 seq;
|
||||
+
|
||||
+ if (!con->atomic_data)
|
||||
+ return con->seq;
|
||||
+
|
||||
+ printk_cpu_sync_get_irqsave(flags);
|
||||
+
|
||||
+ seq = con->seq;
|
||||
+ seq2 = con->atomic_data[0].seq;
|
||||
+ if (seq2 > seq)
|
||||
+ seq = seq2;
|
||||
+#ifdef CONFIG_HAVE_NMI
|
||||
+ seq2 = con->atomic_data[1].seq;
|
||||
+ if (seq2 > seq)
|
||||
+ seq = seq2;
|
||||
+#endif
|
||||
+
|
||||
+ printk_cpu_sync_put_irqrestore(flags);
|
||||
+
|
||||
+ return seq;
|
||||
+#else /* CONFIG_HAVE_ATOMIC_CONSOLE */
|
||||
+ return con->seq;
|
||||
+#endif
|
||||
+}
|
||||
+
|
||||
+static void write_console_seq(struct console *con, u64 val, bool atomic_printing)
|
||||
+{
|
||||
+#ifdef CONFIG_HAVE_ATOMIC_CONSOLE
|
||||
+ unsigned long flags;
|
||||
+ u64 *seq;
|
||||
+
|
||||
+ if (!con->atomic_data) {
|
||||
+ con->seq = val;
|
||||
+ return;
|
||||
+ }
|
||||
+
|
||||
+ printk_cpu_sync_get_irqsave(flags);
|
||||
+
|
||||
+ if (atomic_printing) {
|
||||
+ seq = &con->atomic_data[0].seq;
|
||||
+#ifdef CONFIG_HAVE_NMI
|
||||
+ if (in_nmi())
|
||||
+ seq = &con->atomic_data[1].seq;
|
||||
+#endif
|
||||
+ } else {
|
||||
+ seq = &con->seq;
|
||||
+ }
|
||||
+ *seq = val;
|
||||
+
|
||||
+ printk_cpu_sync_put_irqrestore(flags);
|
||||
+#else /* CONFIG_HAVE_ATOMIC_CONSOLE */
|
||||
+ con->seq = val;
|
||||
+#endif
|
||||
+}
|
||||
+
|
||||
/*
|
||||
* Print one record for the given console. The record printed is whatever
|
||||
* record is the next available record for the given console.
|
||||
@@ -2859,6 +3011,8 @@ static void __console_unlock(void)
|
||||
* If dropped messages should be printed, @dropped_text is a buffer of size
|
||||
* DROPPED_TEXT_MAX. Otherwise @dropped_text must be NULL.
|
||||
*
|
||||
+ * @atomic_printing specifies if atomic printing should be used.
|
||||
+ *
|
||||
* @handover will be set to true if a printk waiter has taken over the
|
||||
* console_lock, in which case the caller is no longer holding the
|
||||
* console_lock. Otherwise it is set to false. A NULL pointer may be provided
|
||||
@@ -2871,7 +3025,8 @@ static void __console_unlock(void)
|
||||
* Requires con->lock otherwise.
|
||||
*/
|
||||
static bool __console_emit_next_record(struct console *con, char *text, char *ext_text,
|
||||
- char *dropped_text, bool *handover)
|
||||
+ char *dropped_text, bool atomic_printing,
|
||||
+ bool *handover)
|
||||
{
|
||||
static atomic_t panic_console_dropped = ATOMIC_INIT(0);
|
||||
struct printk_info info;
|
||||
@@ -2879,18 +3034,22 @@ static bool __console_emit_next_record(struct console *con, char *text, char *ex
|
||||
unsigned long flags;
|
||||
char *write_text;
|
||||
size_t len;
|
||||
+ u64 seq;
|
||||
|
||||
prb_rec_init_rd(&r, &info, text, CONSOLE_LOG_MAX);
|
||||
|
||||
if (handover)
|
||||
*handover = false;
|
||||
|
||||
- if (!prb_read_valid(prb, con->seq, &r))
|
||||
+ seq = read_console_seq(con);
|
||||
+
|
||||
+ if (!prb_read_valid(prb, seq, &r))
|
||||
return false;
|
||||
|
||||
- if (con->seq != r.info->seq) {
|
||||
- con->dropped += r.info->seq - con->seq;
|
||||
- con->seq = r.info->seq;
|
||||
+ if (seq != r.info->seq) {
|
||||
+ atomic_long_add((unsigned long)(r.info->seq - seq), &con->dropped);
|
||||
+ write_console_seq(con, r.info->seq, atomic_printing);
|
||||
+ seq = r.info->seq;
|
||||
if (panic_in_progress() &&
|
||||
atomic_fetch_inc_relaxed(&panic_console_dropped) > 10) {
|
||||
suppress_panic_printk = 1;
|
||||
@@ -2900,7 +3059,7 @@ static bool __console_emit_next_record(struct console *con, char *text, char *ex
|
||||
|
||||
/* Skip record that has level above the console loglevel. */
|
||||
if (suppress_message_printing(r.info->level)) {
|
||||
- con->seq++;
|
||||
+ write_console_seq(con, seq + 1, atomic_printing);
|
||||
goto skip;
|
||||
}
|
||||
|
||||
@@ -2932,9 +3091,9 @@ static bool __console_emit_next_record(struct console *con, char *text, char *ex
|
||||
stop_critical_timings();
|
||||
}
|
||||
|
||||
- call_console_driver(con, write_text, len, dropped_text);
|
||||
+ call_console_driver(con, write_text, len, dropped_text, atomic_printing);
|
||||
|
||||
- con->seq++;
|
||||
+ write_console_seq(con, seq + 1, atomic_printing);
|
||||
|
||||
if (handover) {
|
||||
start_critical_timings();
|
||||
@@ -2966,7 +3125,7 @@ static bool console_emit_next_record_transferable(struct console *con, char *tex
|
||||
handover = NULL;
|
||||
}
|
||||
|
||||
- return __console_emit_next_record(con, text, ext_text, dropped_text, handover);
|
||||
+ return __console_emit_next_record(con, text, ext_text, dropped_text, false, handover);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -3014,7 +3173,7 @@ static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handove
|
||||
for_each_console(con) {
|
||||
bool progress;
|
||||
|
||||
- if (!console_is_usable(con))
|
||||
+ if (!console_is_usable(con, false))
|
||||
continue;
|
||||
any_usable = true;
|
||||
|
||||
@@ -3049,6 +3208,68 @@ static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handove
|
||||
return any_usable;
|
||||
}
|
||||
|
||||
+#if defined(CONFIG_HAVE_ATOMIC_CONSOLE) && defined(CONFIG_PRINTK)
|
||||
+static bool console_emit_next_record(struct console *con, char *text, char *ext_text,
|
||||
+ char *dropped_text, bool atomic_printing);
|
||||
+
|
||||
+static void atomic_console_flush_all(void)
|
||||
+{
|
||||
+ unsigned long flags;
|
||||
+ struct console *con;
|
||||
+ bool any_progress;
|
||||
+ int index = 0;
|
||||
+
|
||||
+ if (console_suspended)
|
||||
+ return;
|
||||
+
|
||||
+#ifdef CONFIG_HAVE_NMI
|
||||
+ if (in_nmi())
|
||||
+ index = 1;
|
||||
+#endif
|
||||
+
|
||||
+ printk_cpu_sync_get_irqsave(flags);
|
||||
+
|
||||
+ do {
|
||||
+ any_progress = false;
|
||||
+
|
||||
+ for_each_console(con) {
|
||||
+ bool progress;
|
||||
+
|
||||
+ if (!console_is_usable(con, true))
|
||||
+ continue;
|
||||
+
|
||||
+ if (con->flags & CON_EXTENDED) {
|
||||
+ /* Extended consoles do not print "dropped messages". */
|
||||
+ progress = console_emit_next_record(con,
|
||||
+ &con->atomic_data->text[index],
|
||||
+ &con->atomic_data->ext_text[index],
|
||||
+ NULL,
|
||||
+ true);
|
||||
+ } else {
|
||||
+ progress = console_emit_next_record(con,
|
||||
+ &con->atomic_data->text[index],
|
||||
+ NULL,
|
||||
+ &con->atomic_data->dropped_text[index],
|
||||
+ true);
|
||||
+ }
|
||||
+
|
||||
+ if (!progress)
|
||||
+ continue;
|
||||
+ any_progress = true;
|
||||
+
|
||||
+ touch_softlockup_watchdog_sync();
|
||||
+ clocksource_touch_watchdog();
|
||||
+ rcu_cpu_stall_reset();
|
||||
+ touch_nmi_watchdog();
|
||||
+ }
|
||||
+ } while (any_progress);
|
||||
+
|
||||
+ printk_cpu_sync_put_irqrestore(flags);
|
||||
+}
|
||||
+#else /* CONFIG_HAVE_ATOMIC_CONSOLE && CONFIG_PRINTK */
|
||||
+#define atomic_console_flush_all()
|
||||
+#endif
|
||||
+
|
||||
/**
|
||||
* console_unlock - unlock the console system
|
||||
*
|
||||
@@ -3164,6 +3385,11 @@ void console_unblank(void)
|
||||
*/
|
||||
void console_flush_on_panic(enum con_flush_mode mode)
|
||||
{
|
||||
+ if (mode == CONSOLE_ATOMIC_FLUSH_PENDING) {
|
||||
+ atomic_console_flush_all();
|
||||
+ return;
|
||||
+ }
|
||||
+
|
||||
/*
|
||||
* If someone else is holding the console lock, trylock will fail
|
||||
* and may_schedule may be set. Ignore and proceed to unlock so
|
||||
@@ -3180,7 +3406,7 @@ void console_flush_on_panic(enum con_flush_mode mode)
|
||||
|
||||
seq = prb_first_valid_seq(prb);
|
||||
for_each_console(c)
|
||||
- c->seq = seq;
|
||||
+ write_console_seq(c, seq, false);
|
||||
}
|
||||
console_unlock();
|
||||
}
|
||||
@@ -3420,19 +3646,22 @@ void register_console(struct console *newcon)
|
||||
console_drivers->next = newcon;
|
||||
}
|
||||
|
||||
- newcon->dropped = 0;
|
||||
+ atomic_long_set(&newcon->dropped, 0);
|
||||
newcon->thread = NULL;
|
||||
newcon->blocked = true;
|
||||
mutex_init(&newcon->lock);
|
||||
+#ifdef CONFIG_HAVE_ATOMIC_CONSOLE
|
||||
+ newcon->atomic_data = NULL;
|
||||
+#endif
|
||||
|
||||
if (newcon->flags & CON_PRINTBUFFER) {
|
||||
/* Get a consistent copy of @syslog_seq. */
|
||||
mutex_lock(&syslog_lock);
|
||||
- newcon->seq = syslog_seq;
|
||||
+ write_console_seq(newcon, syslog_seq, false);
|
||||
mutex_unlock(&syslog_lock);
|
||||
} else {
|
||||
/* Begin with next message. */
|
||||
- newcon->seq = prb_next_seq(prb);
|
||||
+ write_console_seq(newcon, prb_next_seq(prb), false);
|
||||
}
|
||||
|
||||
if (printk_kthreads_available)
|
||||
@@ -3515,6 +3744,10 @@ int unregister_console(struct console *console)
|
||||
|
||||
console_sysfs_notify();
|
||||
|
||||
+#ifdef CONFIG_HAVE_ATOMIC_CONSOLE
|
||||
+ free_atomic_data(console->atomic_data);
|
||||
+#endif
|
||||
+
|
||||
if (console->exit)
|
||||
res = console->exit(console);
|
||||
|
||||
@@ -3645,7 +3878,7 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre
|
||||
for_each_console(c) {
|
||||
if (con && con != c)
|
||||
continue;
|
||||
- if (!console_is_usable(c))
|
||||
+ if (!console_is_usable(c, false))
|
||||
continue;
|
||||
printk_seq = c->seq;
|
||||
if (printk_seq < seq)
|
||||
@@ -3734,9 +3967,10 @@ static void printk_fallback_preferred_direct(void)
|
||||
* See __console_emit_next_record() for argument and return details.
|
||||
*/
|
||||
static bool console_emit_next_record(struct console *con, char *text, char *ext_text,
|
||||
- char *dropped_text)
|
||||
+ char *dropped_text, bool atomic_printing)
|
||||
{
|
||||
- return __console_emit_next_record(con, text, ext_text, dropped_text, NULL);
|
||||
+ return __console_emit_next_record(con, text, ext_text, dropped_text,
|
||||
+ atomic_printing, NULL);
|
||||
}
|
||||
|
||||
static bool printer_should_wake(struct console *con, u64 seq)
|
||||
@@ -3777,6 +4011,11 @@ static int printk_kthread_func(void *data)
|
||||
char *text;
|
||||
int error;
|
||||
|
||||
+#ifdef CONFIG_HAVE_ATOMIC_CONSOLE
|
||||
+ if (con->write_atomic)
|
||||
+ con->atomic_data = alloc_atomic_data(con->flags);
|
||||
+#endif
|
||||
+
|
||||
text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL);
|
||||
if (!text) {
|
||||
con_printk(KERN_ERR, con, "failed to allocate text buffer\n");
|
||||
@@ -3854,7 +4093,7 @@ static int printk_kthread_func(void *data)
|
||||
* which can conditionally invoke cond_resched().
|
||||
*/
|
||||
console_may_schedule = 0;
|
||||
- console_emit_next_record(con, text, ext_text, dropped_text);
|
||||
+ console_emit_next_record(con, text, ext_text, dropped_text, false);
|
||||
|
||||
seq = con->seq;
|
||||
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -1,7 +1,7 @@
|
|||
From f760da57a984738f88b96dfa5edb89d0723901ca Mon Sep 17 00:00:00 2001
|
||||
From f089c645b517d22f97433deffaaedc9fd6f9b598 Mon Sep 17 00:00:00 2001
|
||||
From: Frederic Weisbecker <frederic@kernel.org>
|
||||
Date: Tue, 5 Apr 2022 03:07:51 +0200
|
||||
Subject: [PATCH 13/62] rcutorture: Also force sched priority to timersd on
|
||||
Subject: [PATCH 020/195] rcutorture: Also force sched priority to timersd on
|
||||
boosting test.
|
||||
|
||||
ksoftirqd is statically boosted to the priority level right above the
|
||||
|
@ -46,10 +46,10 @@ index 0efba74a835c..f459b0f27c94 100644
|
|||
extern void raise_hrtimer_softirq(void);
|
||||
|
||||
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
|
||||
index 503c2aa845a4..dcd8c0e44c00 100644
|
||||
index ade42d6a9d9b..eebb9b4548fb 100644
|
||||
--- a/kernel/rcu/rcutorture.c
|
||||
+++ b/kernel/rcu/rcutorture.c
|
||||
@@ -2363,6 +2363,12 @@ static int rcutorture_booster_init(unsigned int cpu)
|
||||
@@ -2408,6 +2408,12 @@ static int rcutorture_booster_init(unsigned int cpu)
|
||||
WARN_ON_ONCE(!t);
|
||||
sp.sched_priority = 2;
|
||||
sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
|
||||
|
@ -63,10 +63,10 @@ index 503c2aa845a4..dcd8c0e44c00 100644
|
|||
|
||||
/* Don't allow time recalculation while creating a new task. */
|
||||
diff --git a/kernel/softirq.c b/kernel/softirq.c
|
||||
index ed6d7c41aa17..1892af494cdd 100644
|
||||
index c29c30106eb8..1277abc94228 100644
|
||||
--- a/kernel/softirq.c
|
||||
+++ b/kernel/softirq.c
|
||||
@@ -638,7 +638,7 @@ static inline void tick_irq_exit(void)
|
||||
@@ -620,7 +620,7 @@ static inline void tick_irq_exit(void)
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PREEMPT_RT
|
|
@ -1,937 +0,0 @@
|
|||
From eda5ed51c656510b41e8003537c4c101dd18e51f Mon Sep 17 00:00:00 2001
|
||||
From: John Ogness <john.ogness@linutronix.de>
|
||||
Date: Fri, 4 Feb 2022 16:01:17 +0106
|
||||
Subject: [PATCH 21/62] serial: 8250: implement write_atomic
|
||||
|
||||
Implement a non-sleeping NMI-safe write_atomic() console function in
|
||||
order to support atomic console printing during a panic.
|
||||
|
||||
Trasmitting data requires disabling interrupts. Since write_atomic()
|
||||
can be called from any context, it may be called while another CPU
|
||||
is executing in console code. In order to maintain the correct state
|
||||
of the IER register, use the global cpu_sync to synchronize all
|
||||
access to the IER register. This synchronization is only necessary
|
||||
for serial ports that are being used as consoles.
|
||||
|
||||
The global cpu_sync is also used to synchronize between the write()
|
||||
and write_atomic() callbacks. write() synchronizes per character,
|
||||
write_atomic() synchronizes per line.
|
||||
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/8250/8250.h | 41 ++++-
|
||||
drivers/tty/serial/8250/8250_aspeed_vuart.c | 2 +-
|
||||
drivers/tty/serial/8250/8250_bcm7271.c | 21 ++-
|
||||
drivers/tty/serial/8250/8250_core.c | 24 ++-
|
||||
drivers/tty/serial/8250/8250_exar.c | 4 +-
|
||||
drivers/tty/serial/8250/8250_fsl.c | 3 +-
|
||||
drivers/tty/serial/8250/8250_ingenic.c | 3 +-
|
||||
drivers/tty/serial/8250/8250_mtk.c | 32 +++-
|
||||
drivers/tty/serial/8250/8250_omap.c | 18 +--
|
||||
drivers/tty/serial/8250/8250_port.c | 158 ++++++++++++++++----
|
||||
drivers/tty/serial/8250/Kconfig | 1 +
|
||||
include/linux/serial_8250.h | 5 +
|
||||
12 files changed, 261 insertions(+), 51 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
|
||||
index eeb7b43ebe53..b17715d340c3 100644
|
||||
--- a/drivers/tty/serial/8250/8250.h
|
||||
+++ b/drivers/tty/serial/8250/8250.h
|
||||
@@ -176,12 +176,49 @@ static inline void serial_dl_write(struct uart_8250_port *up, int value)
|
||||
up->dl_write(up, value);
|
||||
}
|
||||
|
||||
+static inline int serial8250_in_IER(struct uart_8250_port *up)
|
||||
+{
|
||||
+ struct uart_port *port = &up->port;
|
||||
+ unsigned long flags;
|
||||
+ bool is_console;
|
||||
+ int ier;
|
||||
+
|
||||
+ is_console = uart_console(port);
|
||||
+
|
||||
+ if (is_console)
|
||||
+ printk_cpu_sync_get_irqsave(flags);
|
||||
+
|
||||
+ ier = serial_in(up, UART_IER);
|
||||
+
|
||||
+ if (is_console)
|
||||
+ printk_cpu_sync_put_irqrestore(flags);
|
||||
+
|
||||
+ return ier;
|
||||
+}
|
||||
+
|
||||
+static inline void serial8250_set_IER(struct uart_8250_port *up, int ier)
|
||||
+{
|
||||
+ struct uart_port *port = &up->port;
|
||||
+ unsigned long flags;
|
||||
+ bool is_console;
|
||||
+
|
||||
+ is_console = uart_console(port);
|
||||
+
|
||||
+ if (is_console)
|
||||
+ printk_cpu_sync_get_irqsave(flags);
|
||||
+
|
||||
+ serial_out(up, UART_IER, ier);
|
||||
+
|
||||
+ if (is_console)
|
||||
+ printk_cpu_sync_put_irqrestore(flags);
|
||||
+}
|
||||
+
|
||||
static inline bool serial8250_set_THRI(struct uart_8250_port *up)
|
||||
{
|
||||
if (up->ier & UART_IER_THRI)
|
||||
return false;
|
||||
up->ier |= UART_IER_THRI;
|
||||
- serial_out(up, UART_IER, up->ier);
|
||||
+ serial8250_set_IER(up, up->ier);
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -190,7 +227,7 @@ static inline bool serial8250_clear_THRI(struct uart_8250_port *up)
|
||||
if (!(up->ier & UART_IER_THRI))
|
||||
return false;
|
||||
up->ier &= ~UART_IER_THRI;
|
||||
- serial_out(up, UART_IER, up->ier);
|
||||
+ serial8250_set_IER(up, up->ier);
|
||||
return true;
|
||||
}
|
||||
|
||||
diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
|
||||
index 9d2a7856784f..7cc6b527c088 100644
|
||||
--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
|
||||
+++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
|
||||
@@ -278,7 +278,7 @@ static void __aspeed_vuart_set_throttle(struct uart_8250_port *up,
|
||||
up->ier &= ~irqs;
|
||||
if (!throttle)
|
||||
up->ier |= irqs;
|
||||
- serial_out(up, UART_IER, up->ier);
|
||||
+ serial8250_set_IER(up, up->ier);
|
||||
}
|
||||
static void aspeed_vuart_set_throttle(struct uart_port *port, bool throttle)
|
||||
{
|
||||
diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c
|
||||
index ffc7f67e27e3..8b211e668bc0 100644
|
||||
--- a/drivers/tty/serial/8250/8250_bcm7271.c
|
||||
+++ b/drivers/tty/serial/8250/8250_bcm7271.c
|
||||
@@ -609,7 +609,7 @@ static int brcmuart_startup(struct uart_port *port)
|
||||
* will handle this.
|
||||
*/
|
||||
up->ier &= ~UART_IER_RDI;
|
||||
- serial_port_out(port, UART_IER, up->ier);
|
||||
+ serial8250_set_IER(up, up->ier);
|
||||
|
||||
priv->tx_running = false;
|
||||
priv->dma.rx_dma = NULL;
|
||||
@@ -775,10 +775,12 @@ static int brcmuart_handle_irq(struct uart_port *p)
|
||||
unsigned int iir = serial_port_in(p, UART_IIR);
|
||||
struct brcmuart_priv *priv = p->private_data;
|
||||
struct uart_8250_port *up = up_to_u8250p(p);
|
||||
+ unsigned long cs_flags;
|
||||
unsigned int status;
|
||||
unsigned long flags;
|
||||
unsigned int ier;
|
||||
unsigned int mcr;
|
||||
+ bool is_console;
|
||||
int handled = 0;
|
||||
|
||||
/*
|
||||
@@ -789,6 +791,10 @@ static int brcmuart_handle_irq(struct uart_port *p)
|
||||
spin_lock_irqsave(&p->lock, flags);
|
||||
status = serial_port_in(p, UART_LSR);
|
||||
if ((status & UART_LSR_DR) == 0) {
|
||||
+ is_console = uart_console(p);
|
||||
+
|
||||
+ if (is_console)
|
||||
+ printk_cpu_sync_get_irqsave(cs_flags);
|
||||
|
||||
ier = serial_port_in(p, UART_IER);
|
||||
/*
|
||||
@@ -809,6 +815,9 @@ static int brcmuart_handle_irq(struct uart_port *p)
|
||||
serial_port_in(p, UART_RX);
|
||||
}
|
||||
|
||||
+ if (is_console)
|
||||
+ printk_cpu_sync_put_irqrestore(cs_flags);
|
||||
+
|
||||
handled = 1;
|
||||
}
|
||||
spin_unlock_irqrestore(&p->lock, flags);
|
||||
@@ -823,8 +832,10 @@ static enum hrtimer_restart brcmuart_hrtimer_func(struct hrtimer *t)
|
||||
struct brcmuart_priv *priv = container_of(t, struct brcmuart_priv, hrt);
|
||||
struct uart_port *p = priv->up;
|
||||
struct uart_8250_port *up = up_to_u8250p(p);
|
||||
+ unsigned long cs_flags;
|
||||
unsigned int status;
|
||||
unsigned long flags;
|
||||
+ bool is_console;
|
||||
|
||||
if (priv->shutdown)
|
||||
return HRTIMER_NORESTART;
|
||||
@@ -846,12 +857,20 @@ static enum hrtimer_restart brcmuart_hrtimer_func(struct hrtimer *t)
|
||||
/* re-enable receive unless upper layer has disabled it */
|
||||
if ((up->ier & (UART_IER_RLSI | UART_IER_RDI)) ==
|
||||
(UART_IER_RLSI | UART_IER_RDI)) {
|
||||
+ is_console = uart_console(p);
|
||||
+
|
||||
+ if (is_console)
|
||||
+ printk_cpu_sync_get_irqsave(cs_flags);
|
||||
+
|
||||
status = serial_port_in(p, UART_IER);
|
||||
status |= (UART_IER_RLSI | UART_IER_RDI);
|
||||
serial_port_out(p, UART_IER, status);
|
||||
status = serial_port_in(p, UART_MCR);
|
||||
status |= UART_MCR_RTS;
|
||||
serial_port_out(p, UART_MCR, status);
|
||||
+
|
||||
+ if (is_console)
|
||||
+ printk_cpu_sync_put_irqrestore(cs_flags);
|
||||
}
|
||||
spin_unlock_irqrestore(&p->lock, flags);
|
||||
return HRTIMER_NORESTART;
|
||||
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
|
||||
index 81a5dab1a828..536f639ff56c 100644
|
||||
--- a/drivers/tty/serial/8250/8250_core.c
|
||||
+++ b/drivers/tty/serial/8250/8250_core.c
|
||||
@@ -255,8 +255,11 @@ static void serial8250_timeout(struct timer_list *t)
|
||||
static void serial8250_backup_timeout(struct timer_list *t)
|
||||
{
|
||||
struct uart_8250_port *up = from_timer(up, t, timer);
|
||||
+ struct uart_port *port = &up->port;
|
||||
unsigned int iir, ier = 0, lsr;
|
||||
+ unsigned long cs_flags;
|
||||
unsigned long flags;
|
||||
+ bool is_console;
|
||||
|
||||
spin_lock_irqsave(&up->port.lock, flags);
|
||||
|
||||
@@ -265,8 +268,16 @@ static void serial8250_backup_timeout(struct timer_list *t)
|
||||
* based handler.
|
||||
*/
|
||||
if (up->port.irq) {
|
||||
+ is_console = uart_console(port);
|
||||
+
|
||||
+ if (is_console)
|
||||
+ printk_cpu_sync_get_irqsave(cs_flags);
|
||||
+
|
||||
ier = serial_in(up, UART_IER);
|
||||
serial_out(up, UART_IER, 0);
|
||||
+
|
||||
+ if (is_console)
|
||||
+ printk_cpu_sync_put_irqrestore(cs_flags);
|
||||
}
|
||||
|
||||
iir = serial_in(up, UART_IIR);
|
||||
@@ -289,7 +300,7 @@ static void serial8250_backup_timeout(struct timer_list *t)
|
||||
serial8250_tx_chars(up);
|
||||
|
||||
if (up->port.irq)
|
||||
- serial_out(up, UART_IER, ier);
|
||||
+ serial8250_set_IER(up, ier);
|
||||
|
||||
spin_unlock_irqrestore(&up->port.lock, flags);
|
||||
|
||||
@@ -575,6 +586,14 @@ serial8250_register_ports(struct uart_driver *drv, struct device *dev)
|
||||
|
||||
#ifdef CONFIG_SERIAL_8250_CONSOLE
|
||||
|
||||
+static void univ8250_console_write_atomic(struct console *co, const char *s,
|
||||
+ unsigned int count)
|
||||
+{
|
||||
+ struct uart_8250_port *up = &serial8250_ports[co->index];
|
||||
+
|
||||
+ serial8250_console_write_atomic(up, s, count);
|
||||
+}
|
||||
+
|
||||
static void univ8250_console_write(struct console *co, const char *s,
|
||||
unsigned int count)
|
||||
{
|
||||
@@ -668,6 +687,7 @@ static int univ8250_console_match(struct console *co, char *name, int idx,
|
||||
|
||||
static struct console univ8250_console = {
|
||||
.name = "ttyS",
|
||||
+ .write_atomic = univ8250_console_write_atomic,
|
||||
.write = univ8250_console_write,
|
||||
.device = uart_console_device,
|
||||
.setup = univ8250_console_setup,
|
||||
@@ -961,7 +981,7 @@ static void serial_8250_overrun_backoff_work(struct work_struct *work)
|
||||
spin_lock_irqsave(&port->lock, flags);
|
||||
up->ier |= UART_IER_RLSI | UART_IER_RDI;
|
||||
up->port.read_status_mask |= UART_LSR_DR;
|
||||
- serial_out(up, UART_IER, up->ier);
|
||||
+ serial8250_set_IER(up, up->ier);
|
||||
spin_unlock_irqrestore(&port->lock, flags);
|
||||
}
|
||||
|
||||
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
|
||||
index b406cba10b0e..246c32c75a4c 100644
|
||||
--- a/drivers/tty/serial/8250/8250_exar.c
|
||||
+++ b/drivers/tty/serial/8250/8250_exar.c
|
||||
@@ -189,6 +189,8 @@ static void xr17v35x_set_divisor(struct uart_port *p, unsigned int baud,
|
||||
|
||||
static int xr17v35x_startup(struct uart_port *port)
|
||||
{
|
||||
+ struct uart_8250_port *up = up_to_u8250p(port);
|
||||
+
|
||||
/*
|
||||
* First enable access to IER [7:5], ISR [5:4], FCR [5:4],
|
||||
* MCR [7:5] and MSR [7:0]
|
||||
@@ -199,7 +201,7 @@ static int xr17v35x_startup(struct uart_port *port)
|
||||
* Make sure all interrups are masked until initialization is
|
||||
* complete and the FIFOs are cleared
|
||||
*/
|
||||
- serial_port_out(port, UART_IER, 0);
|
||||
+ serial8250_set_IER(up, 0);
|
||||
|
||||
return serial8250_do_startup(port);
|
||||
}
|
||||
diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c
|
||||
index 8adfaa183f77..eaf148245a10 100644
|
||||
--- a/drivers/tty/serial/8250/8250_fsl.c
|
||||
+++ b/drivers/tty/serial/8250/8250_fsl.c
|
||||
@@ -58,7 +58,8 @@ int fsl8250_handle_irq(struct uart_port *port)
|
||||
if ((orig_lsr & UART_LSR_OE) && (up->overrun_backoff_time_ms > 0)) {
|
||||
unsigned long delay;
|
||||
|
||||
- up->ier = port->serial_in(port, UART_IER);
|
||||
+ up->ier = serial8250_in_IER(up);
|
||||
+
|
||||
if (up->ier & (UART_IER_RLSI | UART_IER_RDI)) {
|
||||
port->ops->stop_rx(port);
|
||||
} else {
|
||||
diff --git a/drivers/tty/serial/8250/8250_ingenic.c b/drivers/tty/serial/8250/8250_ingenic.c
|
||||
index 2b2f5d8d24b9..2b78e6c394fb 100644
|
||||
--- a/drivers/tty/serial/8250/8250_ingenic.c
|
||||
+++ b/drivers/tty/serial/8250/8250_ingenic.c
|
||||
@@ -146,6 +146,7 @@ OF_EARLYCON_DECLARE(x1000_uart, "ingenic,x1000-uart",
|
||||
|
||||
static void ingenic_uart_serial_out(struct uart_port *p, int offset, int value)
|
||||
{
|
||||
+ struct uart_8250_port *up = up_to_u8250p(p);
|
||||
int ier;
|
||||
|
||||
switch (offset) {
|
||||
@@ -167,7 +168,7 @@ static void ingenic_uart_serial_out(struct uart_port *p, int offset, int value)
|
||||
* If we have enabled modem status IRQs we should enable
|
||||
* modem mode.
|
||||
*/
|
||||
- ier = p->serial_in(p, UART_IER);
|
||||
+ ier = serial8250_in_IER(up);
|
||||
|
||||
if (ier & UART_IER_MSI)
|
||||
value |= UART_MCR_MDCE | UART_MCR_FCM;
|
||||
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
|
||||
index fb1d5ec0940e..3e7203909d6a 100644
|
||||
--- a/drivers/tty/serial/8250/8250_mtk.c
|
||||
+++ b/drivers/tty/serial/8250/8250_mtk.c
|
||||
@@ -222,12 +222,40 @@ static void mtk8250_shutdown(struct uart_port *port)
|
||||
|
||||
static void mtk8250_disable_intrs(struct uart_8250_port *up, int mask)
|
||||
{
|
||||
- serial_out(up, UART_IER, serial_in(up, UART_IER) & (~mask));
|
||||
+ struct uart_port *port = &up->port;
|
||||
+ unsigned long flags;
|
||||
+ bool is_console;
|
||||
+ int ier;
|
||||
+
|
||||
+ is_console = uart_console(port);
|
||||
+
|
||||
+ if (is_console)
|
||||
+ printk_cpu_sync_get_irqsave(flags);
|
||||
+
|
||||
+ ier = serial_in(up, UART_IER);
|
||||
+ serial_out(up, UART_IER, ier & (~mask));
|
||||
+
|
||||
+ if (is_console)
|
||||
+ printk_cpu_sync_put_irqrestore(flags);
|
||||
}
|
||||
|
||||
static void mtk8250_enable_intrs(struct uart_8250_port *up, int mask)
|
||||
{
|
||||
- serial_out(up, UART_IER, serial_in(up, UART_IER) | mask);
|
||||
+ struct uart_port *port = &up->port;
|
||||
+ unsigned long flags;
|
||||
+ bool is_console;
|
||||
+ int ier;
|
||||
+
|
||||
+ is_console = uart_console(port);
|
||||
+
|
||||
+ if (is_console)
|
||||
+ printk_cpu_sync_get_irqsave(flags);
|
||||
+
|
||||
+ ier = serial_in(up, UART_IER);
|
||||
+ serial_out(up, UART_IER, ier | mask);
|
||||
+
|
||||
+ if (is_console)
|
||||
+ printk_cpu_sync_put_irqrestore(flags);
|
||||
}
|
||||
|
||||
static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode)
|
||||
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
|
||||
index 0b04d810b3e6..2b8ad5176399 100644
|
||||
--- a/drivers/tty/serial/8250/8250_omap.c
|
||||
+++ b/drivers/tty/serial/8250/8250_omap.c
|
||||
@@ -330,7 +330,7 @@ static void omap8250_restore_regs(struct uart_8250_port *up)
|
||||
/* drop TCR + TLR access, we setup XON/XOFF later */
|
||||
serial8250_out_MCR(up, mcr);
|
||||
|
||||
- serial_out(up, UART_IER, up->ier);
|
||||
+ serial8250_set_IER(up, up->ier);
|
||||
|
||||
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
|
||||
serial_dl_write(up, priv->quot);
|
||||
@@ -520,7 +520,7 @@ static void omap_8250_pm(struct uart_port *port, unsigned int state,
|
||||
serial_out(up, UART_EFR, efr | UART_EFR_ECB);
|
||||
serial_out(up, UART_LCR, 0);
|
||||
|
||||
- serial_out(up, UART_IER, (state != 0) ? UART_IERX_SLEEP : 0);
|
||||
+ serial8250_set_IER(up, (state != 0) ? UART_IERX_SLEEP : 0);
|
||||
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
|
||||
serial_out(up, UART_EFR, efr);
|
||||
serial_out(up, UART_LCR, 0);
|
||||
@@ -703,7 +703,7 @@ static int omap_8250_startup(struct uart_port *port)
|
||||
goto err;
|
||||
|
||||
up->ier = UART_IER_RLSI | UART_IER_RDI;
|
||||
- serial_out(up, UART_IER, up->ier);
|
||||
+ serial8250_set_IER(up, up->ier);
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
up->capabilities |= UART_CAP_RPM;
|
||||
@@ -744,7 +744,7 @@ static void omap_8250_shutdown(struct uart_port *port)
|
||||
serial_out(up, UART_OMAP_EFR2, 0x0);
|
||||
|
||||
up->ier = 0;
|
||||
- serial_out(up, UART_IER, 0);
|
||||
+ serial8250_set_IER(up, 0);
|
||||
|
||||
if (up->dma)
|
||||
serial8250_release_dma(up);
|
||||
@@ -792,7 +792,7 @@ static void omap_8250_unthrottle(struct uart_port *port)
|
||||
up->dma->rx_dma(up);
|
||||
up->ier |= UART_IER_RLSI | UART_IER_RDI;
|
||||
port->read_status_mask |= UART_LSR_DR;
|
||||
- serial_out(up, UART_IER, up->ier);
|
||||
+ serial8250_set_IER(up, up->ier);
|
||||
spin_unlock_irqrestore(&port->lock, flags);
|
||||
|
||||
pm_runtime_mark_last_busy(port->dev);
|
||||
@@ -883,7 +883,7 @@ static void __dma_rx_complete(void *param)
|
||||
__dma_rx_do_complete(p);
|
||||
if (!priv->throttled) {
|
||||
p->ier |= UART_IER_RLSI | UART_IER_RDI;
|
||||
- serial_out(p, UART_IER, p->ier);
|
||||
+ serial8250_set_IER(p, p->ier);
|
||||
if (!(priv->habit & UART_HAS_EFR2))
|
||||
omap_8250_rx_dma(p);
|
||||
}
|
||||
@@ -940,7 +940,7 @@ static int omap_8250_rx_dma(struct uart_8250_port *p)
|
||||
* callback to run.
|
||||
*/
|
||||
p->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
|
||||
- serial_out(p, UART_IER, p->ier);
|
||||
+ serial8250_set_IER(p, p->ier);
|
||||
}
|
||||
goto out;
|
||||
}
|
||||
@@ -1153,12 +1153,12 @@ static void am654_8250_handle_rx_dma(struct uart_8250_port *up, u8 iir,
|
||||
* periodic timeouts, re-enable interrupts.
|
||||
*/
|
||||
up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
|
||||
- serial_out(up, UART_IER, up->ier);
|
||||
+ serial8250_set_IER(up, up->ier);
|
||||
omap_8250_rx_dma_flush(up);
|
||||
serial_in(up, UART_IIR);
|
||||
serial_out(up, UART_OMAP_EFR2, 0x0);
|
||||
up->ier |= UART_IER_RLSI | UART_IER_RDI;
|
||||
- serial_out(up, UART_IER, up->ier);
|
||||
+ serial8250_set_IER(up, up->ier);
|
||||
}
|
||||
}
|
||||
|
||||
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
|
||||
index 8efe31448df3..975c16267196 100644
|
||||
--- a/drivers/tty/serial/8250/8250_port.c
|
||||
+++ b/drivers/tty/serial/8250/8250_port.c
|
||||
@@ -744,7 +744,7 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
|
||||
serial_out(p, UART_EFR, UART_EFR_ECB);
|
||||
serial_out(p, UART_LCR, 0);
|
||||
}
|
||||
- serial_out(p, UART_IER, sleep ? UART_IERX_SLEEP : 0);
|
||||
+ serial8250_set_IER(p, sleep ? UART_IERX_SLEEP : 0);
|
||||
if (p->capabilities & UART_CAP_EFR) {
|
||||
serial_out(p, UART_LCR, UART_LCR_CONF_MODE_B);
|
||||
serial_out(p, UART_EFR, efr);
|
||||
@@ -755,12 +755,29 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
|
||||
serial8250_rpm_put(p);
|
||||
}
|
||||
|
||||
-static void serial8250_clear_IER(struct uart_8250_port *up)
|
||||
+static unsigned int serial8250_clear_IER(struct uart_8250_port *up)
|
||||
{
|
||||
+ struct uart_port *port = &up->port;
|
||||
+ unsigned int clearval = 0;
|
||||
+ unsigned long flags;
|
||||
+ bool is_console;
|
||||
+ unsigned int prior;
|
||||
+
|
||||
+ is_console = uart_console(port);
|
||||
+
|
||||
if (up->capabilities & UART_CAP_UUE)
|
||||
- serial_out(up, UART_IER, UART_IER_UUE);
|
||||
- else
|
||||
- serial_out(up, UART_IER, 0);
|
||||
+ clearval = UART_IER_UUE;
|
||||
+
|
||||
+ if (is_console)
|
||||
+ printk_cpu_sync_get_irqsave(flags);
|
||||
+
|
||||
+ prior = serial_in(up, UART_IER);
|
||||
+ serial_out(up, UART_IER, clearval);
|
||||
+
|
||||
+ if (is_console)
|
||||
+ printk_cpu_sync_put_irqrestore(flags);
|
||||
+
|
||||
+ return prior;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SERIAL_8250_RSA
|
||||
@@ -1026,8 +1043,11 @@ static int broken_efr(struct uart_8250_port *up)
|
||||
*/
|
||||
static void autoconfig_16550a(struct uart_8250_port *up)
|
||||
{
|
||||
+ struct uart_port *port = &up->port;
|
||||
unsigned char status1, status2;
|
||||
unsigned int iersave;
|
||||
+ unsigned long flags;
|
||||
+ bool is_console;
|
||||
|
||||
up->port.type = PORT_16550A;
|
||||
up->capabilities |= UART_CAP_FIFO;
|
||||
@@ -1139,6 +1159,11 @@ static void autoconfig_16550a(struct uart_8250_port *up)
|
||||
return;
|
||||
}
|
||||
|
||||
+ is_console = uart_console(port);
|
||||
+
|
||||
+ if (is_console)
|
||||
+ printk_cpu_sync_get_irqsave(flags);
|
||||
+
|
||||
/*
|
||||
* Try writing and reading the UART_IER_UUE bit (b6).
|
||||
* If it works, this is probably one of the Xscale platform's
|
||||
@@ -1174,6 +1199,9 @@ static void autoconfig_16550a(struct uart_8250_port *up)
|
||||
}
|
||||
serial_out(up, UART_IER, iersave);
|
||||
|
||||
+ if (is_console)
|
||||
+ printk_cpu_sync_put_irqrestore(flags);
|
||||
+
|
||||
/*
|
||||
* We distinguish between 16550A and U6 16550A by counting
|
||||
* how many bytes are in the FIFO.
|
||||
@@ -1196,8 +1224,10 @@ static void autoconfig(struct uart_8250_port *up)
|
||||
unsigned char status1, scratch, scratch2, scratch3;
|
||||
unsigned char save_lcr, save_mcr;
|
||||
struct uart_port *port = &up->port;
|
||||
+ unsigned long cs_flags;
|
||||
unsigned long flags;
|
||||
unsigned int old_capabilities;
|
||||
+ bool is_console;
|
||||
|
||||
if (!port->iobase && !port->mapbase && !port->membase)
|
||||
return;
|
||||
@@ -1215,6 +1245,11 @@ static void autoconfig(struct uart_8250_port *up)
|
||||
up->bugs = 0;
|
||||
|
||||
if (!(port->flags & UPF_BUGGY_UART)) {
|
||||
+ is_console = uart_console(port);
|
||||
+
|
||||
+ if (is_console)
|
||||
+ printk_cpu_sync_get_irqsave(cs_flags);
|
||||
+
|
||||
/*
|
||||
* Do a simple existence test first; if we fail this,
|
||||
* there's no point trying anything else.
|
||||
@@ -1244,6 +1279,10 @@ static void autoconfig(struct uart_8250_port *up)
|
||||
#endif
|
||||
scratch3 = serial_in(up, UART_IER) & 0x0f;
|
||||
serial_out(up, UART_IER, scratch);
|
||||
+
|
||||
+ if (is_console)
|
||||
+ printk_cpu_sync_put_irqrestore(cs_flags);
|
||||
+
|
||||
if (scratch2 != 0 || scratch3 != 0x0F) {
|
||||
/*
|
||||
* We failed; there's nothing here
|
||||
@@ -1367,7 +1406,9 @@ static void autoconfig_irq(struct uart_8250_port *up)
|
||||
unsigned char save_mcr, save_ier;
|
||||
unsigned char save_ICP = 0;
|
||||
unsigned int ICP = 0;
|
||||
+ unsigned long flags;
|
||||
unsigned long irqs;
|
||||
+ bool is_console;
|
||||
int irq;
|
||||
|
||||
if (port->flags & UPF_FOURPORT) {
|
||||
@@ -1377,8 +1418,12 @@ static void autoconfig_irq(struct uart_8250_port *up)
|
||||
inb_p(ICP);
|
||||
}
|
||||
|
||||
- if (uart_console(port))
|
||||
+ is_console = uart_console(port);
|
||||
+
|
||||
+ if (is_console) {
|
||||
console_lock();
|
||||
+ printk_cpu_sync_get_irqsave(flags);
|
||||
+ }
|
||||
|
||||
/* forget possible initially masked and pending IRQ */
|
||||
probe_irq_off(probe_irq_on());
|
||||
@@ -1410,8 +1455,10 @@ static void autoconfig_irq(struct uart_8250_port *up)
|
||||
if (port->flags & UPF_FOURPORT)
|
||||
outb_p(save_ICP, ICP);
|
||||
|
||||
- if (uart_console(port))
|
||||
+ if (is_console) {
|
||||
+ printk_cpu_sync_put_irqrestore(flags);
|
||||
console_unlock();
|
||||
+ }
|
||||
|
||||
port->irq = (irq > 0) ? irq : 0;
|
||||
}
|
||||
@@ -1424,7 +1471,7 @@ static void serial8250_stop_rx(struct uart_port *port)
|
||||
|
||||
up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
|
||||
up->port.read_status_mask &= ~UART_LSR_DR;
|
||||
- serial_port_out(port, UART_IER, up->ier);
|
||||
+ serial8250_set_IER(up, up->ier);
|
||||
|
||||
serial8250_rpm_put(up);
|
||||
}
|
||||
@@ -1454,7 +1501,7 @@ void serial8250_em485_stop_tx(struct uart_8250_port *p)
|
||||
serial8250_clear_and_reinit_fifos(p);
|
||||
|
||||
p->ier |= UART_IER_RLSI | UART_IER_RDI;
|
||||
- serial_port_out(&p->port, UART_IER, p->ier);
|
||||
+ serial8250_set_IER(p, p->ier);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(serial8250_em485_stop_tx);
|
||||
@@ -1703,7 +1750,7 @@ static void serial8250_disable_ms(struct uart_port *port)
|
||||
mctrl_gpio_disable_ms(up->gpios);
|
||||
|
||||
up->ier &= ~UART_IER_MSI;
|
||||
- serial_port_out(port, UART_IER, up->ier);
|
||||
+ serial8250_set_IER(up, up->ier);
|
||||
}
|
||||
|
||||
static void serial8250_enable_ms(struct uart_port *port)
|
||||
@@ -1719,7 +1766,7 @@ static void serial8250_enable_ms(struct uart_port *port)
|
||||
up->ier |= UART_IER_MSI;
|
||||
|
||||
serial8250_rpm_get(up);
|
||||
- serial_port_out(port, UART_IER, up->ier);
|
||||
+ serial8250_set_IER(up, up->ier);
|
||||
serial8250_rpm_put(up);
|
||||
}
|
||||
|
||||
@@ -2174,8 +2221,7 @@ static void serial8250_put_poll_char(struct uart_port *port,
|
||||
/*
|
||||
* First save the IER then disable the interrupts
|
||||
*/
|
||||
- ier = serial_port_in(port, UART_IER);
|
||||
- serial8250_clear_IER(up);
|
||||
+ ier = serial8250_clear_IER(up);
|
||||
|
||||
wait_for_xmitr(up, UART_LSR_BOTH_EMPTY);
|
||||
/*
|
||||
@@ -2188,7 +2234,7 @@ static void serial8250_put_poll_char(struct uart_port *port,
|
||||
* and restore the IER
|
||||
*/
|
||||
wait_for_xmitr(up, UART_LSR_BOTH_EMPTY);
|
||||
- serial_port_out(port, UART_IER, ier);
|
||||
+ serial8250_set_IER(up, ier);
|
||||
serial8250_rpm_put(up);
|
||||
}
|
||||
|
||||
@@ -2197,8 +2243,10 @@ static void serial8250_put_poll_char(struct uart_port *port,
|
||||
int serial8250_do_startup(struct uart_port *port)
|
||||
{
|
||||
struct uart_8250_port *up = up_to_u8250p(port);
|
||||
+ unsigned long cs_flags;
|
||||
unsigned long flags;
|
||||
unsigned char iir;
|
||||
+ bool is_console;
|
||||
int retval;
|
||||
u16 lsr;
|
||||
|
||||
@@ -2219,7 +2267,7 @@ int serial8250_do_startup(struct uart_port *port)
|
||||
up->acr = 0;
|
||||
serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
|
||||
serial_port_out(port, UART_EFR, UART_EFR_ECB);
|
||||
- serial_port_out(port, UART_IER, 0);
|
||||
+ serial8250_set_IER(up, 0);
|
||||
serial_port_out(port, UART_LCR, 0);
|
||||
serial_icr_write(up, UART_CSR, 0); /* Reset the UART */
|
||||
serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
|
||||
@@ -2229,7 +2277,7 @@ int serial8250_do_startup(struct uart_port *port)
|
||||
|
||||
if (port->type == PORT_DA830) {
|
||||
/* Reset the port */
|
||||
- serial_port_out(port, UART_IER, 0);
|
||||
+ serial8250_set_IER(up, 0);
|
||||
serial_port_out(port, UART_DA830_PWREMU_MGMT, 0);
|
||||
mdelay(10);
|
||||
|
||||
@@ -2328,6 +2376,8 @@ int serial8250_do_startup(struct uart_port *port)
|
||||
if (retval)
|
||||
goto out;
|
||||
|
||||
+ is_console = uart_console(port);
|
||||
+
|
||||
if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) {
|
||||
unsigned char iir1;
|
||||
|
||||
@@ -2344,6 +2394,9 @@ int serial8250_do_startup(struct uart_port *port)
|
||||
*/
|
||||
spin_lock_irqsave(&port->lock, flags);
|
||||
|
||||
+ if (is_console)
|
||||
+ printk_cpu_sync_get_irqsave(cs_flags);
|
||||
+
|
||||
wait_for_xmitr(up, UART_LSR_THRE);
|
||||
serial_port_out_sync(port, UART_IER, UART_IER_THRI);
|
||||
udelay(1); /* allow THRE to set */
|
||||
@@ -2354,6 +2407,9 @@ int serial8250_do_startup(struct uart_port *port)
|
||||
iir = serial_port_in(port, UART_IIR);
|
||||
serial_port_out(port, UART_IER, 0);
|
||||
|
||||
+ if (is_console)
|
||||
+ printk_cpu_sync_put_irqrestore(cs_flags);
|
||||
+
|
||||
spin_unlock_irqrestore(&port->lock, flags);
|
||||
|
||||
if (port->irqflags & IRQF_SHARED)
|
||||
@@ -2408,10 +2464,14 @@ int serial8250_do_startup(struct uart_port *port)
|
||||
* Do a quick test to see if we receive an interrupt when we enable
|
||||
* the TX irq.
|
||||
*/
|
||||
+ if (is_console)
|
||||
+ printk_cpu_sync_get_irqsave(cs_flags);
|
||||
serial_port_out(port, UART_IER, UART_IER_THRI);
|
||||
lsr = serial_port_in(port, UART_LSR);
|
||||
iir = serial_port_in(port, UART_IIR);
|
||||
serial_port_out(port, UART_IER, 0);
|
||||
+ if (is_console)
|
||||
+ printk_cpu_sync_put_irqrestore(cs_flags);
|
||||
|
||||
if (lsr & UART_LSR_TEMT && iir & UART_IIR_NO_INT) {
|
||||
if (!(up->bugs & UART_BUG_TXEN)) {
|
||||
@@ -2443,7 +2503,7 @@ int serial8250_do_startup(struct uart_port *port)
|
||||
if (up->dma) {
|
||||
const char *msg = NULL;
|
||||
|
||||
- if (uart_console(port))
|
||||
+ if (is_console)
|
||||
msg = "forbid DMA for kernel console";
|
||||
else if (serial8250_request_dma(up))
|
||||
msg = "failed to request DMA";
|
||||
@@ -2494,7 +2554,7 @@ void serial8250_do_shutdown(struct uart_port *port)
|
||||
*/
|
||||
spin_lock_irqsave(&port->lock, flags);
|
||||
up->ier = 0;
|
||||
- serial_port_out(port, UART_IER, 0);
|
||||
+ serial8250_set_IER(up, 0);
|
||||
spin_unlock_irqrestore(&port->lock, flags);
|
||||
|
||||
synchronize_irq(port->irq);
|
||||
@@ -2856,7 +2916,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
|
||||
if (up->capabilities & UART_CAP_RTOIE)
|
||||
up->ier |= UART_IER_RTOIE;
|
||||
|
||||
- serial_port_out(port, UART_IER, up->ier);
|
||||
+ serial8250_set_IER(up, up->ier);
|
||||
|
||||
if (up->capabilities & UART_CAP_EFR) {
|
||||
unsigned char efr = 0;
|
||||
@@ -3321,7 +3381,7 @@ EXPORT_SYMBOL_GPL(serial8250_set_defaults);
|
||||
|
||||
#ifdef CONFIG_SERIAL_8250_CONSOLE
|
||||
|
||||
-static void serial8250_console_putchar(struct uart_port *port, unsigned char ch)
|
||||
+static void serial8250_console_putchar_locked(struct uart_port *port, unsigned char ch)
|
||||
{
|
||||
struct uart_8250_port *up = up_to_u8250p(port);
|
||||
|
||||
@@ -3329,6 +3389,18 @@ static void serial8250_console_putchar(struct uart_port *port, unsigned char ch)
|
||||
serial_port_out(port, UART_TX, ch);
|
||||
}
|
||||
|
||||
+static void serial8250_console_putchar(struct uart_port *port, unsigned char ch)
|
||||
+{
|
||||
+ struct uart_8250_port *up = up_to_u8250p(port);
|
||||
+ unsigned long flags;
|
||||
+
|
||||
+ wait_for_xmitr(up, UART_LSR_THRE);
|
||||
+
|
||||
+ printk_cpu_sync_get_irqsave(flags);
|
||||
+ serial8250_console_putchar_locked(port, ch);
|
||||
+ printk_cpu_sync_put_irqrestore(flags);
|
||||
+}
|
||||
+
|
||||
/*
|
||||
* Restore serial console when h/w power-off detected
|
||||
*/
|
||||
@@ -3355,6 +3427,32 @@ static void serial8250_console_restore(struct uart_8250_port *up)
|
||||
serial8250_out_MCR(up, up->mcr | UART_MCR_DTR | UART_MCR_RTS);
|
||||
}
|
||||
|
||||
+void serial8250_console_write_atomic(struct uart_8250_port *up,
|
||||
+ const char *s, unsigned int count)
|
||||
+{
|
||||
+ struct uart_port *port = &up->port;
|
||||
+ unsigned long flags;
|
||||
+ unsigned int ier;
|
||||
+
|
||||
+ printk_cpu_sync_get_irqsave(flags);
|
||||
+
|
||||
+ touch_nmi_watchdog();
|
||||
+
|
||||
+ ier = serial8250_clear_IER(up);
|
||||
+
|
||||
+ if (atomic_fetch_inc(&up->console_printing)) {
|
||||
+ uart_console_write(port, "\n", 1,
|
||||
+ serial8250_console_putchar_locked);
|
||||
+ }
|
||||
+ uart_console_write(port, s, count, serial8250_console_putchar_locked);
|
||||
+ atomic_dec(&up->console_printing);
|
||||
+
|
||||
+ wait_for_xmitr(up, UART_LSR_BOTH_EMPTY);
|
||||
+ serial8250_set_IER(up, ier);
|
||||
+
|
||||
+ printk_cpu_sync_put_irqrestore(flags);
|
||||
+}
|
||||
+
|
||||
/*
|
||||
* Print a string to the serial port using the device FIFO
|
||||
*
|
||||
@@ -3400,20 +3498,15 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
|
||||
struct uart_port *port = &up->port;
|
||||
unsigned long flags;
|
||||
unsigned int ier, use_fifo;
|
||||
- int locked = 1;
|
||||
|
||||
touch_nmi_watchdog();
|
||||
|
||||
- if (oops_in_progress)
|
||||
- locked = spin_trylock_irqsave(&port->lock, flags);
|
||||
- else
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ spin_lock_irqsave(&port->lock, flags);
|
||||
|
||||
/*
|
||||
* First save the IER then disable the interrupts
|
||||
*/
|
||||
- ier = serial_port_in(port, UART_IER);
|
||||
- serial8250_clear_IER(up);
|
||||
+ ier = serial8250_clear_IER(up);
|
||||
|
||||
/* check scratch reg to see if port powered off during system sleep */
|
||||
if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) {
|
||||
@@ -3447,10 +3540,12 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
|
||||
*/
|
||||
!(up->port.flags & UPF_CONS_FLOW);
|
||||
|
||||
+ atomic_inc(&up->console_printing);
|
||||
if (likely(use_fifo))
|
||||
serial8250_console_fifo_write(up, s, count);
|
||||
else
|
||||
uart_console_write(port, s, count, serial8250_console_putchar);
|
||||
+ atomic_dec(&up->console_printing);
|
||||
|
||||
/*
|
||||
* Finally, wait for transmitter to become empty
|
||||
@@ -3463,8 +3558,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
|
||||
if (em485->tx_stopped)
|
||||
up->rs485_stop_tx(up);
|
||||
}
|
||||
-
|
||||
- serial_port_out(port, UART_IER, ier);
|
||||
+ serial8250_set_IER(up, ier);
|
||||
|
||||
/*
|
||||
* The receive handling will happen properly because the
|
||||
@@ -3476,8 +3570,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
|
||||
if (up->msr_saved_flags)
|
||||
serial8250_modem_status(up);
|
||||
|
||||
- if (locked)
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ spin_unlock_irqrestore(&port->lock, flags);
|
||||
}
|
||||
|
||||
static unsigned int probe_baud(struct uart_port *port)
|
||||
@@ -3497,6 +3590,7 @@ static unsigned int probe_baud(struct uart_port *port)
|
||||
|
||||
int serial8250_console_setup(struct uart_port *port, char *options, bool probe)
|
||||
{
|
||||
+ struct uart_8250_port *up = up_to_u8250p(port);
|
||||
int baud = 9600;
|
||||
int bits = 8;
|
||||
int parity = 'n';
|
||||
@@ -3506,6 +3600,8 @@ int serial8250_console_setup(struct uart_port *port, char *options, bool probe)
|
||||
if (!port->iobase && !port->membase)
|
||||
return -ENODEV;
|
||||
|
||||
+ atomic_set(&up->console_printing, 0);
|
||||
+
|
||||
if (options)
|
||||
uart_parse_options(options, &baud, &parity, &bits, &flow);
|
||||
else if (probe)
|
||||
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
|
||||
index 583a340f9934..1f31320820a6 100644
|
||||
--- a/drivers/tty/serial/8250/Kconfig
|
||||
+++ b/drivers/tty/serial/8250/Kconfig
|
||||
@@ -9,6 +9,7 @@ config SERIAL_8250
|
||||
depends on !S390
|
||||
select SERIAL_CORE
|
||||
select SERIAL_MCTRL_GPIO if GPIOLIB
|
||||
+ select HAVE_ATOMIC_CONSOLE
|
||||
help
|
||||
This selects whether you want to include the driver for the standard
|
||||
serial ports. The standard answer is Y. People who might say N
|
||||
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
|
||||
index 79b328861c5f..35f44352e641 100644
|
||||
--- a/include/linux/serial_8250.h
|
||||
+++ b/include/linux/serial_8250.h
|
||||
@@ -7,6 +7,7 @@
|
||||
#ifndef _LINUX_SERIAL_8250_H
|
||||
#define _LINUX_SERIAL_8250_H
|
||||
|
||||
+#include <linux/atomic.h>
|
||||
#include <linux/serial_core.h>
|
||||
#include <linux/serial_reg.h>
|
||||
#include <linux/platform_device.h>
|
||||
@@ -124,6 +125,8 @@ struct uart_8250_port {
|
||||
#define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA
|
||||
unsigned char msr_saved_flags;
|
||||
|
||||
+ atomic_t console_printing;
|
||||
+
|
||||
struct uart_8250_dma *dma;
|
||||
const struct uart_8250_ops *ops;
|
||||
|
||||
@@ -179,6 +182,8 @@ void serial8250_init_port(struct uart_8250_port *up);
|
||||
void serial8250_set_defaults(struct uart_8250_port *up);
|
||||
void serial8250_console_write(struct uart_8250_port *up, const char *s,
|
||||
unsigned int count);
|
||||
+void serial8250_console_write_atomic(struct uart_8250_port *up, const char *s,
|
||||
+ unsigned int count);
|
||||
int serial8250_console_setup(struct uart_port *port, char *options, bool probe);
|
||||
int serial8250_console_exit(struct uart_port *port);
|
||||
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -1,7 +1,7 @@
|
|||
From 9220b21155b9f55725886fb46c85df7ee0288ce0 Mon Sep 17 00:00:00 2001
|
||||
From ed846d5dfed571d1dc9ab756a1240a93018004b5 Mon Sep 17 00:00:00 2001
|
||||
From: Frederic Weisbecker <frederic@kernel.org>
|
||||
Date: Tue, 5 Apr 2022 03:07:52 +0200
|
||||
Subject: [PATCH 14/62] tick: Fix timer storm since introduction of timersd
|
||||
Subject: [PATCH 021/195] tick: Fix timer storm since introduction of timersd
|
||||
|
||||
If timers are pending while the tick is reprogrammed on nohz_mode, the
|
||||
next expiry is not armed to fire now, it is delayed one jiffy forward
|
||||
|
@ -80,10 +80,10 @@ index f459b0f27c94..a5091ac97fc6 100644
|
|||
|
||||
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
|
||||
diff --git a/kernel/softirq.c b/kernel/softirq.c
|
||||
index 1892af494cdd..ab1fe34326ba 100644
|
||||
index 1277abc94228..a4d359e2c2b5 100644
|
||||
--- a/kernel/softirq.c
|
||||
+++ b/kernel/softirq.c
|
||||
@@ -639,12 +639,7 @@ static inline void tick_irq_exit(void)
|
||||
@@ -621,12 +621,7 @@ static inline void tick_irq_exit(void)
|
||||
|
||||
#ifdef CONFIG_PREEMPT_RT
|
||||
DEFINE_PER_CPU(struct task_struct *, timersd);
|
||||
|
@ -98,10 +98,10 @@ index 1892af494cdd..ab1fe34326ba 100644
|
|||
static void wake_timersd(void)
|
||||
{
|
||||
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
|
||||
index 798e1841d286..b52e1861b913 100644
|
||||
index 5cbd0cee83c0..2d7036714c81 100644
|
||||
--- a/kernel/time/tick-sched.c
|
||||
+++ b/kernel/time/tick-sched.c
|
||||
@@ -800,7 +800,7 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
|
||||
@@ -795,7 +795,7 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
|
||||
|
||||
static inline bool local_timer_softirq_pending(void)
|
||||
{
|
|
@ -1,95 +0,0 @@
|
|||
From ca6770a982bcd5497cd057b5e0a8126c2e589a14 Mon Sep 17 00:00:00 2001
|
||||
From: John Ogness <john.ogness@linutronix.de>
|
||||
Date: Fri, 4 Feb 2022 16:01:17 +0106
|
||||
Subject: [PATCH 22/62] printk: avoid preempt_disable() for PREEMPT_RT
|
||||
|
||||
During non-normal operation, printk() calls will attempt to
|
||||
write the messages directly to the consoles. This involves
|
||||
using console_trylock() to acquire @console_sem.
|
||||
|
||||
Preemption is disabled while directly printing to the consoles
|
||||
in order to ensure that the printing task is not scheduled away
|
||||
while holding @console_sem, thus blocking all other printers
|
||||
and causing delays in printing.
|
||||
|
||||
Commit fd5f7cde1b85 ("printk: Never set console_may_schedule in
|
||||
console_trylock()") specifically reverted a previous attempt at
|
||||
allowing preemption while printing.
|
||||
|
||||
However, on PREEMPT_RT systems, disabling preemption while
|
||||
printing is not allowed because console drivers typically
|
||||
acquire a spin lock (which under PREEMPT_RT is an rtmutex).
|
||||
Since direct printing is only used during early boot and
|
||||
non-panic dumps, the risks of delayed print output for these
|
||||
scenarios will be accepted under PREEMPT_RT.
|
||||
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
kernel/printk/printk.c | 21 ++++++++++++++++++++-
|
||||
1 file changed, 20 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
|
||||
index 73b1727087c7..3d0ff49cca29 100644
|
||||
--- a/kernel/printk/printk.c
|
||||
+++ b/kernel/printk/printk.c
|
||||
@@ -1981,6 +1981,7 @@ static int console_lock_spinning_disable_and_check(void)
|
||||
return 1;
|
||||
}
|
||||
|
||||
+#if !IS_ENABLED(CONFIG_PREEMPT_RT)
|
||||
/**
|
||||
* console_trylock_spinning - try to get console_lock by busy waiting
|
||||
*
|
||||
@@ -2054,6 +2055,7 @@ static int console_trylock_spinning(void)
|
||||
|
||||
return 1;
|
||||
}
|
||||
+#endif /* CONFIG_PREEMPT_RT */
|
||||
|
||||
/*
|
||||
* Call the specified console driver, asking it to write out the specified
|
||||
@@ -2393,6 +2395,18 @@ asmlinkage int vprintk_emit(int facility, int level,
|
||||
|
||||
/* If called from the scheduler, we can not call up(). */
|
||||
if (!in_sched && allow_direct_printing()) {
|
||||
+#if IS_ENABLED(CONFIG_PREEMPT_RT)
|
||||
+ /*
|
||||
+ * Use the non-spinning trylock since PREEMPT_RT does not
|
||||
+ * support console lock handovers.
|
||||
+ *
|
||||
+ * Direct printing will most likely involve taking spinlocks.
|
||||
+ * For PREEMPT_RT, this is only allowed if in a preemptible
|
||||
+ * context.
|
||||
+ */
|
||||
+ if (preemptible() && console_trylock())
|
||||
+ console_unlock();
|
||||
+#else
|
||||
/*
|
||||
* The caller may be holding system-critical or
|
||||
* timing-sensitive locks. Disable preemption during direct
|
||||
@@ -2410,6 +2424,7 @@ asmlinkage int vprintk_emit(int facility, int level,
|
||||
if (console_trylock_spinning())
|
||||
console_unlock();
|
||||
preempt_enable();
|
||||
+#endif
|
||||
}
|
||||
|
||||
if (in_sched)
|
||||
@@ -3119,8 +3134,12 @@ static bool console_emit_next_record_transferable(struct console *con, char *tex
|
||||
/*
|
||||
* Handovers are only supported if threaded printers are atomically
|
||||
* blocked. The context taking over the console_lock may be atomic.
|
||||
+ *
|
||||
+ * PREEMPT_RT also does not support handovers because the spinning
|
||||
+ * waiter can cause large latencies.
|
||||
*/
|
||||
- if (!console_kthreads_atomically_blocked()) {
|
||||
+ if (!console_kthreads_atomically_blocked() ||
|
||||
+ IS_ENABLED(CONFIG_PREEMPT_RT)) {
|
||||
*handover = false;
|
||||
handover = NULL;
|
||||
}
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -1,7 +1,7 @@
|
|||
From 7c8c231b4a43dd06e6c90d5c820342ee3f8e9130 Mon Sep 17 00:00:00 2001
|
||||
From cb9cea518effb1b12b36be953f46c66fcca40c2f Mon Sep 17 00:00:00 2001
|
||||
From: Junxiao Chang <junxiao.chang@intel.com>
|
||||
Date: Mon, 20 Feb 2023 09:12:20 +0100
|
||||
Subject: [PATCH 15/62] softirq: Wake ktimers thread also in softirq.
|
||||
Subject: [PATCH 022/195] softirq: Wake ktimers thread also in softirq.
|
||||
|
||||
If the hrtimer is raised while a softirq is processed then it does not
|
||||
wake the corresponding ktimers thread. This is due to the optimisation in the
|
||||
|
@ -22,10 +22,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
|||
1 file changed, 5 insertions(+), 6 deletions(-)
|
||||
|
||||
diff --git a/kernel/softirq.c b/kernel/softirq.c
|
||||
index ab1fe34326ba..82f3e68fbe22 100644
|
||||
index a4d359e2c2b5..c2474cc4fa51 100644
|
||||
--- a/kernel/softirq.c
|
||||
+++ b/kernel/softirq.c
|
||||
@@ -664,13 +664,12 @@ static inline void __irq_exit_rcu(void)
|
||||
@@ -646,13 +646,12 @@ static inline void __irq_exit_rcu(void)
|
||||
#endif
|
||||
account_hardirq_exit(current);
|
||||
preempt_count_sub(HARDIRQ_OFFSET);
|
|
@ -1,31 +1,33 @@
|
|||
From 5881f1272a6bf4dc4c7553942f95fa850416700c Mon Sep 17 00:00:00 2001
|
||||
From d25c9b1588e64c6b128e958bdb998668f65bd03a Mon Sep 17 00:00:00 2001
|
||||
From: Mike Galbraith <umgwanakikbuti@gmail.com>
|
||||
Date: Thu, 31 Mar 2016 04:08:28 +0200
|
||||
Subject: [PATCH 17/62] zram: Replace bit spinlocks with spinlock_t for
|
||||
Subject: [PATCH 023/195] zram: Replace bit spinlocks with spinlock_t for
|
||||
PREEMPT_RT.
|
||||
|
||||
The bit spinlock disables preemption on PREEMPT_RT. With disabled preemption it
|
||||
is not allowed to acquire other sleeping locks which includes invoking
|
||||
zs_free().
|
||||
The bit spinlock disables preemption. The spinlock_t lock becomes a sleeping
|
||||
lock on PREEMPT_RT and it can not be acquired in this context. In this locked
|
||||
section, zs_free() acquires a zs_pool::lock, and there is access to
|
||||
zram::wb_limit_lock.
|
||||
|
||||
Use a spinlock_t on PREEMPT_RT for locking and set/ clear ZRAM_LOCK after the
|
||||
lock has been acquired/ dropped.
|
||||
Use a spinlock_t on PREEMPT_RT for locking and set/ clear ZRAM_LOCK bit after
|
||||
the lock has been acquired/ dropped.
|
||||
|
||||
Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Link: https://lkml.kernel.org/r/YqIbMuHCPiQk+Ac2@linutronix.de
|
||||
Link: https://lore.kernel.org/20230323161830.jFbWCosd@linutronix.de
|
||||
---
|
||||
drivers/block/zram/zram_drv.c | 36 +++++++++++++++++++++++++++++++++++
|
||||
drivers/block/zram/zram_drv.c | 37 +++++++++++++++++++++++++++++++++++
|
||||
drivers/block/zram/zram_drv.h | 3 +++
|
||||
2 files changed, 39 insertions(+)
|
||||
2 files changed, 40 insertions(+)
|
||||
|
||||
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
|
||||
index 966aab902d19..ee69e4443691 100644
|
||||
index 06673c6ca255..a5d0f7c06342 100644
|
||||
--- a/drivers/block/zram/zram_drv.c
|
||||
+++ b/drivers/block/zram/zram_drv.c
|
||||
@@ -57,6 +57,40 @@ static void zram_free_page(struct zram *zram, size_t index);
|
||||
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
|
||||
u32 index, int offset, struct bio *bio);
|
||||
@@ -57,6 +57,41 @@ static void zram_free_page(struct zram *zram, size_t index);
|
||||
static int zram_read_page(struct zram *zram, struct page *page, u32 index,
|
||||
struct bio *parent);
|
||||
|
||||
+#ifdef CONFIG_PREEMPT_RT
|
||||
+static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages)
|
||||
|
@ -61,10 +63,11 @@ index 966aab902d19..ee69e4443691 100644
|
|||
+#else
|
||||
+
|
||||
+static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages) { }
|
||||
|
||||
+
|
||||
static int zram_slot_trylock(struct zram *zram, u32 index)
|
||||
{
|
||||
@@ -72,6 +106,7 @@ static void zram_slot_unlock(struct zram *zram, u32 index)
|
||||
return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags);
|
||||
@@ -71,6 +106,7 @@ static void zram_slot_unlock(struct zram *zram, u32 index)
|
||||
{
|
||||
bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
|
||||
}
|
||||
|
@ -72,7 +75,7 @@ index 966aab902d19..ee69e4443691 100644
|
|||
|
||||
static inline bool init_done(struct zram *zram)
|
||||
{
|
||||
@@ -1187,6 +1222,7 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
|
||||
@@ -1245,6 +1281,7 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
|
||||
|
||||
if (!huge_class_size)
|
||||
huge_class_size = zs_huge_class_size(zram->mem_pool);
|
||||
|
@ -81,10 +84,10 @@ index 966aab902d19..ee69e4443691 100644
|
|||
}
|
||||
|
||||
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
|
||||
index a2bda53020fd..ae7950b26db5 100644
|
||||
index ca7a15bd4845..e64eb607eb45 100644
|
||||
--- a/drivers/block/zram/zram_drv.h
|
||||
+++ b/drivers/block/zram/zram_drv.h
|
||||
@@ -62,6 +62,9 @@ struct zram_table_entry {
|
||||
@@ -69,6 +69,9 @@ struct zram_table_entry {
|
||||
unsigned long element;
|
||||
};
|
||||
unsigned long flags;
|
|
@ -0,0 +1,52 @@
|
|||
From 537326d9f40fb9b84eb9df67a0f8da0b4deee025 Mon Sep 17 00:00:00 2001
|
||||
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Date: Wed, 8 Mar 2023 16:29:38 +0100
|
||||
Subject: [PATCH 024/195] preempt: Put preempt_enable() within an
|
||||
instrumentation*() section.
|
||||
|
||||
Callers of preempt_enable() can be within an noinstr section leading to:
|
||||
| vmlinux.o: warning: objtool: native_sched_clock+0x97: call to preempt_schedule_notrace_thunk() leaves .noinstr.text section
|
||||
| vmlinux.o: warning: objtool: kvm_clock_read+0x22: call to preempt_schedule_notrace_thunk() leaves .noinstr.text section
|
||||
| vmlinux.o: warning: objtool: local_clock+0xb4: call to preempt_schedule_notrace_thunk() leaves .noinstr.text section
|
||||
| vmlinux.o: warning: objtool: enter_from_user_mode+0xea: call to preempt_schedule_thunk() leaves .noinstr.text section
|
||||
| vmlinux.o: warning: objtool: syscall_enter_from_user_mode+0x140: call to preempt_schedule_thunk() leaves .noinstr.text section
|
||||
| vmlinux.o: warning: objtool: syscall_enter_from_user_mode_prepare+0xf2: call to preempt_schedule_thunk() leaves .noinstr.text section
|
||||
| vmlinux.o: warning: objtool: irqentry_enter_from_user_mode+0xea: call to preempt_schedule_thunk() leaves .noinstr.text section
|
||||
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230309072724.3F6zRkvw@linutronix.de
|
||||
---
|
||||
include/linux/preempt.h | 10 ++++++++--
|
||||
1 file changed, 8 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
|
||||
index 9aa6358a1a16..cd16f0330fba 100644
|
||||
--- a/include/linux/preempt.h
|
||||
+++ b/include/linux/preempt.h
|
||||
@@ -230,15 +230,21 @@ do { \
|
||||
#define preempt_enable() \
|
||||
do { \
|
||||
barrier(); \
|
||||
- if (unlikely(preempt_count_dec_and_test())) \
|
||||
+ if (unlikely(preempt_count_dec_and_test())) { \
|
||||
+ instrumentation_begin(); \
|
||||
__preempt_schedule(); \
|
||||
+ instrumentation_end(); \
|
||||
+ } \
|
||||
} while (0)
|
||||
|
||||
#define preempt_enable_notrace() \
|
||||
do { \
|
||||
barrier(); \
|
||||
- if (unlikely(__preempt_count_dec_and_test())) \
|
||||
+ if (unlikely(__preempt_count_dec_and_test())) { \
|
||||
+ instrumentation_begin(); \
|
||||
__preempt_schedule_notrace(); \
|
||||
+ instrumentation_end(); \
|
||||
+ } \
|
||||
} while (0)
|
||||
|
||||
#define preempt_check_resched() \
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,61 @@
|
|||
From a3b4b96acf6a09da67d22e7aa8a62f250bfc6e25 Mon Sep 17 00:00:00 2001
|
||||
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Date: Fri, 4 Aug 2023 13:30:37 +0200
|
||||
Subject: [PATCH 025/195] sched/core: Provide a method to check if a task is
|
||||
PI-boosted.
|
||||
|
||||
Provide a method to check if a task inherited the priority from another
|
||||
task. This happens if a task owns a lock which is requested by a task
|
||||
with higher priority. This can be used as a hint to add a preemption
|
||||
point to the critical section.
|
||||
|
||||
Provide a function which reports true if the task is PI-boosted.
|
||||
|
||||
Link: https://lore.kernel.org/r/20230804113039.419794-2-bigeasy@linutronix.de
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
include/linux/sched.h | 1 +
|
||||
kernel/sched/core.c | 15 +++++++++++++++
|
||||
2 files changed, 16 insertions(+)
|
||||
|
||||
diff --git a/include/linux/sched.h b/include/linux/sched.h
|
||||
index 67623ffd4a8e..eab173e5d09b 100644
|
||||
--- a/include/linux/sched.h
|
||||
+++ b/include/linux/sched.h
|
||||
@@ -1905,6 +1905,7 @@ static inline int dl_task_check_affinity(struct task_struct *p, const struct cpu
|
||||
}
|
||||
#endif
|
||||
|
||||
+extern bool task_is_pi_boosted(const struct task_struct *p);
|
||||
extern int yield_to(struct task_struct *p, bool preempt);
|
||||
extern void set_user_nice(struct task_struct *p, long nice);
|
||||
extern int task_prio(const struct task_struct *p);
|
||||
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
|
||||
index 90f9124ac027..7134598e3284 100644
|
||||
--- a/kernel/sched/core.c
|
||||
+++ b/kernel/sched/core.c
|
||||
@@ -8923,6 +8923,21 @@ static inline void preempt_dynamic_init(void) { }
|
||||
|
||||
#endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */
|
||||
|
||||
+/*
|
||||
+ * task_is_pi_boosted - Check if task has been PI boosted.
|
||||
+ * @p: Task to check.
|
||||
+ *
|
||||
+ * Return true if task is subject to priority inheritance.
|
||||
+ */
|
||||
+bool task_is_pi_boosted(const struct task_struct *p)
|
||||
+{
|
||||
+ int prio = p->prio;
|
||||
+
|
||||
+ if (!rt_prio(prio))
|
||||
+ return false;
|
||||
+ return prio != p->normal_prio;
|
||||
+}
|
||||
+
|
||||
/**
|
||||
* yield - yield the current processor to other threads.
|
||||
*
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,67 @@
|
|||
From c330e617b94bbc517da0aaabecfd3b3c007d3e62 Mon Sep 17 00:00:00 2001
|
||||
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Date: Fri, 4 Aug 2023 13:30:38 +0200
|
||||
Subject: [PATCH 026/195] softirq: Add function to preempt serving softirqs.
|
||||
|
||||
Add a functionality for the softirq handler to preempt its current work
|
||||
if needed. The softirq core has no particular state. It reads and resets
|
||||
the pending softirq bits and then processes one after the other.
|
||||
It can already be preempted while it invokes a certain softirq handler.
|
||||
|
||||
By enabling the BH the softirq core releases the per-CPU bh lock which
|
||||
serializes all softirq handler. It is safe to do as long as the code
|
||||
does not expect any serialisation in between. A typical scenarion would
|
||||
after the invocation of callback where no state needs to be preserved
|
||||
before the next callback is invoked.
|
||||
|
||||
Add functionaliry to preempt the serving softirqs.
|
||||
|
||||
Link: https://lore.kernel.org/r/20230804113039.419794-3-bigeasy@linutronix.de
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
include/linux/bottom_half.h | 2 ++
|
||||
kernel/softirq.c | 13 +++++++++++++
|
||||
2 files changed, 15 insertions(+)
|
||||
|
||||
diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
|
||||
index fc53e0ad56d9..448bbef47456 100644
|
||||
--- a/include/linux/bottom_half.h
|
||||
+++ b/include/linux/bottom_half.h
|
||||
@@ -35,8 +35,10 @@ static inline void local_bh_enable(void)
|
||||
|
||||
#ifdef CONFIG_PREEMPT_RT
|
||||
extern bool local_bh_blocked(void);
|
||||
+extern void softirq_preempt(void);
|
||||
#else
|
||||
static inline bool local_bh_blocked(void) { return false; }
|
||||
+static inline void softirq_preempt(void) { }
|
||||
#endif
|
||||
|
||||
#endif /* _LINUX_BH_H */
|
||||
diff --git a/kernel/softirq.c b/kernel/softirq.c
|
||||
index c2474cc4fa51..cae0ae2e2b0b 100644
|
||||
--- a/kernel/softirq.c
|
||||
+++ b/kernel/softirq.c
|
||||
@@ -247,6 +247,19 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
|
||||
}
|
||||
EXPORT_SYMBOL(__local_bh_enable_ip);
|
||||
|
||||
+void softirq_preempt(void)
|
||||
+{
|
||||
+ if (WARN_ON_ONCE(!preemptible()))
|
||||
+ return;
|
||||
+
|
||||
+ if (WARN_ON_ONCE(__this_cpu_read(softirq_ctrl.cnt) != SOFTIRQ_OFFSET))
|
||||
+ return;
|
||||
+
|
||||
+ __local_bh_enable(SOFTIRQ_OFFSET, true);
|
||||
+ /* preemption point */
|
||||
+ __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
|
||||
+}
|
||||
+
|
||||
/*
|
||||
* Invoked from ksoftirqd_run() outside of the interrupt disabled section
|
||||
* to acquire the per CPU local lock for reentrancy protection.
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,52 @@
|
|||
From 811417a29d37605f932c88499d94379ac8535991 Mon Sep 17 00:00:00 2001
|
||||
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Date: Fri, 4 Aug 2023 13:30:39 +0200
|
||||
Subject: [PATCH 027/195] time: Allow to preempt after a callback.
|
||||
|
||||
The TIMER_SOFTIRQ handler invokes timer callbacks of the expired timers.
|
||||
Before each invocation the timer_base::lock is dropped. The only lock
|
||||
that is still held is the timer_base::expiry_lock and the per-CPU
|
||||
bh-lock as part of local_bh_disable(). The former is released as part
|
||||
of lock up prevention if the timer is preempted by the caller which is
|
||||
waiting for its completion.
|
||||
|
||||
Both locks are already released as part of timer_sync_wait_running().
|
||||
This can be extended by also releasing in bh-lock. The timer core does
|
||||
not rely on any state that is serialized by the bh-lock. The timer
|
||||
callback expects the bh-state to be serialized by the lock but there is
|
||||
no need to keep state synchronized while invoking multiple callbacks.
|
||||
|
||||
Preempt handling softirqs and release all locks after a timer invocation
|
||||
if the current has inherited priority.
|
||||
|
||||
Link: https://lore.kernel.org/r/20230804113039.419794-4-bigeasy@linutronix.de
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
kernel/time/timer.c | 9 ++++++++-
|
||||
1 file changed, 8 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
|
||||
index 7cad6fe3c035..b3fbe97d1e34 100644
|
||||
--- a/kernel/time/timer.c
|
||||
+++ b/kernel/time/timer.c
|
||||
@@ -1470,9 +1470,16 @@ static inline void timer_base_unlock_expiry(struct timer_base *base)
|
||||
*/
|
||||
static void timer_sync_wait_running(struct timer_base *base)
|
||||
{
|
||||
- if (atomic_read(&base->timer_waiters)) {
|
||||
+ bool need_preempt;
|
||||
+
|
||||
+ need_preempt = task_is_pi_boosted(current);
|
||||
+ if (need_preempt || atomic_read(&base->timer_waiters)) {
|
||||
raw_spin_unlock_irq(&base->lock);
|
||||
spin_unlock(&base->expiry_lock);
|
||||
+
|
||||
+ if (need_preempt)
|
||||
+ softirq_preempt();
|
||||
+
|
||||
spin_lock(&base->expiry_lock);
|
||||
raw_spin_lock_irq(&base->lock);
|
||||
}
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,131 @@
|
|||
From 681f56a112fc24a97bfdffcef7655e5bf9ffb6e8 Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:18 +0206
|
||||
Subject: [PATCH 028/195] serial: core: Provide port lock wrappers
|
||||
MIME-Version: 1.0
|
||||
Content-Type: text/plain; charset=UTF-8
|
||||
Content-Transfer-Encoding: 8bit
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
Provide wrapper functions for spin_[un]lock*(port->lock) invocations so
|
||||
that the console mechanics can be applied later on at a single place and
|
||||
does not require to copy the same logic all over the drivers.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-2-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
include/linux/serial_core.h | 79 +++++++++++++++++++++++++++++++++++++
|
||||
1 file changed, 79 insertions(+)
|
||||
|
||||
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
|
||||
index bb6f073bc159..f1d5c0d1568c 100644
|
||||
--- a/include/linux/serial_core.h
|
||||
+++ b/include/linux/serial_core.h
|
||||
@@ -588,6 +588,85 @@ struct uart_port {
|
||||
void *private_data; /* generic platform data pointer */
|
||||
};
|
||||
|
||||
+/**
|
||||
+ * uart_port_lock - Lock the UART port
|
||||
+ * @up: Pointer to UART port structure
|
||||
+ */
|
||||
+static inline void uart_port_lock(struct uart_port *up)
|
||||
+{
|
||||
+ spin_lock(&up->lock);
|
||||
+}
|
||||
+
|
||||
+/**
|
||||
+ * uart_port_lock_irq - Lock the UART port and disable interrupts
|
||||
+ * @up: Pointer to UART port structure
|
||||
+ */
|
||||
+static inline void uart_port_lock_irq(struct uart_port *up)
|
||||
+{
|
||||
+ spin_lock_irq(&up->lock);
|
||||
+}
|
||||
+
|
||||
+/**
|
||||
+ * uart_port_lock_irqsave - Lock the UART port, save and disable interrupts
|
||||
+ * @up: Pointer to UART port structure
|
||||
+ * @flags: Pointer to interrupt flags storage
|
||||
+ */
|
||||
+static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
|
||||
+{
|
||||
+ spin_lock_irqsave(&up->lock, *flags);
|
||||
+}
|
||||
+
|
||||
+/**
|
||||
+ * uart_port_trylock - Try to lock the UART port
|
||||
+ * @up: Pointer to UART port structure
|
||||
+ *
|
||||
+ * Returns: True if lock was acquired, false otherwise
|
||||
+ */
|
||||
+static inline bool uart_port_trylock(struct uart_port *up)
|
||||
+{
|
||||
+ return spin_trylock(&up->lock);
|
||||
+}
|
||||
+
|
||||
+/**
|
||||
+ * uart_port_trylock_irqsave - Try to lock the UART port, save and disable interrupts
|
||||
+ * @up: Pointer to UART port structure
|
||||
+ * @flags: Pointer to interrupt flags storage
|
||||
+ *
|
||||
+ * Returns: True if lock was acquired, false otherwise
|
||||
+ */
|
||||
+static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long *flags)
|
||||
+{
|
||||
+ return spin_trylock_irqsave(&up->lock, *flags);
|
||||
+}
|
||||
+
|
||||
+/**
|
||||
+ * uart_port_unlock - Unlock the UART port
|
||||
+ * @up: Pointer to UART port structure
|
||||
+ */
|
||||
+static inline void uart_port_unlock(struct uart_port *up)
|
||||
+{
|
||||
+ spin_unlock(&up->lock);
|
||||
+}
|
||||
+
|
||||
+/**
|
||||
+ * uart_port_unlock_irq - Unlock the UART port and re-enable interrupts
|
||||
+ * @up: Pointer to UART port structure
|
||||
+ */
|
||||
+static inline void uart_port_unlock_irq(struct uart_port *up)
|
||||
+{
|
||||
+ spin_unlock_irq(&up->lock);
|
||||
+}
|
||||
+
|
||||
+/**
|
||||
+ * uart_port_lock_irqrestore - Unlock the UART port, restore interrupts
|
||||
+ * @up: Pointer to UART port structure
|
||||
+ * @flags: The saved interrupt flags for restore
|
||||
+ */
|
||||
+static inline void uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
|
||||
+{
|
||||
+ spin_unlock_irqrestore(&up->lock, flags);
|
||||
+}
|
||||
+
|
||||
static inline int serial_port_in(struct uart_port *up, int offset)
|
||||
{
|
||||
return up->serial_in(up, offset);
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,98 @@
|
|||
From e642eaa68ae3692f66e9f8b987f23aebc709e326 Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:19 +0206
|
||||
Subject: [PATCH 029/195] serial: core: Use lock wrappers
|
||||
MIME-Version: 1.0
|
||||
Content-Type: text/plain; charset=UTF-8
|
||||
Content-Transfer-Encoding: 8bit
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-3-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
include/linux/serial_core.h | 12 ++++++------
|
||||
1 file changed, 6 insertions(+), 6 deletions(-)
|
||||
|
||||
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
|
||||
index f1d5c0d1568c..3091c62ec37b 100644
|
||||
--- a/include/linux/serial_core.h
|
||||
+++ b/include/linux/serial_core.h
|
||||
@@ -1035,14 +1035,14 @@ static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
|
||||
u8 sysrq_ch;
|
||||
|
||||
if (!port->has_sysrq) {
|
||||
- spin_unlock(&port->lock);
|
||||
+ uart_port_unlock(port);
|
||||
return;
|
||||
}
|
||||
|
||||
sysrq_ch = port->sysrq_ch;
|
||||
port->sysrq_ch = 0;
|
||||
|
||||
- spin_unlock(&port->lock);
|
||||
+ uart_port_unlock(port);
|
||||
|
||||
if (sysrq_ch)
|
||||
handle_sysrq(sysrq_ch);
|
||||
@@ -1054,14 +1054,14 @@ static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port
|
||||
u8 sysrq_ch;
|
||||
|
||||
if (!port->has_sysrq) {
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
return;
|
||||
}
|
||||
|
||||
sysrq_ch = port->sysrq_ch;
|
||||
port->sysrq_ch = 0;
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
|
||||
if (sysrq_ch)
|
||||
handle_sysrq(sysrq_ch);
|
||||
@@ -1077,12 +1077,12 @@ static inline int uart_prepare_sysrq_char(struct uart_port *port, u8 ch)
|
||||
}
|
||||
static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
|
||||
{
|
||||
- spin_unlock(&port->lock);
|
||||
+ uart_port_unlock(port);
|
||||
}
|
||||
static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port,
|
||||
unsigned long flags)
|
||||
{
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
#endif /* CONFIG_MAGIC_SYSRQ_SERIAL */
|
||||
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,80 @@
|
|||
From 5acefbbc9066e05a67448a4ddbf2385cf8caedd5 Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:20 +0206
|
||||
Subject: [PATCH 030/195] serial: 21285: Use port lock wrappers
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-4-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/21285.c | 8 ++++----
|
||||
1 file changed, 4 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/21285.c b/drivers/tty/serial/21285.c
|
||||
index d756fcc884cb..4de0c975ebdc 100644
|
||||
--- a/drivers/tty/serial/21285.c
|
||||
+++ b/drivers/tty/serial/21285.c
|
||||
@@ -185,14 +185,14 @@ static void serial21285_break_ctl(struct uart_port *port, int break_state)
|
||||
unsigned long flags;
|
||||
unsigned int h_lcr;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
h_lcr = *CSR_H_UBRLCR;
|
||||
if (break_state)
|
||||
h_lcr |= H_UBRLCR_BREAK;
|
||||
else
|
||||
h_lcr &= ~H_UBRLCR_BREAK;
|
||||
*CSR_H_UBRLCR = h_lcr;
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
static int serial21285_startup(struct uart_port *port)
|
||||
@@ -272,7 +272,7 @@ serial21285_set_termios(struct uart_port *port, struct ktermios *termios,
|
||||
if (port->fifosize)
|
||||
h_lcr |= H_UBRLCR_FIFO;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
/*
|
||||
* Update the per-port timeout.
|
||||
@@ -309,7 +309,7 @@ serial21285_set_termios(struct uart_port *port, struct ktermios *termios,
|
||||
*CSR_H_UBRLCR = h_lcr;
|
||||
*CSR_UARTCON = 1;
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
static const char *serial21285_type(struct uart_port *port)
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,66 @@
|
|||
From c33a0cbdf0fa230865a8dfdb6470235689178915 Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:21 +0206
|
||||
Subject: [PATCH 031/195] serial: 8250_aspeed_vuart: Use port lock wrappers
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-5-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/8250/8250_aspeed_vuart.c | 6 +++---
|
||||
1 file changed, 3 insertions(+), 3 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
|
||||
index 4a9e71b2dbbc..021949f252f8 100644
|
||||
--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
|
||||
+++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
|
||||
@@ -288,9 +288,9 @@ static void aspeed_vuart_set_throttle(struct uart_port *port, bool throttle)
|
||||
struct uart_8250_port *up = up_to_u8250p(port);
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
__aspeed_vuart_set_throttle(up, throttle);
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
static void aspeed_vuart_throttle(struct uart_port *port)
|
||||
@@ -340,7 +340,7 @@ static int aspeed_vuart_handle_irq(struct uart_port *port)
|
||||
if (iir & UART_IIR_NO_INT)
|
||||
return 0;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
lsr = serial_port_in(port, UART_LSR);
|
||||
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -1,713 +0,0 @@
|
|||
From fc4755c31f7adcf012556bfc2db7e84c78aa0ad2 Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Fri, 26 Oct 2012 18:50:54 +0100
|
||||
Subject: [PATCH 32/62] sched: Add support for lazy preemption
|
||||
|
||||
It has become an obsession to mitigate the determinism vs. throughput
|
||||
loss of RT. Looking at the mainline semantics of preemption points
|
||||
gives a hint why RT sucks throughput wise for ordinary SCHED_OTHER
|
||||
tasks. One major issue is the wakeup of tasks which are right away
|
||||
preempting the waking task while the waking task holds a lock on which
|
||||
the woken task will block right after having preempted the wakee. In
|
||||
mainline this is prevented due to the implicit preemption disable of
|
||||
spin/rw_lock held regions. On RT this is not possible due to the fully
|
||||
preemptible nature of sleeping spinlocks.
|
||||
|
||||
Though for a SCHED_OTHER task preempting another SCHED_OTHER task this
|
||||
is really not a correctness issue. RT folks are concerned about
|
||||
SCHED_FIFO/RR tasks preemption and not about the purely fairness
|
||||
driven SCHED_OTHER preemption latencies.
|
||||
|
||||
So I introduced a lazy preemption mechanism which only applies to
|
||||
SCHED_OTHER tasks preempting another SCHED_OTHER task. Aside of the
|
||||
existing preempt_count each tasks sports now a preempt_lazy_count
|
||||
which is manipulated on lock acquiry and release. This is slightly
|
||||
incorrect as for lazyness reasons I coupled this on
|
||||
migrate_disable/enable so some other mechanisms get the same treatment
|
||||
(e.g. get_cpu_light).
|
||||
|
||||
Now on the scheduler side instead of setting NEED_RESCHED this sets
|
||||
NEED_RESCHED_LAZY in case of a SCHED_OTHER/SCHED_OTHER preemption and
|
||||
therefor allows to exit the waking task the lock held region before
|
||||
the woken task preempts. That also works better for cross CPU wakeups
|
||||
as the other side can stay in the adaptive spinning loop.
|
||||
|
||||
For RT class preemption there is no change. This simply sets
|
||||
NEED_RESCHED and forgoes the lazy preemption counter.
|
||||
|
||||
Initial test do not expose any observable latency increasement, but
|
||||
history shows that I've been proven wrong before :)
|
||||
|
||||
The lazy preemption mode is per default on, but with
|
||||
CONFIG_SCHED_DEBUG enabled it can be disabled via:
|
||||
|
||||
# echo NO_PREEMPT_LAZY >/sys/kernel/debug/sched_features
|
||||
|
||||
and reenabled via
|
||||
|
||||
# echo PREEMPT_LAZY >/sys/kernel/debug/sched_features
|
||||
|
||||
The test results so far are very machine and workload dependent, but
|
||||
there is a clear trend that it enhances the non RT workload
|
||||
performance.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
include/linux/preempt.h | 54 ++++++++++++++++++++++--
|
||||
include/linux/sched.h | 37 +++++++++++++++++
|
||||
include/linux/thread_info.h | 12 +++++-
|
||||
include/linux/trace_events.h | 10 ++++-
|
||||
kernel/Kconfig.preempt | 6 +++
|
||||
kernel/sched/core.c | 79 +++++++++++++++++++++++++++++++++++-
|
||||
kernel/sched/fair.c | 16 ++++----
|
||||
kernel/sched/features.h | 3 ++
|
||||
kernel/sched/sched.h | 9 ++++
|
||||
kernel/trace/trace.c | 50 ++++++++++++++---------
|
||||
kernel/trace/trace_events.c | 1 +
|
||||
kernel/trace/trace_output.c | 18 +++++++-
|
||||
12 files changed, 260 insertions(+), 35 deletions(-)
|
||||
|
||||
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
|
||||
index 8cfcc5d45451..9fc4c4bb320f 100644
|
||||
--- a/include/linux/preempt.h
|
||||
+++ b/include/linux/preempt.h
|
||||
@@ -207,6 +207,20 @@ extern void preempt_count_sub(int val);
|
||||
#define preempt_count_inc() preempt_count_add(1)
|
||||
#define preempt_count_dec() preempt_count_sub(1)
|
||||
|
||||
+#ifdef CONFIG_PREEMPT_LAZY
|
||||
+#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0)
|
||||
+#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0)
|
||||
+#define inc_preempt_lazy_count() add_preempt_lazy_count(1)
|
||||
+#define dec_preempt_lazy_count() sub_preempt_lazy_count(1)
|
||||
+#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count)
|
||||
+#else
|
||||
+#define add_preempt_lazy_count(val) do { } while (0)
|
||||
+#define sub_preempt_lazy_count(val) do { } while (0)
|
||||
+#define inc_preempt_lazy_count() do { } while (0)
|
||||
+#define dec_preempt_lazy_count() do { } while (0)
|
||||
+#define preempt_lazy_count() (0)
|
||||
+#endif
|
||||
+
|
||||
#ifdef CONFIG_PREEMPT_COUNT
|
||||
|
||||
#define preempt_disable() \
|
||||
@@ -215,6 +229,12 @@ do { \
|
||||
barrier(); \
|
||||
} while (0)
|
||||
|
||||
+#define preempt_lazy_disable() \
|
||||
+do { \
|
||||
+ inc_preempt_lazy_count(); \
|
||||
+ barrier(); \
|
||||
+} while (0)
|
||||
+
|
||||
#define sched_preempt_enable_no_resched() \
|
||||
do { \
|
||||
barrier(); \
|
||||
@@ -246,6 +266,18 @@ do { \
|
||||
__preempt_schedule(); \
|
||||
} while (0)
|
||||
|
||||
+/*
|
||||
+ * open code preempt_check_resched() because it is not exported to modules and
|
||||
+ * used by local_unlock() or bpf_enable_instrumentation().
|
||||
+ */
|
||||
+#define preempt_lazy_enable() \
|
||||
+do { \
|
||||
+ dec_preempt_lazy_count(); \
|
||||
+ barrier(); \
|
||||
+ if (should_resched(0)) \
|
||||
+ __preempt_schedule(); \
|
||||
+} while (0)
|
||||
+
|
||||
#else /* !CONFIG_PREEMPTION */
|
||||
#define preempt_enable() \
|
||||
do { \
|
||||
@@ -253,6 +285,12 @@ do { \
|
||||
preempt_count_dec(); \
|
||||
} while (0)
|
||||
|
||||
+#define preempt_lazy_enable() \
|
||||
+do { \
|
||||
+ dec_preempt_lazy_count(); \
|
||||
+ barrier(); \
|
||||
+} while (0)
|
||||
+
|
||||
#define preempt_enable_notrace() \
|
||||
do { \
|
||||
barrier(); \
|
||||
@@ -293,6 +331,9 @@ do { \
|
||||
#define preempt_enable_notrace() barrier()
|
||||
#define preemptible() 0
|
||||
|
||||
+#define preempt_lazy_disable() barrier()
|
||||
+#define preempt_lazy_enable() barrier()
|
||||
+
|
||||
#endif /* CONFIG_PREEMPT_COUNT */
|
||||
|
||||
#ifdef MODULE
|
||||
@@ -311,7 +352,7 @@ do { \
|
||||
} while (0)
|
||||
#define preempt_fold_need_resched() \
|
||||
do { \
|
||||
- if (tif_need_resched()) \
|
||||
+ if (tif_need_resched_now()) \
|
||||
set_preempt_need_resched(); \
|
||||
} while (0)
|
||||
|
||||
@@ -427,8 +468,15 @@ extern void migrate_enable(void);
|
||||
|
||||
#else
|
||||
|
||||
-static inline void migrate_disable(void) { }
|
||||
-static inline void migrate_enable(void) { }
|
||||
+static inline void migrate_disable(void)
|
||||
+{
|
||||
+ preempt_lazy_disable();
|
||||
+}
|
||||
+
|
||||
+static inline void migrate_enable(void)
|
||||
+{
|
||||
+ preempt_lazy_enable();
|
||||
+}
|
||||
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
diff --git a/include/linux/sched.h b/include/linux/sched.h
|
||||
index 0cac69902ec5..67ec36dbfacf 100644
|
||||
--- a/include/linux/sched.h
|
||||
+++ b/include/linux/sched.h
|
||||
@@ -2061,6 +2061,43 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
|
||||
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
|
||||
}
|
||||
|
||||
+#ifdef CONFIG_PREEMPT_LAZY
|
||||
+static inline void set_tsk_need_resched_lazy(struct task_struct *tsk)
|
||||
+{
|
||||
+ set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
|
||||
+}
|
||||
+
|
||||
+static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk)
|
||||
+{
|
||||
+ clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
|
||||
+}
|
||||
+
|
||||
+static inline int test_tsk_need_resched_lazy(struct task_struct *tsk)
|
||||
+{
|
||||
+ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY));
|
||||
+}
|
||||
+
|
||||
+static inline int need_resched_lazy(void)
|
||||
+{
|
||||
+ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
|
||||
+}
|
||||
+
|
||||
+static inline int need_resched_now(void)
|
||||
+{
|
||||
+ return test_thread_flag(TIF_NEED_RESCHED);
|
||||
+}
|
||||
+
|
||||
+#else
|
||||
+static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { }
|
||||
+static inline int need_resched_lazy(void) { return 0; }
|
||||
+
|
||||
+static inline int need_resched_now(void)
|
||||
+{
|
||||
+ return test_thread_flag(TIF_NEED_RESCHED);
|
||||
+}
|
||||
+
|
||||
+#endif
|
||||
+
|
||||
/*
|
||||
* cond_resched() and cond_resched_lock(): latency reduction via
|
||||
* explicit rescheduling in places that are safe. The return
|
||||
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
|
||||
index 9f392ec76f2b..779e0e96b9cb 100644
|
||||
--- a/include/linux/thread_info.h
|
||||
+++ b/include/linux/thread_info.h
|
||||
@@ -177,7 +177,17 @@ static __always_inline unsigned long read_ti_thread_flags(struct thread_info *ti
|
||||
clear_ti_thread_flag(task_thread_info(t), TIF_##fl)
|
||||
#endif /* !CONFIG_GENERIC_ENTRY */
|
||||
|
||||
-#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
|
||||
+#ifdef CONFIG_PREEMPT_LAZY
|
||||
+#define tif_need_resched() (test_thread_flag(TIF_NEED_RESCHED) || \
|
||||
+ test_thread_flag(TIF_NEED_RESCHED_LAZY))
|
||||
+#define tif_need_resched_now() (test_thread_flag(TIF_NEED_RESCHED))
|
||||
+#define tif_need_resched_lazy() test_thread_flag(TIF_NEED_RESCHED_LAZY)
|
||||
+
|
||||
+#else
|
||||
+#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
|
||||
+#define tif_need_resched_now() test_thread_flag(TIF_NEED_RESCHED)
|
||||
+#define tif_need_resched_lazy() 0
|
||||
+#endif
|
||||
|
||||
#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
|
||||
static inline int arch_within_stack_frames(const void * const stack,
|
||||
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
|
||||
index c8b5e9781d01..743b1183d184 100644
|
||||
--- a/include/linux/trace_events.h
|
||||
+++ b/include/linux/trace_events.h
|
||||
@@ -70,6 +70,7 @@ struct trace_entry {
|
||||
unsigned char flags;
|
||||
unsigned char preempt_count;
|
||||
int pid;
|
||||
+ unsigned char preempt_lazy_count;
|
||||
};
|
||||
|
||||
#define TRACE_EVENT_TYPE_MAX \
|
||||
@@ -159,9 +160,10 @@ static inline void tracing_generic_entry_update(struct trace_entry *entry,
|
||||
unsigned int trace_ctx)
|
||||
{
|
||||
entry->preempt_count = trace_ctx & 0xff;
|
||||
+ entry->preempt_lazy_count = (trace_ctx >> 16) & 0xff;
|
||||
entry->pid = current->pid;
|
||||
entry->type = type;
|
||||
- entry->flags = trace_ctx >> 16;
|
||||
+ entry->flags = trace_ctx >> 24;
|
||||
}
|
||||
|
||||
unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status);
|
||||
@@ -172,7 +174,13 @@ enum trace_flag_type {
|
||||
TRACE_FLAG_NEED_RESCHED = 0x04,
|
||||
TRACE_FLAG_HARDIRQ = 0x08,
|
||||
TRACE_FLAG_SOFTIRQ = 0x10,
|
||||
+#ifdef CONFIG_PREEMPT_LAZY
|
||||
+ TRACE_FLAG_PREEMPT_RESCHED = 0x00,
|
||||
+ TRACE_FLAG_NEED_RESCHED_LAZY = 0x20,
|
||||
+#else
|
||||
+ TRACE_FLAG_NEED_RESCHED_LAZY = 0x00,
|
||||
TRACE_FLAG_PREEMPT_RESCHED = 0x20,
|
||||
+#endif
|
||||
TRACE_FLAG_NMI = 0x40,
|
||||
TRACE_FLAG_BH_OFF = 0x80,
|
||||
};
|
||||
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
|
||||
index c2f1fd95a821..260c08efeb48 100644
|
||||
--- a/kernel/Kconfig.preempt
|
||||
+++ b/kernel/Kconfig.preempt
|
||||
@@ -1,5 +1,11 @@
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
|
||||
+config HAVE_PREEMPT_LAZY
|
||||
+ bool
|
||||
+
|
||||
+config PREEMPT_LAZY
|
||||
+ def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT
|
||||
+
|
||||
config PREEMPT_NONE_BUILD
|
||||
bool
|
||||
|
||||
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
|
||||
index 6bd06122850a..b72fc7d336e4 100644
|
||||
--- a/kernel/sched/core.c
|
||||
+++ b/kernel/sched/core.c
|
||||
@@ -1040,6 +1040,46 @@ void resched_curr(struct rq *rq)
|
||||
trace_sched_wake_idle_without_ipi(cpu);
|
||||
}
|
||||
|
||||
+#ifdef CONFIG_PREEMPT_LAZY
|
||||
+
|
||||
+static int tsk_is_polling(struct task_struct *p)
|
||||
+{
|
||||
+#ifdef TIF_POLLING_NRFLAG
|
||||
+ return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
|
||||
+#else
|
||||
+ return 0;
|
||||
+#endif
|
||||
+}
|
||||
+
|
||||
+void resched_curr_lazy(struct rq *rq)
|
||||
+{
|
||||
+ struct task_struct *curr = rq->curr;
|
||||
+ int cpu;
|
||||
+
|
||||
+ if (!sched_feat(PREEMPT_LAZY)) {
|
||||
+ resched_curr(rq);
|
||||
+ return;
|
||||
+ }
|
||||
+
|
||||
+ if (test_tsk_need_resched(curr))
|
||||
+ return;
|
||||
+
|
||||
+ if (test_tsk_need_resched_lazy(curr))
|
||||
+ return;
|
||||
+
|
||||
+ set_tsk_need_resched_lazy(curr);
|
||||
+
|
||||
+ cpu = cpu_of(rq);
|
||||
+ if (cpu == smp_processor_id())
|
||||
+ return;
|
||||
+
|
||||
+ /* NEED_RESCHED_LAZY must be visible before we test polling */
|
||||
+ smp_mb();
|
||||
+ if (!tsk_is_polling(curr))
|
||||
+ smp_send_reschedule(cpu);
|
||||
+}
|
||||
+#endif
|
||||
+
|
||||
void resched_cpu(int cpu)
|
||||
{
|
||||
struct rq *rq = cpu_rq(cpu);
|
||||
@@ -2224,6 +2264,7 @@ void migrate_disable(void)
|
||||
preempt_disable();
|
||||
this_rq()->nr_pinned++;
|
||||
p->migration_disabled = 1;
|
||||
+ preempt_lazy_disable();
|
||||
preempt_enable();
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(migrate_disable);
|
||||
@@ -2255,6 +2296,7 @@ void migrate_enable(void)
|
||||
barrier();
|
||||
p->migration_disabled = 0;
|
||||
this_rq()->nr_pinned--;
|
||||
+ preempt_lazy_enable();
|
||||
preempt_enable();
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(migrate_enable);
|
||||
@@ -4722,6 +4764,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
|
||||
p->on_cpu = 0;
|
||||
#endif
|
||||
init_task_preempt_count(p);
|
||||
+#ifdef CONFIG_HAVE_PREEMPT_LAZY
|
||||
+ task_thread_info(p)->preempt_lazy_count = 0;
|
||||
+#endif
|
||||
#ifdef CONFIG_SMP
|
||||
plist_node_init(&p->pushable_tasks, MAX_PRIO);
|
||||
RB_CLEAR_NODE(&p->pushable_dl_tasks);
|
||||
@@ -6592,6 +6637,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)
|
||||
|
||||
next = pick_next_task(rq, prev, &rf);
|
||||
clear_tsk_need_resched(prev);
|
||||
+ clear_tsk_need_resched_lazy(prev);
|
||||
clear_preempt_need_resched();
|
||||
#ifdef CONFIG_SCHED_DEBUG
|
||||
rq->last_seen_need_resched_ns = 0;
|
||||
@@ -6806,6 +6852,30 @@ static void __sched notrace preempt_schedule_common(void)
|
||||
} while (need_resched());
|
||||
}
|
||||
|
||||
+#ifdef CONFIG_PREEMPT_LAZY
|
||||
+/*
|
||||
+ * If TIF_NEED_RESCHED is then we allow to be scheduled away since this is
|
||||
+ * set by a RT task. Oterwise we try to avoid beeing scheduled out as long as
|
||||
+ * preempt_lazy_count counter >0.
|
||||
+ */
|
||||
+static __always_inline int preemptible_lazy(void)
|
||||
+{
|
||||
+ if (test_thread_flag(TIF_NEED_RESCHED))
|
||||
+ return 1;
|
||||
+ if (current_thread_info()->preempt_lazy_count)
|
||||
+ return 0;
|
||||
+ return 1;
|
||||
+}
|
||||
+
|
||||
+#else
|
||||
+
|
||||
+static inline int preemptible_lazy(void)
|
||||
+{
|
||||
+ return 1;
|
||||
+}
|
||||
+
|
||||
+#endif
|
||||
+
|
||||
#ifdef CONFIG_PREEMPTION
|
||||
/*
|
||||
* This is the entry point to schedule() from in-kernel preemption
|
||||
@@ -6819,6 +6889,8 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
|
||||
*/
|
||||
if (likely(!preemptible()))
|
||||
return;
|
||||
+ if (!preemptible_lazy())
|
||||
+ return;
|
||||
preempt_schedule_common();
|
||||
}
|
||||
NOKPROBE_SYMBOL(preempt_schedule);
|
||||
@@ -6866,6 +6938,9 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
|
||||
if (likely(!preemptible()))
|
||||
return;
|
||||
|
||||
+ if (!preemptible_lazy())
|
||||
+ return;
|
||||
+
|
||||
do {
|
||||
/*
|
||||
* Because the function tracer can trace preempt_count_sub()
|
||||
@@ -9131,7 +9206,9 @@ void __init init_idle(struct task_struct *idle, int cpu)
|
||||
|
||||
/* Set the preempt count _outside_ the spinlocks! */
|
||||
init_idle_preempt_count(idle, cpu);
|
||||
-
|
||||
+#ifdef CONFIG_HAVE_PREEMPT_LAZY
|
||||
+ task_thread_info(idle)->preempt_lazy_count = 0;
|
||||
+#endif
|
||||
/*
|
||||
* The idle tasks have their own, simple scheduling class:
|
||||
*/
|
||||
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
|
||||
index 2558ab9033be..2dc35af7b5a6 100644
|
||||
--- a/kernel/sched/fair.c
|
||||
+++ b/kernel/sched/fair.c
|
||||
@@ -4914,7 +4914,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
|
||||
ideal_runtime = sched_slice(cfs_rq, curr);
|
||||
delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
|
||||
if (delta_exec > ideal_runtime) {
|
||||
- resched_curr(rq_of(cfs_rq));
|
||||
+ resched_curr_lazy(rq_of(cfs_rq));
|
||||
/*
|
||||
* The current task ran long enough, ensure it doesn't get
|
||||
* re-elected due to buddy favours.
|
||||
@@ -4938,7 +4938,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
|
||||
return;
|
||||
|
||||
if (delta > ideal_runtime)
|
||||
- resched_curr(rq_of(cfs_rq));
|
||||
+ resched_curr_lazy(rq_of(cfs_rq));
|
||||
}
|
||||
|
||||
static void
|
||||
@@ -5084,7 +5084,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
|
||||
* validating it and just reschedule.
|
||||
*/
|
||||
if (queued) {
|
||||
- resched_curr(rq_of(cfs_rq));
|
||||
+ resched_curr_lazy(rq_of(cfs_rq));
|
||||
return;
|
||||
}
|
||||
/*
|
||||
@@ -5233,7 +5233,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
|
||||
* hierarchy can be throttled
|
||||
*/
|
||||
if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
|
||||
- resched_curr(rq_of(cfs_rq));
|
||||
+ resched_curr_lazy(rq_of(cfs_rq));
|
||||
}
|
||||
|
||||
static __always_inline
|
||||
@@ -5984,7 +5984,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
|
||||
|
||||
if (delta < 0) {
|
||||
if (task_current(rq, p))
|
||||
- resched_curr(rq);
|
||||
+ resched_curr_lazy(rq);
|
||||
return;
|
||||
}
|
||||
hrtick_start(rq, delta);
|
||||
@@ -7712,7 +7712,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
|
||||
return;
|
||||
|
||||
preempt:
|
||||
- resched_curr(rq);
|
||||
+ resched_curr_lazy(rq);
|
||||
/*
|
||||
* Only set the backward buddy when the current task is still
|
||||
* on the rq. This can happen when a wakeup gets interleaved
|
||||
@@ -11877,7 +11877,7 @@ static void task_fork_fair(struct task_struct *p)
|
||||
* 'current' within the tree based on its new key value.
|
||||
*/
|
||||
swap(curr->vruntime, se->vruntime);
|
||||
- resched_curr(rq);
|
||||
+ resched_curr_lazy(rq);
|
||||
}
|
||||
|
||||
se->vruntime -= cfs_rq->min_vruntime;
|
||||
@@ -11904,7 +11904,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
|
||||
*/
|
||||
if (task_current(rq, p)) {
|
||||
if (p->prio > oldprio)
|
||||
- resched_curr(rq);
|
||||
+ resched_curr_lazy(rq);
|
||||
} else
|
||||
check_preempt_curr(rq, p, 0);
|
||||
}
|
||||
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
|
||||
index ee7f23c76bd3..e13090e33f3c 100644
|
||||
--- a/kernel/sched/features.h
|
||||
+++ b/kernel/sched/features.h
|
||||
@@ -48,6 +48,9 @@ SCHED_FEAT(NONTASK_CAPACITY, true)
|
||||
|
||||
#ifdef CONFIG_PREEMPT_RT
|
||||
SCHED_FEAT(TTWU_QUEUE, false)
|
||||
+# ifdef CONFIG_PREEMPT_LAZY
|
||||
+SCHED_FEAT(PREEMPT_LAZY, true)
|
||||
+# endif
|
||||
#else
|
||||
|
||||
/*
|
||||
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
|
||||
index b62d53d7c264..f2577f511a41 100644
|
||||
--- a/kernel/sched/sched.h
|
||||
+++ b/kernel/sched/sched.h
|
||||
@@ -2350,6 +2350,15 @@ extern void reweight_task(struct task_struct *p, int prio);
|
||||
extern void resched_curr(struct rq *rq);
|
||||
extern void resched_cpu(int cpu);
|
||||
|
||||
+#ifdef CONFIG_PREEMPT_LAZY
|
||||
+extern void resched_curr_lazy(struct rq *rq);
|
||||
+#else
|
||||
+static inline void resched_curr_lazy(struct rq *rq)
|
||||
+{
|
||||
+ resched_curr(rq);
|
||||
+}
|
||||
+#endif
|
||||
+
|
||||
extern struct rt_bandwidth def_rt_bandwidth;
|
||||
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
|
||||
extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
|
||||
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
|
||||
index deae65af76ec..edf0407d5498 100644
|
||||
--- a/kernel/trace/trace.c
|
||||
+++ b/kernel/trace/trace.c
|
||||
@@ -2630,11 +2630,19 @@ unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
|
||||
if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
|
||||
trace_flags |= TRACE_FLAG_BH_OFF;
|
||||
|
||||
- if (tif_need_resched())
|
||||
+ if (tif_need_resched_now())
|
||||
trace_flags |= TRACE_FLAG_NEED_RESCHED;
|
||||
+#ifdef CONFIG_PREEMPT_LAZY
|
||||
+ /* Run out of bits. Share the LAZY and PREEMPT_RESCHED */
|
||||
+ if (need_resched_lazy())
|
||||
+ trace_flags |= TRACE_FLAG_NEED_RESCHED_LAZY;
|
||||
+#else
|
||||
if (test_preempt_need_resched())
|
||||
trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
|
||||
- return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
|
||||
+#endif
|
||||
+
|
||||
+ return (trace_flags << 24) | (min_t(unsigned int, pc & 0xff, 0xf)) |
|
||||
+ (preempt_lazy_count() & 0xff) << 16 |
|
||||
(min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
|
||||
}
|
||||
|
||||
@@ -4226,15 +4234,17 @@ unsigned long trace_total_entries(struct trace_array *tr)
|
||||
|
||||
static void print_lat_help_header(struct seq_file *m)
|
||||
{
|
||||
- seq_puts(m, "# _------=> CPU# \n"
|
||||
- "# / _-----=> irqs-off/BH-disabled\n"
|
||||
- "# | / _----=> need-resched \n"
|
||||
- "# || / _---=> hardirq/softirq \n"
|
||||
- "# ||| / _--=> preempt-depth \n"
|
||||
- "# |||| / _-=> migrate-disable \n"
|
||||
- "# ||||| / delay \n"
|
||||
- "# cmd pid |||||| time | caller \n"
|
||||
- "# \\ / |||||| \\ | / \n");
|
||||
+ seq_puts(m, "# _--------=> CPU# \n"
|
||||
+ "# / _-------=> irqs-off/BH-disabled\n"
|
||||
+ "# | / _------=> need-resched \n"
|
||||
+ "# || / _-----=> need-resched-lazy\n"
|
||||
+ "# ||| / _----=> hardirq/softirq \n"
|
||||
+ "# |||| / _---=> preempt-depth \n"
|
||||
+ "# ||||| / _--=> preempt-lazy-depth\n"
|
||||
+ "# |||||| / _-=> migrate-disable \n"
|
||||
+ "# ||||||| / delay \n"
|
||||
+ "# cmd pid |||||||| time | caller \n"
|
||||
+ "# \\ / |||||||| \\ | / \n");
|
||||
}
|
||||
|
||||
static void print_event_info(struct array_buffer *buf, struct seq_file *m)
|
||||
@@ -4268,14 +4278,16 @@ static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file
|
||||
|
||||
print_event_info(buf, m);
|
||||
|
||||
- seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space);
|
||||
- seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
|
||||
- seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
|
||||
- seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
|
||||
- seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space);
|
||||
- seq_printf(m, "# %.*s|||| / delay\n", prec, space);
|
||||
- seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
|
||||
- seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | ");
|
||||
+ seq_printf(m, "# %.*s _-------=> irqs-off/BH-disabled\n", prec, space);
|
||||
+ seq_printf(m, "# %.*s / _------=> need-resched\n", prec, space);
|
||||
+ seq_printf(m, "# %.*s| / _-----=> need-resched-lazy\n", prec, space);
|
||||
+ seq_printf(m, "# %.*s|| / _----=> hardirq/softirq\n", prec, space);
|
||||
+ seq_printf(m, "# %.*s||| / _---=> preempt-depth\n", prec, space);
|
||||
+ seq_printf(m, "# %.*s|||| / _--=> preempt-lazy-depth\n", prec, space);
|
||||
+ seq_printf(m, "# %.*s||||| / _-=> migrate-disable\n", prec, space);
|
||||
+ seq_printf(m, "# %.*s|||||| / delay\n", prec, space);
|
||||
+ seq_printf(m, "# TASK-PID %.*s CPU# ||||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
|
||||
+ seq_printf(m, "# | | %.*s | ||||||| | |\n", prec, " | ");
|
||||
}
|
||||
|
||||
void
|
||||
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
|
||||
index a6d2f99f847d..493c3f9cf01a 100644
|
||||
--- a/kernel/trace/trace_events.c
|
||||
+++ b/kernel/trace/trace_events.c
|
||||
@@ -208,6 +208,7 @@ static int trace_define_common_fields(void)
|
||||
/* Holds both preempt_count and migrate_disable */
|
||||
__common_field(unsigned char, preempt_count);
|
||||
__common_field(int, pid);
|
||||
+ __common_field(unsigned char, preempt_lazy_count);
|
||||
|
||||
return ret;
|
||||
}
|
||||
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
|
||||
index 5cd4fb656306..3c227e2843ae 100644
|
||||
--- a/kernel/trace/trace_output.c
|
||||
+++ b/kernel/trace/trace_output.c
|
||||
@@ -442,6 +442,7 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
|
||||
{
|
||||
char hardsoft_irq;
|
||||
char need_resched;
|
||||
+ char need_resched_lazy;
|
||||
char irqs_off;
|
||||
int hardirq;
|
||||
int softirq;
|
||||
@@ -462,20 +463,27 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
|
||||
|
||||
switch (entry->flags & (TRACE_FLAG_NEED_RESCHED |
|
||||
TRACE_FLAG_PREEMPT_RESCHED)) {
|
||||
+#ifndef CONFIG_PREEMPT_LAZY
|
||||
case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED:
|
||||
need_resched = 'N';
|
||||
break;
|
||||
+#endif
|
||||
case TRACE_FLAG_NEED_RESCHED:
|
||||
need_resched = 'n';
|
||||
break;
|
||||
+#ifndef CONFIG_PREEMPT_LAZY
|
||||
case TRACE_FLAG_PREEMPT_RESCHED:
|
||||
need_resched = 'p';
|
||||
break;
|
||||
+#endif
|
||||
default:
|
||||
need_resched = '.';
|
||||
break;
|
||||
}
|
||||
|
||||
+ need_resched_lazy =
|
||||
+ (entry->flags & TRACE_FLAG_NEED_RESCHED_LAZY) ? 'L' : '.';
|
||||
+
|
||||
hardsoft_irq =
|
||||
(nmi && hardirq) ? 'Z' :
|
||||
nmi ? 'z' :
|
||||
@@ -484,14 +492,20 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
|
||||
softirq ? 's' :
|
||||
'.' ;
|
||||
|
||||
- trace_seq_printf(s, "%c%c%c",
|
||||
- irqs_off, need_resched, hardsoft_irq);
|
||||
+ trace_seq_printf(s, "%c%c%c%c",
|
||||
+ irqs_off, need_resched, need_resched_lazy,
|
||||
+ hardsoft_irq);
|
||||
|
||||
if (entry->preempt_count & 0xf)
|
||||
trace_seq_printf(s, "%x", entry->preempt_count & 0xf);
|
||||
else
|
||||
trace_seq_putc(s, '.');
|
||||
|
||||
+ if (entry->preempt_lazy_count)
|
||||
+ trace_seq_printf(s, "%x", entry->preempt_lazy_count);
|
||||
+ else
|
||||
+ trace_seq_putc(s, '.');
|
||||
+
|
||||
if (entry->preempt_count & 0xf0)
|
||||
trace_seq_printf(s, "%x", entry->preempt_count >> 4);
|
||||
else
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,156 @@
|
|||
From 9b3ce8f4924d20af62e1958030c88c035e5664a6 Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:22 +0206
|
||||
Subject: [PATCH 032/195] serial: 8250_bcm7271: Use port lock wrappers
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-6-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/8250/8250_bcm7271.c | 28 +++++++++++++-------------
|
||||
1 file changed, 14 insertions(+), 14 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c
|
||||
index aa5aff046756..ff0662c68725 100644
|
||||
--- a/drivers/tty/serial/8250/8250_bcm7271.c
|
||||
+++ b/drivers/tty/serial/8250/8250_bcm7271.c
|
||||
@@ -567,7 +567,7 @@ static irqreturn_t brcmuart_isr(int irq, void *dev_id)
|
||||
if (interrupts == 0)
|
||||
return IRQ_NONE;
|
||||
|
||||
- spin_lock_irqsave(&up->lock, flags);
|
||||
+ uart_port_lock_irqsave(up, &flags);
|
||||
|
||||
/* Clear all interrupts */
|
||||
udma_writel(priv, REGS_DMA_ISR, UDMA_INTR_CLEAR, interrupts);
|
||||
@@ -581,7 +581,7 @@ static irqreturn_t brcmuart_isr(int irq, void *dev_id)
|
||||
if ((rval | tval) == 0)
|
||||
dev_warn(dev, "Spurious interrupt: 0x%x\n", interrupts);
|
||||
|
||||
- spin_unlock_irqrestore(&up->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(up, flags);
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
@@ -608,10 +608,10 @@ static int brcmuart_startup(struct uart_port *port)
|
||||
*
|
||||
* Synchronize UART_IER access against the console.
|
||||
*/
|
||||
- spin_lock_irq(&port->lock);
|
||||
+ uart_port_lock_irq(port);
|
||||
up->ier &= ~UART_IER_RDI;
|
||||
serial_port_out(port, UART_IER, up->ier);
|
||||
- spin_unlock_irq(&port->lock);
|
||||
+ uart_port_unlock_irq(port);
|
||||
|
||||
priv->tx_running = false;
|
||||
priv->dma.rx_dma = NULL;
|
||||
@@ -629,7 +629,7 @@ static void brcmuart_shutdown(struct uart_port *port)
|
||||
struct brcmuart_priv *priv = up->port.private_data;
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
priv->shutdown = true;
|
||||
if (priv->dma_enabled) {
|
||||
stop_rx_dma(up);
|
||||
@@ -645,7 +645,7 @@ static void brcmuart_shutdown(struct uart_port *port)
|
||||
*/
|
||||
up->dma = NULL;
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
serial8250_do_shutdown(port);
|
||||
}
|
||||
|
||||
@@ -788,7 +788,7 @@ static int brcmuart_handle_irq(struct uart_port *p)
|
||||
* interrupt but there is no data ready.
|
||||
*/
|
||||
if (((iir & UART_IIR_ID) == UART_IIR_RX_TIMEOUT) && !(priv->shutdown)) {
|
||||
- spin_lock_irqsave(&p->lock, flags);
|
||||
+ uart_port_lock_irqsave(p, &flags);
|
||||
status = serial_port_in(p, UART_LSR);
|
||||
if ((status & UART_LSR_DR) == 0) {
|
||||
|
||||
@@ -813,7 +813,7 @@ static int brcmuart_handle_irq(struct uart_port *p)
|
||||
|
||||
handled = 1;
|
||||
}
|
||||
- spin_unlock_irqrestore(&p->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(p, flags);
|
||||
if (handled)
|
||||
return 1;
|
||||
}
|
||||
@@ -831,7 +831,7 @@ static enum hrtimer_restart brcmuart_hrtimer_func(struct hrtimer *t)
|
||||
if (priv->shutdown)
|
||||
return HRTIMER_NORESTART;
|
||||
|
||||
- spin_lock_irqsave(&p->lock, flags);
|
||||
+ uart_port_lock_irqsave(p, &flags);
|
||||
status = serial_port_in(p, UART_LSR);
|
||||
|
||||
/*
|
||||
@@ -855,7 +855,7 @@ static enum hrtimer_restart brcmuart_hrtimer_func(struct hrtimer *t)
|
||||
status |= UART_MCR_RTS;
|
||||
serial_port_out(p, UART_MCR, status);
|
||||
}
|
||||
- spin_unlock_irqrestore(&p->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(p, flags);
|
||||
return HRTIMER_NORESTART;
|
||||
}
|
||||
|
||||
@@ -1154,10 +1154,10 @@ static int __maybe_unused brcmuart_suspend(struct device *dev)
|
||||
* This will prevent resume from enabling RTS before the
|
||||
* baud rate has been restored.
|
||||
*/
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
priv->saved_mctrl = port->mctrl;
|
||||
port->mctrl &= ~TIOCM_RTS;
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
|
||||
serial8250_suspend_port(priv->line);
|
||||
clk_disable_unprepare(priv->baud_mux_clk);
|
||||
@@ -1196,10 +1196,10 @@ static int __maybe_unused brcmuart_resume(struct device *dev)
|
||||
|
||||
if (priv->saved_mctrl & TIOCM_RTS) {
|
||||
/* Restore RTS */
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
port->mctrl |= TIOCM_RTS;
|
||||
port->ops->set_mctrl(port, port->mctrl);
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
return 0;
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,472 @@
|
|||
From 580e52e63f02c9243031af81c819aa0098ba9d75 Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:23 +0206
|
||||
Subject: [PATCH 033/195] serial: 8250: Use port lock wrappers
|
||||
MIME-Version: 1.0
|
||||
Content-Type: text/plain; charset=UTF-8
|
||||
Content-Transfer-Encoding: 8bit
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-7-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/8250/8250_core.c | 12 ++--
|
||||
drivers/tty/serial/8250/8250_port.c | 100 ++++++++++++++--------------
|
||||
2 files changed, 56 insertions(+), 56 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
|
||||
index 3449f8790e46..904e319e6b4a 100644
|
||||
--- a/drivers/tty/serial/8250/8250_core.c
|
||||
+++ b/drivers/tty/serial/8250/8250_core.c
|
||||
@@ -259,7 +259,7 @@ static void serial8250_backup_timeout(struct timer_list *t)
|
||||
unsigned int iir, ier = 0, lsr;
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&up->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&up->port, &flags);
|
||||
|
||||
/*
|
||||
* Must disable interrupts or else we risk racing with the interrupt
|
||||
@@ -292,7 +292,7 @@ static void serial8250_backup_timeout(struct timer_list *t)
|
||||
if (up->port.irq)
|
||||
serial_out(up, UART_IER, ier);
|
||||
|
||||
- spin_unlock_irqrestore(&up->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&up->port, flags);
|
||||
|
||||
/* Standard timer interval plus 0.2s to keep the port running */
|
||||
mod_timer(&up->timer,
|
||||
@@ -992,11 +992,11 @@ static void serial_8250_overrun_backoff_work(struct work_struct *work)
|
||||
struct uart_port *port = &up->port;
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
up->ier |= UART_IER_RLSI | UART_IER_RDI;
|
||||
up->port.read_status_mask |= UART_LSR_DR;
|
||||
serial_out(up, UART_IER, up->ier);
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -1194,9 +1194,9 @@ void serial8250_unregister_port(int line)
|
||||
if (uart->em485) {
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&uart->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&uart->port, &flags);
|
||||
serial8250_em485_destroy(uart);
|
||||
- spin_unlock_irqrestore(&uart->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&uart->port, flags);
|
||||
}
|
||||
|
||||
uart_remove_one_port(&serial8250_reg, &uart->port);
|
||||
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
|
||||
index 141627370aab..5b57254ae975 100644
|
||||
--- a/drivers/tty/serial/8250/8250_port.c
|
||||
+++ b/drivers/tty/serial/8250/8250_port.c
|
||||
@@ -689,7 +689,7 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
|
||||
|
||||
if (p->capabilities & UART_CAP_SLEEP) {
|
||||
/* Synchronize UART_IER access against the console. */
|
||||
- spin_lock_irq(&p->port.lock);
|
||||
+ uart_port_lock_irq(&p->port);
|
||||
if (p->capabilities & UART_CAP_EFR) {
|
||||
lcr = serial_in(p, UART_LCR);
|
||||
efr = serial_in(p, UART_EFR);
|
||||
@@ -703,7 +703,7 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
|
||||
serial_out(p, UART_EFR, efr);
|
||||
serial_out(p, UART_LCR, lcr);
|
||||
}
|
||||
- spin_unlock_irq(&p->port.lock);
|
||||
+ uart_port_unlock_irq(&p->port);
|
||||
}
|
||||
|
||||
serial8250_rpm_put(p);
|
||||
@@ -746,9 +746,9 @@ static void enable_rsa(struct uart_8250_port *up)
|
||||
{
|
||||
if (up->port.type == PORT_RSA) {
|
||||
if (up->port.uartclk != SERIAL_RSA_BAUD_BASE * 16) {
|
||||
- spin_lock_irq(&up->port.lock);
|
||||
+ uart_port_lock_irq(&up->port);
|
||||
__enable_rsa(up);
|
||||
- spin_unlock_irq(&up->port.lock);
|
||||
+ uart_port_unlock_irq(&up->port);
|
||||
}
|
||||
if (up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16)
|
||||
serial_out(up, UART_RSA_FRR, 0);
|
||||
@@ -768,7 +768,7 @@ static void disable_rsa(struct uart_8250_port *up)
|
||||
|
||||
if (up->port.type == PORT_RSA &&
|
||||
up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16) {
|
||||
- spin_lock_irq(&up->port.lock);
|
||||
+ uart_port_lock_irq(&up->port);
|
||||
|
||||
mode = serial_in(up, UART_RSA_MSR);
|
||||
result = !(mode & UART_RSA_MSR_FIFO);
|
||||
@@ -781,7 +781,7 @@ static void disable_rsa(struct uart_8250_port *up)
|
||||
|
||||
if (result)
|
||||
up->port.uartclk = SERIAL_RSA_BAUD_BASE_LO * 16;
|
||||
- spin_unlock_irq(&up->port.lock);
|
||||
+ uart_port_unlock_irq(&up->port);
|
||||
}
|
||||
}
|
||||
#endif /* CONFIG_SERIAL_8250_RSA */
|
||||
@@ -1172,7 +1172,7 @@ static void autoconfig(struct uart_8250_port *up)
|
||||
*
|
||||
* Synchronize UART_IER access against the console.
|
||||
*/
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
up->capabilities = 0;
|
||||
up->bugs = 0;
|
||||
@@ -1211,7 +1211,7 @@ static void autoconfig(struct uart_8250_port *up)
|
||||
/*
|
||||
* We failed; there's nothing here
|
||||
*/
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
DEBUG_AUTOCONF("IER test failed (%02x, %02x) ",
|
||||
scratch2, scratch3);
|
||||
goto out;
|
||||
@@ -1235,7 +1235,7 @@ static void autoconfig(struct uart_8250_port *up)
|
||||
status1 = serial_in(up, UART_MSR) & UART_MSR_STATUS_BITS;
|
||||
serial8250_out_MCR(up, save_mcr);
|
||||
if (status1 != (UART_MSR_DCD | UART_MSR_CTS)) {
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
DEBUG_AUTOCONF("LOOP test failed (%02x) ",
|
||||
status1);
|
||||
goto out;
|
||||
@@ -1304,7 +1304,7 @@ static void autoconfig(struct uart_8250_port *up)
|
||||
serial8250_clear_IER(up);
|
||||
|
||||
out_unlock:
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
|
||||
/*
|
||||
* Check if the device is a Fintek F81216A
|
||||
@@ -1344,9 +1344,9 @@ static void autoconfig_irq(struct uart_8250_port *up)
|
||||
probe_irq_off(probe_irq_on());
|
||||
save_mcr = serial8250_in_MCR(up);
|
||||
/* Synchronize UART_IER access against the console. */
|
||||
- spin_lock_irq(&port->lock);
|
||||
+ uart_port_lock_irq(port);
|
||||
save_ier = serial_in(up, UART_IER);
|
||||
- spin_unlock_irq(&port->lock);
|
||||
+ uart_port_unlock_irq(port);
|
||||
serial8250_out_MCR(up, UART_MCR_OUT1 | UART_MCR_OUT2);
|
||||
|
||||
irqs = probe_irq_on();
|
||||
@@ -1359,9 +1359,9 @@ static void autoconfig_irq(struct uart_8250_port *up)
|
||||
UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2);
|
||||
}
|
||||
/* Synchronize UART_IER access against the console. */
|
||||
- spin_lock_irq(&port->lock);
|
||||
+ uart_port_lock_irq(port);
|
||||
serial_out(up, UART_IER, UART_IER_ALL_INTR);
|
||||
- spin_unlock_irq(&port->lock);
|
||||
+ uart_port_unlock_irq(port);
|
||||
serial_in(up, UART_LSR);
|
||||
serial_in(up, UART_RX);
|
||||
serial_in(up, UART_IIR);
|
||||
@@ -1372,9 +1372,9 @@ static void autoconfig_irq(struct uart_8250_port *up)
|
||||
|
||||
serial8250_out_MCR(up, save_mcr);
|
||||
/* Synchronize UART_IER access against the console. */
|
||||
- spin_lock_irq(&port->lock);
|
||||
+ uart_port_lock_irq(port);
|
||||
serial_out(up, UART_IER, save_ier);
|
||||
- spin_unlock_irq(&port->lock);
|
||||
+ uart_port_unlock_irq(port);
|
||||
|
||||
if (port->flags & UPF_FOURPORT)
|
||||
outb_p(save_ICP, ICP);
|
||||
@@ -1442,13 +1442,13 @@ static enum hrtimer_restart serial8250_em485_handle_stop_tx(struct hrtimer *t)
|
||||
unsigned long flags;
|
||||
|
||||
serial8250_rpm_get(p);
|
||||
- spin_lock_irqsave(&p->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&p->port, &flags);
|
||||
if (em485->active_timer == &em485->stop_tx_timer) {
|
||||
p->rs485_stop_tx(p);
|
||||
em485->active_timer = NULL;
|
||||
em485->tx_stopped = true;
|
||||
}
|
||||
- spin_unlock_irqrestore(&p->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&p->port, flags);
|
||||
serial8250_rpm_put(p);
|
||||
|
||||
return HRTIMER_NORESTART;
|
||||
@@ -1630,12 +1630,12 @@ static enum hrtimer_restart serial8250_em485_handle_start_tx(struct hrtimer *t)
|
||||
struct uart_8250_port *p = em485->port;
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&p->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&p->port, &flags);
|
||||
if (em485->active_timer == &em485->start_tx_timer) {
|
||||
__start_tx(&p->port);
|
||||
em485->active_timer = NULL;
|
||||
}
|
||||
- spin_unlock_irqrestore(&p->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&p->port, flags);
|
||||
|
||||
return HRTIMER_NORESTART;
|
||||
}
|
||||
@@ -1918,7 +1918,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
|
||||
if (iir & UART_IIR_NO_INT)
|
||||
return 0;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
status = serial_lsr_in(up);
|
||||
|
||||
@@ -1988,9 +1988,9 @@ static int serial8250_tx_threshold_handle_irq(struct uart_port *port)
|
||||
if ((iir & UART_IIR_ID) == UART_IIR_THRI) {
|
||||
struct uart_8250_port *up = up_to_u8250p(port);
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
serial8250_tx_chars(up);
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
iir = serial_port_in(port, UART_IIR);
|
||||
@@ -2005,10 +2005,10 @@ static unsigned int serial8250_tx_empty(struct uart_port *port)
|
||||
|
||||
serial8250_rpm_get(up);
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
if (!serial8250_tx_dma_running(up) && uart_lsr_tx_empty(serial_lsr_in(up)))
|
||||
result = TIOCSER_TEMT;
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
|
||||
serial8250_rpm_put(up);
|
||||
|
||||
@@ -2070,13 +2070,13 @@ static void serial8250_break_ctl(struct uart_port *port, int break_state)
|
||||
unsigned long flags;
|
||||
|
||||
serial8250_rpm_get(up);
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
if (break_state == -1)
|
||||
up->lcr |= UART_LCR_SBC;
|
||||
else
|
||||
up->lcr &= ~UART_LCR_SBC;
|
||||
serial_port_out(port, UART_LCR, up->lcr);
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
serial8250_rpm_put(up);
|
||||
}
|
||||
|
||||
@@ -2211,7 +2211,7 @@ int serial8250_do_startup(struct uart_port *port)
|
||||
*
|
||||
* Synchronize UART_IER access against the console.
|
||||
*/
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
up->acr = 0;
|
||||
serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
|
||||
serial_port_out(port, UART_EFR, UART_EFR_ECB);
|
||||
@@ -2221,7 +2221,7 @@ int serial8250_do_startup(struct uart_port *port)
|
||||
serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
|
||||
serial_port_out(port, UART_EFR, UART_EFR_ECB);
|
||||
serial_port_out(port, UART_LCR, 0);
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
if (port->type == PORT_DA830) {
|
||||
@@ -2230,10 +2230,10 @@ int serial8250_do_startup(struct uart_port *port)
|
||||
*
|
||||
* Synchronize UART_IER access against the console.
|
||||
*/
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
serial_port_out(port, UART_IER, 0);
|
||||
serial_port_out(port, UART_DA830_PWREMU_MGMT, 0);
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
mdelay(10);
|
||||
|
||||
/* Enable Tx, Rx and free run mode */
|
||||
@@ -2347,7 +2347,7 @@ int serial8250_do_startup(struct uart_port *port)
|
||||
*
|
||||
* Synchronize UART_IER access against the console.
|
||||
*/
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
wait_for_xmitr(up, UART_LSR_THRE);
|
||||
serial_port_out_sync(port, UART_IER, UART_IER_THRI);
|
||||
@@ -2359,7 +2359,7 @@ int serial8250_do_startup(struct uart_port *port)
|
||||
iir = serial_port_in(port, UART_IIR);
|
||||
serial_port_out(port, UART_IER, 0);
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
|
||||
if (port->irqflags & IRQF_SHARED)
|
||||
enable_irq(port->irq);
|
||||
@@ -2382,7 +2382,7 @@ int serial8250_do_startup(struct uart_port *port)
|
||||
*/
|
||||
serial_port_out(port, UART_LCR, UART_LCR_WLEN8);
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
if (up->port.flags & UPF_FOURPORT) {
|
||||
if (!up->port.irq)
|
||||
up->port.mctrl |= TIOCM_OUT1;
|
||||
@@ -2428,7 +2428,7 @@ int serial8250_do_startup(struct uart_port *port)
|
||||
}
|
||||
|
||||
dont_test_tx_en:
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
|
||||
/*
|
||||
* Clear the interrupt registers again for luck, and clear the
|
||||
@@ -2499,17 +2499,17 @@ void serial8250_do_shutdown(struct uart_port *port)
|
||||
*
|
||||
* Synchronize UART_IER access against the console.
|
||||
*/
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
up->ier = 0;
|
||||
serial_port_out(port, UART_IER, 0);
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
|
||||
synchronize_irq(port->irq);
|
||||
|
||||
if (up->dma)
|
||||
serial8250_release_dma(up);
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
if (port->flags & UPF_FOURPORT) {
|
||||
/* reset interrupts on the AST Fourport board */
|
||||
inb((port->iobase & 0xfe0) | 0x1f);
|
||||
@@ -2518,7 +2518,7 @@ void serial8250_do_shutdown(struct uart_port *port)
|
||||
port->mctrl &= ~TIOCM_OUT2;
|
||||
|
||||
serial8250_set_mctrl(port, port->mctrl);
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
|
||||
/*
|
||||
* Disable break condition and FIFOs
|
||||
@@ -2754,14 +2754,14 @@ void serial8250_update_uartclk(struct uart_port *port, unsigned int uartclk)
|
||||
quot = serial8250_get_divisor(port, baud, &frac);
|
||||
|
||||
serial8250_rpm_get(up);
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
uart_update_timeout(port, termios->c_cflag, baud);
|
||||
|
||||
serial8250_set_divisor(port, baud, quot, frac);
|
||||
serial_port_out(port, UART_LCR, up->lcr);
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
serial8250_rpm_put(up);
|
||||
|
||||
out_unlock:
|
||||
@@ -2798,7 +2798,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
|
||||
* Synchronize UART_IER access against the console.
|
||||
*/
|
||||
serial8250_rpm_get(up);
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
up->lcr = cval; /* Save computed LCR */
|
||||
|
||||
@@ -2901,7 +2901,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
|
||||
serial_port_out(port, UART_FCR, up->fcr); /* set fcr */
|
||||
}
|
||||
serial8250_set_mctrl(port, port->mctrl);
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
serial8250_rpm_put(up);
|
||||
|
||||
/* Don't rewrite B0 */
|
||||
@@ -2924,15 +2924,15 @@ void serial8250_do_set_ldisc(struct uart_port *port, struct ktermios *termios)
|
||||
{
|
||||
if (termios->c_line == N_PPS) {
|
||||
port->flags |= UPF_HARDPPS_CD;
|
||||
- spin_lock_irq(&port->lock);
|
||||
+ uart_port_lock_irq(port);
|
||||
serial8250_enable_ms(port);
|
||||
- spin_unlock_irq(&port->lock);
|
||||
+ uart_port_unlock_irq(port);
|
||||
} else {
|
||||
port->flags &= ~UPF_HARDPPS_CD;
|
||||
if (!UART_ENABLE_MS(port, termios->c_cflag)) {
|
||||
- spin_lock_irq(&port->lock);
|
||||
+ uart_port_lock_irq(port);
|
||||
serial8250_disable_ms(port);
|
||||
- spin_unlock_irq(&port->lock);
|
||||
+ uart_port_unlock_irq(port);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -3406,9 +3406,9 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
|
||||
touch_nmi_watchdog();
|
||||
|
||||
if (oops_in_progress)
|
||||
- locked = spin_trylock_irqsave(&port->lock, flags);
|
||||
+ locked = uart_port_trylock_irqsave(port, &flags);
|
||||
else
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
/*
|
||||
* First save the IER then disable the interrupts
|
||||
@@ -3478,7 +3478,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
|
||||
serial8250_modem_status(up);
|
||||
|
||||
if (locked)
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
static unsigned int probe_baud(struct uart_port *port)
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -1,35 +0,0 @@
|
|||
From 4a7f3d980fdfa78e88ce38e493b5d39384f35079 Mon Sep 17 00:00:00 2001
|
||||
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Date: Tue, 30 Jun 2020 11:45:14 +0200
|
||||
Subject: [PATCH 33/62] x86/entry: Use should_resched() in
|
||||
idtentry_exit_cond_resched()
|
||||
|
||||
The TIF_NEED_RESCHED bit is inlined on x86 into the preemption counter.
|
||||
By using should_resched(0) instead of need_resched() the same check can
|
||||
be performed which uses the same variable as 'preempt_count()` which was
|
||||
issued before.
|
||||
|
||||
Use should_resched(0) instead need_resched().
|
||||
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
kernel/entry/common.c | 2 +-
|
||||
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||
|
||||
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
|
||||
index be61332c66b5..97ff5faad4fb 100644
|
||||
--- a/kernel/entry/common.c
|
||||
+++ b/kernel/entry/common.c
|
||||
@@ -386,7 +386,7 @@ void raw_irqentry_exit_cond_resched(void)
|
||||
rcu_irq_exit_check_preempt();
|
||||
if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
|
||||
WARN_ON_ONCE(!on_thread_stack());
|
||||
- if (need_resched())
|
||||
+ if (should_resched(0))
|
||||
preempt_schedule_irq();
|
||||
}
|
||||
}
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,85 @@
|
|||
From 70ceea48d1051ec15b3a8d66f62386b323e12e4e Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:24 +0206
|
||||
Subject: [PATCH 034/195] serial: 8250_dma: Use port lock wrappers
|
||||
MIME-Version: 1.0
|
||||
Content-Type: text/plain; charset=UTF-8
|
||||
Content-Transfer-Encoding: 8bit
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-8-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/8250/8250_dma.c | 8 ++++----
|
||||
1 file changed, 4 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
|
||||
index 7fa66501792d..8b30ca8fdd3f 100644
|
||||
--- a/drivers/tty/serial/8250/8250_dma.c
|
||||
+++ b/drivers/tty/serial/8250/8250_dma.c
|
||||
@@ -22,7 +22,7 @@ static void __dma_tx_complete(void *param)
|
||||
dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
|
||||
UART_XMIT_SIZE, DMA_TO_DEVICE);
|
||||
|
||||
- spin_lock_irqsave(&p->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&p->port, &flags);
|
||||
|
||||
dma->tx_running = 0;
|
||||
|
||||
@@ -35,7 +35,7 @@ static void __dma_tx_complete(void *param)
|
||||
if (ret || !dma->tx_running)
|
||||
serial8250_set_THRI(p);
|
||||
|
||||
- spin_unlock_irqrestore(&p->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&p->port, flags);
|
||||
}
|
||||
|
||||
static void __dma_rx_complete(struct uart_8250_port *p)
|
||||
@@ -70,7 +70,7 @@ static void dma_rx_complete(void *param)
|
||||
struct uart_8250_dma *dma = p->dma;
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&p->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&p->port, &flags);
|
||||
if (dma->rx_running)
|
||||
__dma_rx_complete(p);
|
||||
|
||||
@@ -80,7 +80,7 @@ static void dma_rx_complete(void *param)
|
||||
*/
|
||||
if (!dma->rx_running && (serial_lsr_in(p) & UART_LSR_DR))
|
||||
p->dma->rx_dma(p);
|
||||
- spin_unlock_irqrestore(&p->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&p->port, flags);
|
||||
}
|
||||
|
||||
int serial8250_tx_dma(struct uart_8250_port *p)
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -1,157 +0,0 @@
|
|||
From b73c9a31ab7a5c00891726ff9d5da0297243d335 Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 1 Nov 2012 11:03:47 +0100
|
||||
Subject: [PATCH 34/62] x86: Support for lazy preemption
|
||||
|
||||
Implement the x86 pieces for lazy preempt.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
arch/x86/Kconfig | 1 +
|
||||
arch/x86/include/asm/preempt.h | 33 +++++++++++++++++++++++++++++-
|
||||
arch/x86/include/asm/thread_info.h | 7 +++++++
|
||||
include/linux/entry-common.h | 2 +-
|
||||
kernel/entry/common.c | 2 +-
|
||||
5 files changed, 42 insertions(+), 3 deletions(-)
|
||||
|
||||
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
|
||||
index c9bed9c69423..f38bd8a5061e 100644
|
||||
--- a/arch/x86/Kconfig
|
||||
+++ b/arch/x86/Kconfig
|
||||
@@ -251,6 +251,7 @@ config X86
|
||||
select HAVE_PCI
|
||||
select HAVE_PERF_REGS
|
||||
select HAVE_PERF_USER_STACK_DUMP
|
||||
+ select HAVE_PREEMPT_LAZY
|
||||
select MMU_GATHER_RCU_TABLE_FREE if PARAVIRT
|
||||
select MMU_GATHER_MERGE_VMAS
|
||||
select HAVE_POSIX_CPU_TIMERS_TASK_WORK
|
||||
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
|
||||
index 5f6daea1ee24..cd20b4a5719a 100644
|
||||
--- a/arch/x86/include/asm/preempt.h
|
||||
+++ b/arch/x86/include/asm/preempt.h
|
||||
@@ -90,17 +90,48 @@ static __always_inline void __preempt_count_sub(int val)
|
||||
* a decrement which hits zero means we have no preempt_count and should
|
||||
* reschedule.
|
||||
*/
|
||||
-static __always_inline bool __preempt_count_dec_and_test(void)
|
||||
+static __always_inline bool ____preempt_count_dec_and_test(void)
|
||||
{
|
||||
return GEN_UNARY_RMWcc("decl", __preempt_count, e, __percpu_arg([var]));
|
||||
}
|
||||
|
||||
+static __always_inline bool __preempt_count_dec_and_test(void)
|
||||
+{
|
||||
+ if (____preempt_count_dec_and_test())
|
||||
+ return true;
|
||||
+#ifdef CONFIG_PREEMPT_LAZY
|
||||
+ if (preempt_count())
|
||||
+ return false;
|
||||
+ if (current_thread_info()->preempt_lazy_count)
|
||||
+ return false;
|
||||
+ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
|
||||
+#else
|
||||
+ return false;
|
||||
+#endif
|
||||
+}
|
||||
+
|
||||
/*
|
||||
* Returns true when we need to resched and can (barring IRQ state).
|
||||
*/
|
||||
static __always_inline bool should_resched(int preempt_offset)
|
||||
{
|
||||
+#ifdef CONFIG_PREEMPT_LAZY
|
||||
+ u32 tmp;
|
||||
+ tmp = raw_cpu_read_4(__preempt_count);
|
||||
+ if (tmp == preempt_offset)
|
||||
+ return true;
|
||||
+
|
||||
+ /* preempt count == 0 ? */
|
||||
+ tmp &= ~PREEMPT_NEED_RESCHED;
|
||||
+ if (tmp != preempt_offset)
|
||||
+ return false;
|
||||
+ /* XXX PREEMPT_LOCK_OFFSET */
|
||||
+ if (current_thread_info()->preempt_lazy_count)
|
||||
+ return false;
|
||||
+ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
|
||||
+#else
|
||||
return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
|
||||
+#endif
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PREEMPTION
|
||||
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
|
||||
index f0cb881c1d69..0da06a9b5f72 100644
|
||||
--- a/arch/x86/include/asm/thread_info.h
|
||||
+++ b/arch/x86/include/asm/thread_info.h
|
||||
@@ -57,6 +57,8 @@ struct thread_info {
|
||||
unsigned long flags; /* low level flags */
|
||||
unsigned long syscall_work; /* SYSCALL_WORK_ flags */
|
||||
u32 status; /* thread synchronous flags */
|
||||
+ int preempt_lazy_count; /* 0 => lazy preemptable
|
||||
+ <0 => BUG */
|
||||
#ifdef CONFIG_SMP
|
||||
u32 cpu; /* current CPU */
|
||||
#endif
|
||||
@@ -65,6 +67,7 @@ struct thread_info {
|
||||
#define INIT_THREAD_INFO(tsk) \
|
||||
{ \
|
||||
.flags = 0, \
|
||||
+ .preempt_lazy_count = 0, \
|
||||
}
|
||||
|
||||
#else /* !__ASSEMBLY__ */
|
||||
@@ -92,6 +95,7 @@ struct thread_info {
|
||||
#define TIF_NOCPUID 15 /* CPUID is not accessible in userland */
|
||||
#define TIF_NOTSC 16 /* TSC is not accessible in userland */
|
||||
#define TIF_NOTIFY_SIGNAL 17 /* signal notifications exist */
|
||||
+#define TIF_NEED_RESCHED_LAZY 19 /* lazy rescheduling necessary */
|
||||
#define TIF_MEMDIE 20 /* is terminating due to OOM killer */
|
||||
#define TIF_POLLING_NRFLAG 21 /* idle is polling for TIF_NEED_RESCHED */
|
||||
#define TIF_IO_BITMAP 22 /* uses I/O bitmap */
|
||||
@@ -115,6 +119,7 @@ struct thread_info {
|
||||
#define _TIF_NOCPUID (1 << TIF_NOCPUID)
|
||||
#define _TIF_NOTSC (1 << TIF_NOTSC)
|
||||
#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
|
||||
+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
|
||||
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
|
||||
#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
|
||||
#define _TIF_SPEC_FORCE_UPDATE (1 << TIF_SPEC_FORCE_UPDATE)
|
||||
@@ -146,6 +151,8 @@ struct thread_info {
|
||||
|
||||
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
|
||||
|
||||
+#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
|
||||
+
|
||||
#define STACK_WARN (THREAD_SIZE/8)
|
||||
|
||||
/*
|
||||
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
|
||||
index d95ab85f96ba..93cc1ae12125 100644
|
||||
--- a/include/linux/entry-common.h
|
||||
+++ b/include/linux/entry-common.h
|
||||
@@ -59,7 +59,7 @@
|
||||
|
||||
#define EXIT_TO_USER_MODE_WORK \
|
||||
(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
|
||||
- _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \
|
||||
+ _TIF_NEED_RESCHED_MASK | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \
|
||||
ARCH_EXIT_TO_USER_MODE_WORK)
|
||||
|
||||
/**
|
||||
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
|
||||
index 97ff5faad4fb..c6301e520d47 100644
|
||||
--- a/kernel/entry/common.c
|
||||
+++ b/kernel/entry/common.c
|
||||
@@ -155,7 +155,7 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
|
||||
|
||||
local_irq_enable_exit_to_user(ti_work);
|
||||
|
||||
- if (ti_work & _TIF_NEED_RESCHED)
|
||||
+ if (ti_work & _TIF_NEED_RESCHED_MASK)
|
||||
schedule();
|
||||
|
||||
if (ti_work & _TIF_UPROBE)
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -1,48 +0,0 @@
|
|||
From a17c5b9f7e3fef4ab8b0a87fa33e6c89f6c89cba Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Tue, 13 Jul 2021 07:52:52 +0200
|
||||
Subject: [PATCH 35/62] entry: Fix the preempt lazy fallout
|
||||
|
||||
Common code needs common defines....
|
||||
|
||||
Fixes: f2f9e496208c ("x86: Support for lazy preemption")
|
||||
Reported-by: kernel test robot <lkp@intel.com>
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
arch/x86/include/asm/thread_info.h | 2 --
|
||||
include/linux/entry-common.h | 6 ++++++
|
||||
2 files changed, 6 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
|
||||
index 0da06a9b5f72..fd8fb76f324f 100644
|
||||
--- a/arch/x86/include/asm/thread_info.h
|
||||
+++ b/arch/x86/include/asm/thread_info.h
|
||||
@@ -151,8 +151,6 @@ struct thread_info {
|
||||
|
||||
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
|
||||
|
||||
-#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
|
||||
-
|
||||
#define STACK_WARN (THREAD_SIZE/8)
|
||||
|
||||
/*
|
||||
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
|
||||
index 93cc1ae12125..3dc3704a3cdb 100644
|
||||
--- a/include/linux/entry-common.h
|
||||
+++ b/include/linux/entry-common.h
|
||||
@@ -57,6 +57,12 @@
|
||||
# define ARCH_EXIT_TO_USER_MODE_WORK (0)
|
||||
#endif
|
||||
|
||||
+#ifdef CONFIG_PREEMPT_LAZY
|
||||
+# define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
|
||||
+#else
|
||||
+# define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED)
|
||||
+#endif
|
||||
+
|
||||
#define EXIT_TO_USER_MODE_WORK \
|
||||
(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
|
||||
_TIF_NEED_RESCHED_MASK | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,74 @@
|
|||
From 5cca2b99cbc7d88d8eeb8087e9130e127c83d385 Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:25 +0206
|
||||
Subject: [PATCH 035/195] serial: 8250_dw: Use port lock wrappers
|
||||
MIME-Version: 1.0
|
||||
Content-Type: text/plain; charset=UTF-8
|
||||
Content-Transfer-Encoding: 8bit
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-9-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/8250/8250_dw.c | 8 ++++----
|
||||
1 file changed, 4 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
|
||||
index a1f2259cc9a9..53c284bb271d 100644
|
||||
--- a/drivers/tty/serial/8250/8250_dw.c
|
||||
+++ b/drivers/tty/serial/8250/8250_dw.c
|
||||
@@ -263,20 +263,20 @@ static int dw8250_handle_irq(struct uart_port *p)
|
||||
* so we limit the workaround only to non-DMA mode.
|
||||
*/
|
||||
if (!up->dma && rx_timeout) {
|
||||
- spin_lock_irqsave(&p->lock, flags);
|
||||
+ uart_port_lock_irqsave(p, &flags);
|
||||
status = serial_lsr_in(up);
|
||||
|
||||
if (!(status & (UART_LSR_DR | UART_LSR_BI)))
|
||||
(void) p->serial_in(p, UART_RX);
|
||||
|
||||
- spin_unlock_irqrestore(&p->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(p, flags);
|
||||
}
|
||||
|
||||
/* Manually stop the Rx DMA transfer when acting as flow controller */
|
||||
if (quirks & DW_UART_QUIRK_IS_DMA_FC && up->dma && up->dma->rx_running && rx_timeout) {
|
||||
- spin_lock_irqsave(&p->lock, flags);
|
||||
+ uart_port_lock_irqsave(p, &flags);
|
||||
status = serial_lsr_in(up);
|
||||
- spin_unlock_irqrestore(&p->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(p, flags);
|
||||
|
||||
if (status & (UART_LSR_DR | UART_LSR_BI)) {
|
||||
dw8250_writel_ext(p, RZN1_UART_RDMACR, 0);
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -1,136 +0,0 @@
|
|||
From 93b892ee12b8eb43a72f308c981f3c68c6ae8b45 Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Wed, 31 Oct 2012 12:04:11 +0100
|
||||
Subject: [PATCH 36/62] arm: Add support for lazy preemption
|
||||
|
||||
Implement the arm pieces for lazy preempt.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
arch/arm/Kconfig | 1 +
|
||||
arch/arm/include/asm/thread_info.h | 6 +++++-
|
||||
arch/arm/kernel/asm-offsets.c | 1 +
|
||||
arch/arm/kernel/entry-armv.S | 19 ++++++++++++++++---
|
||||
arch/arm/kernel/signal.c | 3 ++-
|
||||
5 files changed, 25 insertions(+), 5 deletions(-)
|
||||
|
||||
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
|
||||
index 6d5afe2e6ba3..717e596dc13b 100644
|
||||
--- a/arch/arm/Kconfig
|
||||
+++ b/arch/arm/Kconfig
|
||||
@@ -115,6 +115,7 @@ config ARM
|
||||
select HAVE_PERF_EVENTS
|
||||
select HAVE_PERF_REGS
|
||||
select HAVE_PERF_USER_STACK_DUMP
|
||||
+ select HAVE_PREEMPT_LAZY
|
||||
select MMU_GATHER_RCU_TABLE_FREE if SMP && ARM_LPAE
|
||||
select HAVE_REGS_AND_STACK_ACCESS_API
|
||||
select HAVE_RSEQ
|
||||
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
|
||||
index 7f092cb55a41..ffcbf8ebed4b 100644
|
||||
--- a/arch/arm/include/asm/thread_info.h
|
||||
+++ b/arch/arm/include/asm/thread_info.h
|
||||
@@ -62,6 +62,7 @@ struct cpu_context_save {
|
||||
struct thread_info {
|
||||
unsigned long flags; /* low level flags */
|
||||
int preempt_count; /* 0 => preemptable, <0 => bug */
|
||||
+ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
|
||||
__u32 cpu; /* cpu */
|
||||
__u32 cpu_domain; /* cpu domain */
|
||||
struct cpu_context_save cpu_context; /* cpu context */
|
||||
@@ -129,6 +130,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
|
||||
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
|
||||
#define TIF_UPROBE 3 /* breakpointed or singlestepping */
|
||||
#define TIF_NOTIFY_SIGNAL 4 /* signal notifications exist */
|
||||
+#define TIF_NEED_RESCHED_LAZY 5
|
||||
|
||||
#define TIF_USING_IWMMXT 17
|
||||
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
|
||||
@@ -148,6 +150,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
|
||||
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
|
||||
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
|
||||
#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
|
||||
+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
|
||||
#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
|
||||
|
||||
/* Checks for any syscall work in entry-common.S */
|
||||
@@ -157,7 +160,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
|
||||
/*
|
||||
* Change these and you break ASM code in entry-common.S
|
||||
*/
|
||||
-#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
|
||||
+#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | \
|
||||
+ _TIF_SIGPENDING | \
|
||||
_TIF_NOTIFY_RESUME | _TIF_UPROBE | \
|
||||
_TIF_NOTIFY_SIGNAL)
|
||||
|
||||
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
|
||||
index 2c8d76fd7c66..c3bdec7d2df9 100644
|
||||
--- a/arch/arm/kernel/asm-offsets.c
|
||||
+++ b/arch/arm/kernel/asm-offsets.c
|
||||
@@ -43,6 +43,7 @@ int main(void)
|
||||
BLANK();
|
||||
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
|
||||
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
|
||||
+ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
|
||||
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
|
||||
DEFINE(TI_CPU_DOMAIN, offsetof(struct thread_info, cpu_domain));
|
||||
DEFINE(TI_CPU_SAVE, offsetof(struct thread_info, cpu_context));
|
||||
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
|
||||
index c39303e5c234..cfb4660e9fea 100644
|
||||
--- a/arch/arm/kernel/entry-armv.S
|
||||
+++ b/arch/arm/kernel/entry-armv.S
|
||||
@@ -222,11 +222,18 @@ ENDPROC(__dabt_svc)
|
||||
|
||||
#ifdef CONFIG_PREEMPTION
|
||||
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
|
||||
- ldr r0, [tsk, #TI_FLAGS] @ get flags
|
||||
teq r8, #0 @ if preempt count != 0
|
||||
+ bne 1f @ return from exeption
|
||||
+ ldr r0, [tsk, #TI_FLAGS] @ get flags
|
||||
+ tst r0, #_TIF_NEED_RESCHED @ if NEED_RESCHED is set
|
||||
+ blne svc_preempt @ preempt!
|
||||
+
|
||||
+ ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
|
||||
+ teq r8, #0 @ if preempt lazy count != 0
|
||||
movne r0, #0 @ force flags to 0
|
||||
- tst r0, #_TIF_NEED_RESCHED
|
||||
+ tst r0, #_TIF_NEED_RESCHED_LAZY
|
||||
blne svc_preempt
|
||||
+1:
|
||||
#endif
|
||||
|
||||
svc_exit r5, irq = 1 @ return from exception
|
||||
@@ -241,8 +248,14 @@ ENDPROC(__irq_svc)
|
||||
1: bl preempt_schedule_irq @ irq en/disable is done inside
|
||||
ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
|
||||
tst r0, #_TIF_NEED_RESCHED
|
||||
+ bne 1b
|
||||
+ tst r0, #_TIF_NEED_RESCHED_LAZY
|
||||
reteq r8 @ go again
|
||||
- b 1b
|
||||
+ ldr r0, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
|
||||
+ teq r0, #0 @ if preempt lazy count != 0
|
||||
+ beq 1b
|
||||
+ ret r8 @ go again
|
||||
+
|
||||
#endif
|
||||
|
||||
__und_fault:
|
||||
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
|
||||
index e07f359254c3..b50a3248e79f 100644
|
||||
--- a/arch/arm/kernel/signal.c
|
||||
+++ b/arch/arm/kernel/signal.c
|
||||
@@ -607,7 +607,8 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
|
||||
*/
|
||||
trace_hardirqs_off();
|
||||
do {
|
||||
- if (likely(thread_flags & _TIF_NEED_RESCHED)) {
|
||||
+ if (likely(thread_flags & (_TIF_NEED_RESCHED |
|
||||
+ _TIF_NEED_RESCHED_LAZY))) {
|
||||
schedule();
|
||||
} else {
|
||||
if (unlikely(!user_mode(regs)))
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,57 @@
|
|||
From 6dcc66687c18aa95a2fc928da69a9f68f97b08c2 Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:26 +0206
|
||||
Subject: [PATCH 036/195] serial: 8250_exar: Use port lock wrappers
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-10-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/8250/8250_exar.c | 4 ++--
|
||||
1 file changed, 2 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
|
||||
index 8385be846840..91cf690b7c71 100644
|
||||
--- a/drivers/tty/serial/8250/8250_exar.c
|
||||
+++ b/drivers/tty/serial/8250/8250_exar.c
|
||||
@@ -201,9 +201,9 @@ static int xr17v35x_startup(struct uart_port *port)
|
||||
*
|
||||
* Synchronize UART_IER access against the console.
|
||||
*/
|
||||
- spin_lock_irq(&port->lock);
|
||||
+ uart_port_lock_irq(port);
|
||||
serial_port_out(port, UART_IER, 0);
|
||||
- spin_unlock_irq(&port->lock);
|
||||
+ uart_port_unlock_irq(port);
|
||||
|
||||
return serial8250_do_startup(port);
|
||||
}
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -1,117 +0,0 @@
|
|||
From 27570b59eda95f93f62c30d343f9c913a9d2a137 Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 1 Nov 2012 10:14:11 +0100
|
||||
Subject: [PATCH 37/62] powerpc: Add support for lazy preemption
|
||||
|
||||
Implement the powerpc pieces for lazy preempt.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
arch/powerpc/Kconfig | 1 +
|
||||
arch/powerpc/include/asm/thread_info.h | 8 ++++++++
|
||||
arch/powerpc/kernel/interrupt.c | 8 ++++++--
|
||||
3 files changed, 15 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
|
||||
index 6050e6e10d32..0eff864d6ec3 100644
|
||||
--- a/arch/powerpc/Kconfig
|
||||
+++ b/arch/powerpc/Kconfig
|
||||
@@ -242,6 +242,7 @@ config PPC
|
||||
select HAVE_PERF_EVENTS_NMI if PPC64
|
||||
select HAVE_PERF_REGS
|
||||
select HAVE_PERF_USER_STACK_DUMP
|
||||
+ select HAVE_PREEMPT_LAZY
|
||||
select HAVE_REGS_AND_STACK_ACCESS_API
|
||||
select HAVE_RELIABLE_STACKTRACE
|
||||
select HAVE_RSEQ
|
||||
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
|
||||
index af58f1ed3952..520864de8bb2 100644
|
||||
--- a/arch/powerpc/include/asm/thread_info.h
|
||||
+++ b/arch/powerpc/include/asm/thread_info.h
|
||||
@@ -53,6 +53,8 @@
|
||||
struct thread_info {
|
||||
int preempt_count; /* 0 => preemptable,
|
||||
<0 => BUG */
|
||||
+ int preempt_lazy_count; /* 0 => preemptable,
|
||||
+ <0 => BUG */
|
||||
#ifdef CONFIG_SMP
|
||||
unsigned int cpu;
|
||||
#endif
|
||||
@@ -77,6 +79,7 @@ struct thread_info {
|
||||
#define INIT_THREAD_INFO(tsk) \
|
||||
{ \
|
||||
.preempt_count = INIT_PREEMPT_COUNT, \
|
||||
+ .preempt_lazy_count = 0, \
|
||||
.flags = 0, \
|
||||
}
|
||||
|
||||
@@ -102,6 +105,7 @@ void arch_setup_new_exec(void);
|
||||
#define TIF_PATCH_PENDING 6 /* pending live patching update */
|
||||
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
|
||||
#define TIF_SINGLESTEP 8 /* singlestepping active */
|
||||
+#define TIF_NEED_RESCHED_LAZY 9 /* lazy rescheduling necessary */
|
||||
#define TIF_SECCOMP 10 /* secure computing */
|
||||
#define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
|
||||
#define TIF_NOERROR 12 /* Force successful syscall return */
|
||||
@@ -117,6 +121,7 @@ void arch_setup_new_exec(void);
|
||||
#define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling TIF_NEED_RESCHED */
|
||||
#define TIF_32BIT 20 /* 32 bit binary */
|
||||
|
||||
+
|
||||
/* as above, but as bit values */
|
||||
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
|
||||
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
|
||||
@@ -128,6 +133,7 @@ void arch_setup_new_exec(void);
|
||||
#define _TIF_PATCH_PENDING (1<<TIF_PATCH_PENDING)
|
||||
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
|
||||
#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
|
||||
+#define _TIF_NEED_RESCHED_LAZY (1<<TIF_NEED_RESCHED_LAZY)
|
||||
#define _TIF_SECCOMP (1<<TIF_SECCOMP)
|
||||
#define _TIF_RESTOREALL (1<<TIF_RESTOREALL)
|
||||
#define _TIF_NOERROR (1<<TIF_NOERROR)
|
||||
@@ -141,10 +147,12 @@ void arch_setup_new_exec(void);
|
||||
_TIF_SYSCALL_EMU)
|
||||
|
||||
#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
|
||||
+ _TIF_NEED_RESCHED_LAZY | \
|
||||
_TIF_NOTIFY_RESUME | _TIF_UPROBE | \
|
||||
_TIF_RESTORE_TM | _TIF_PATCH_PENDING | \
|
||||
_TIF_NOTIFY_SIGNAL)
|
||||
#define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
|
||||
+#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
|
||||
|
||||
/* Bits in local_flags */
|
||||
/* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
|
||||
diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
|
||||
index cf770d86c03c..2c454731c250 100644
|
||||
--- a/arch/powerpc/kernel/interrupt.c
|
||||
+++ b/arch/powerpc/kernel/interrupt.c
|
||||
@@ -186,7 +186,7 @@ interrupt_exit_user_prepare_main(unsigned long ret, struct pt_regs *regs)
|
||||
ti_flags = read_thread_flags();
|
||||
while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
|
||||
local_irq_enable();
|
||||
- if (ti_flags & _TIF_NEED_RESCHED) {
|
||||
+ if (ti_flags & _TIF_NEED_RESCHED_MASK) {
|
||||
schedule();
|
||||
} else {
|
||||
/*
|
||||
@@ -397,11 +397,15 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
|
||||
/* Returning to a kernel context with local irqs enabled. */
|
||||
WARN_ON_ONCE(!(regs->msr & MSR_EE));
|
||||
again:
|
||||
- if (IS_ENABLED(CONFIG_PREEMPT)) {
|
||||
+ if (IS_ENABLED(CONFIG_PREEMPTION)) {
|
||||
/* Return to preemptible kernel context */
|
||||
if (unlikely(read_thread_flags() & _TIF_NEED_RESCHED)) {
|
||||
if (preempt_count() == 0)
|
||||
preempt_schedule_irq();
|
||||
+ } else if (unlikely(current_thread_info()->flags & _TIF_NEED_RESCHED_LAZY)) {
|
||||
+ if ((preempt_count() == 0) &&
|
||||
+ (current_thread_info()->preempt_lazy_count == 0))
|
||||
+ preempt_schedule_irq();
|
||||
}
|
||||
}
|
||||
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,68 @@
|
|||
From b7a99e7acecb90b637d07620ed35754c7152004e Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:27 +0206
|
||||
Subject: [PATCH 037/195] serial: 8250_fsl: Use port lock wrappers
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-11-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/8250/8250_fsl.c | 6 +++---
|
||||
1 file changed, 3 insertions(+), 3 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c
|
||||
index 6af4e1c1210a..f522eb5026c9 100644
|
||||
--- a/drivers/tty/serial/8250/8250_fsl.c
|
||||
+++ b/drivers/tty/serial/8250/8250_fsl.c
|
||||
@@ -30,11 +30,11 @@ int fsl8250_handle_irq(struct uart_port *port)
|
||||
unsigned int iir;
|
||||
struct uart_8250_port *up = up_to_u8250p(port);
|
||||
|
||||
- spin_lock_irqsave(&up->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&up->port, &flags);
|
||||
|
||||
iir = port->serial_in(port, UART_IIR);
|
||||
if (iir & UART_IIR_NO_INT) {
|
||||
- spin_unlock_irqrestore(&up->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&up->port, flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -54,7 +54,7 @@ int fsl8250_handle_irq(struct uart_port *port)
|
||||
if (unlikely(up->lsr_saved_flags & UART_LSR_BI)) {
|
||||
up->lsr_saved_flags &= ~UART_LSR_BI;
|
||||
port->serial_in(port, UART_RX);
|
||||
- spin_unlock_irqrestore(&up->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&up->port, flags);
|
||||
return 1;
|
||||
}
|
||||
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -1,145 +0,0 @@
|
|||
From 87fb6813fa0a5ecff7fd2c657b37cfe97733ae90 Mon Sep 17 00:00:00 2001
|
||||
From: Anders Roxell <anders.roxell@linaro.org>
|
||||
Date: Thu, 14 May 2015 17:52:17 +0200
|
||||
Subject: [PATCH 38/62] arch/arm64: Add lazy preempt support
|
||||
|
||||
arm64 is missing support for PREEMPT_RT. The main feature which is
|
||||
lacking is support for lazy preemption. The arch-specific entry code,
|
||||
thread information structure definitions, and associated data tables
|
||||
have to be extended to provide this support. Then the Kconfig file has
|
||||
to be extended to indicate the support is available, and also to
|
||||
indicate that support for full RT preemption is now available.
|
||||
|
||||
Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
arch/arm64/Kconfig | 1 +
|
||||
arch/arm64/include/asm/preempt.h | 25 ++++++++++++++++++++++++-
|
||||
arch/arm64/include/asm/thread_info.h | 8 +++++++-
|
||||
arch/arm64/kernel/asm-offsets.c | 1 +
|
||||
arch/arm64/kernel/signal.c | 2 +-
|
||||
5 files changed, 34 insertions(+), 3 deletions(-)
|
||||
|
||||
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
|
||||
index ea70eb960565..6e16670a7f43 100644
|
||||
--- a/arch/arm64/Kconfig
|
||||
+++ b/arch/arm64/Kconfig
|
||||
@@ -199,6 +199,7 @@ config ARM64
|
||||
select HAVE_PERF_USER_STACK_DUMP
|
||||
select HAVE_PREEMPT_DYNAMIC_KEY
|
||||
select HAVE_REGS_AND_STACK_ACCESS_API
|
||||
+ select HAVE_PREEMPT_LAZY
|
||||
select HAVE_POSIX_CPU_TIMERS_TASK_WORK
|
||||
select HAVE_FUNCTION_ARG_ACCESS_API
|
||||
select MMU_GATHER_RCU_TABLE_FREE
|
||||
diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
|
||||
index 0159b625cc7f..a5486918e5ee 100644
|
||||
--- a/arch/arm64/include/asm/preempt.h
|
||||
+++ b/arch/arm64/include/asm/preempt.h
|
||||
@@ -71,13 +71,36 @@ static inline bool __preempt_count_dec_and_test(void)
|
||||
* interrupt occurring between the non-atomic READ_ONCE/WRITE_ONCE
|
||||
* pair.
|
||||
*/
|
||||
- return !pc || !READ_ONCE(ti->preempt_count);
|
||||
+ if (!pc || !READ_ONCE(ti->preempt_count))
|
||||
+ return true;
|
||||
+#ifdef CONFIG_PREEMPT_LAZY
|
||||
+ if ((pc & ~PREEMPT_NEED_RESCHED))
|
||||
+ return false;
|
||||
+ if (current_thread_info()->preempt_lazy_count)
|
||||
+ return false;
|
||||
+ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
|
||||
+#else
|
||||
+ return false;
|
||||
+#endif
|
||||
}
|
||||
|
||||
static inline bool should_resched(int preempt_offset)
|
||||
{
|
||||
+#ifdef CONFIG_PREEMPT_LAZY
|
||||
+ u64 pc = READ_ONCE(current_thread_info()->preempt_count);
|
||||
+ if (pc == preempt_offset)
|
||||
+ return true;
|
||||
+
|
||||
+ if ((pc & ~PREEMPT_NEED_RESCHED) != preempt_offset)
|
||||
+ return false;
|
||||
+
|
||||
+ if (current_thread_info()->preempt_lazy_count)
|
||||
+ return false;
|
||||
+ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
|
||||
+#else
|
||||
u64 pc = READ_ONCE(current_thread_info()->preempt_count);
|
||||
return pc == preempt_offset;
|
||||
+#endif
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PREEMPTION
|
||||
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
|
||||
index 848739c15de8..4b7148fd5551 100644
|
||||
--- a/arch/arm64/include/asm/thread_info.h
|
||||
+++ b/arch/arm64/include/asm/thread_info.h
|
||||
@@ -26,6 +26,7 @@ struct thread_info {
|
||||
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
|
||||
u64 ttbr0; /* saved TTBR0_EL1 */
|
||||
#endif
|
||||
+ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
|
||||
union {
|
||||
u64 preempt_count; /* 0 => preemptible, <0 => bug */
|
||||
struct {
|
||||
@@ -68,6 +69,7 @@ int arch_dup_task_struct(struct task_struct *dst,
|
||||
#define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */
|
||||
#define TIF_MTE_ASYNC_FAULT 5 /* MTE Asynchronous Tag Check Fault */
|
||||
#define TIF_NOTIFY_SIGNAL 6 /* signal notifications exist */
|
||||
+#define TIF_NEED_RESCHED_LAZY 7
|
||||
#define TIF_SYSCALL_TRACE 8 /* syscall trace active */
|
||||
#define TIF_SYSCALL_AUDIT 9 /* syscall auditing */
|
||||
#define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */
|
||||
@@ -100,8 +102,10 @@ int arch_dup_task_struct(struct task_struct *dst,
|
||||
#define _TIF_SVE (1 << TIF_SVE)
|
||||
#define _TIF_MTE_ASYNC_FAULT (1 << TIF_MTE_ASYNC_FAULT)
|
||||
#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
|
||||
+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
|
||||
|
||||
-#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
|
||||
+#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | \
|
||||
+ _TIF_SIGPENDING | \
|
||||
_TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
|
||||
_TIF_UPROBE | _TIF_MTE_ASYNC_FAULT | \
|
||||
_TIF_NOTIFY_SIGNAL)
|
||||
@@ -110,6 +114,8 @@ int arch_dup_task_struct(struct task_struct *dst,
|
||||
_TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
|
||||
_TIF_SYSCALL_EMU)
|
||||
|
||||
+#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
|
||||
+
|
||||
#ifdef CONFIG_SHADOW_CALL_STACK
|
||||
#define INIT_SCS \
|
||||
.scs_base = init_shadow_call_stack, \
|
||||
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
|
||||
index 1197e7679882..e74c0415f67e 100644
|
||||
--- a/arch/arm64/kernel/asm-offsets.c
|
||||
+++ b/arch/arm64/kernel/asm-offsets.c
|
||||
@@ -32,6 +32,7 @@ int main(void)
|
||||
DEFINE(TSK_TI_CPU, offsetof(struct task_struct, thread_info.cpu));
|
||||
DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags));
|
||||
DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count));
|
||||
+ DEFINE(TSK_TI_PREEMPT_LAZY, offsetof(struct task_struct, thread_info.preempt_lazy_count));
|
||||
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
|
||||
DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0));
|
||||
#endif
|
||||
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
|
||||
index 82f4572c8ddf..2a606c7bf025 100644
|
||||
--- a/arch/arm64/kernel/signal.c
|
||||
+++ b/arch/arm64/kernel/signal.c
|
||||
@@ -1108,7 +1108,7 @@ static void do_signal(struct pt_regs *regs)
|
||||
void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
|
||||
{
|
||||
do {
|
||||
- if (thread_flags & _TIF_NEED_RESCHED) {
|
||||
+ if (thread_flags & _TIF_NEED_RESCHED_MASK) {
|
||||
/* Unmask Debug and SError for the next task */
|
||||
local_daif_restore(DAIF_PROCCTX_NOIRQ);
|
||||
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,82 @@
|
|||
From a91dd09d7fdf4ded721028676a9f5e44a2a754af Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:28 +0206
|
||||
Subject: [PATCH 038/195] serial: 8250_mtk: Use port lock wrappers
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Reviewed-by: Chen-Yu Tsai <wenst@chromium.org>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-12-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/8250/8250_mtk.c | 8 ++++----
|
||||
1 file changed, 4 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
|
||||
index 74da5676ce67..23457daae8a1 100644
|
||||
--- a/drivers/tty/serial/8250/8250_mtk.c
|
||||
+++ b/drivers/tty/serial/8250/8250_mtk.c
|
||||
@@ -102,7 +102,7 @@ static void mtk8250_dma_rx_complete(void *param)
|
||||
if (data->rx_status == DMA_RX_SHUTDOWN)
|
||||
return;
|
||||
|
||||
- spin_lock_irqsave(&up->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&up->port, &flags);
|
||||
|
||||
dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
|
||||
total = dma->rx_size - state.residue;
|
||||
@@ -128,7 +128,7 @@ static void mtk8250_dma_rx_complete(void *param)
|
||||
|
||||
mtk8250_rx_dma(up);
|
||||
|
||||
- spin_unlock_irqrestore(&up->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&up->port, flags);
|
||||
}
|
||||
|
||||
static void mtk8250_rx_dma(struct uart_8250_port *up)
|
||||
@@ -368,7 +368,7 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
|
||||
* Ok, we're now changing the port state. Do it with
|
||||
* interrupts disabled.
|
||||
*/
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
/*
|
||||
* Update the per-port timeout.
|
||||
@@ -416,7 +416,7 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
|
||||
if (uart_console(port))
|
||||
up->port.cons->cflag = termios->c_cflag;
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
/* Don't rewrite B0 */
|
||||
if (tty_termios_baud_rate(termios))
|
||||
tty_termios_encode_baud_rate(termios, baud, baud);
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,241 @@
|
|||
From 4b82e23b67db0cbe2a97110597d9c97137fd3c5c Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:29 +0206
|
||||
Subject: [PATCH 039/195] serial: 8250_omap: Use port lock wrappers
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-13-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/8250/8250_omap.c | 52 ++++++++++++++---------------
|
||||
1 file changed, 26 insertions(+), 26 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
|
||||
index 346167afe9e1..db5519ce0192 100644
|
||||
--- a/drivers/tty/serial/8250/8250_omap.c
|
||||
+++ b/drivers/tty/serial/8250/8250_omap.c
|
||||
@@ -401,7 +401,7 @@ static void omap_8250_set_termios(struct uart_port *port,
|
||||
* interrupts disabled.
|
||||
*/
|
||||
pm_runtime_get_sync(port->dev);
|
||||
- spin_lock_irq(&port->lock);
|
||||
+ uart_port_lock_irq(port);
|
||||
|
||||
/*
|
||||
* Update the per-port timeout.
|
||||
@@ -504,7 +504,7 @@ static void omap_8250_set_termios(struct uart_port *port,
|
||||
}
|
||||
omap8250_restore_regs(up);
|
||||
|
||||
- spin_unlock_irq(&up->port.lock);
|
||||
+ uart_port_unlock_irq(&up->port);
|
||||
pm_runtime_mark_last_busy(port->dev);
|
||||
pm_runtime_put_autosuspend(port->dev);
|
||||
|
||||
@@ -529,7 +529,7 @@ static void omap_8250_pm(struct uart_port *port, unsigned int state,
|
||||
pm_runtime_get_sync(port->dev);
|
||||
|
||||
/* Synchronize UART_IER access against the console. */
|
||||
- spin_lock_irq(&port->lock);
|
||||
+ uart_port_lock_irq(port);
|
||||
|
||||
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
|
||||
efr = serial_in(up, UART_EFR);
|
||||
@@ -541,7 +541,7 @@ static void omap_8250_pm(struct uart_port *port, unsigned int state,
|
||||
serial_out(up, UART_EFR, efr);
|
||||
serial_out(up, UART_LCR, 0);
|
||||
|
||||
- spin_unlock_irq(&port->lock);
|
||||
+ uart_port_unlock_irq(port);
|
||||
|
||||
pm_runtime_mark_last_busy(port->dev);
|
||||
pm_runtime_put_autosuspend(port->dev);
|
||||
@@ -660,7 +660,7 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id)
|
||||
unsigned long delay;
|
||||
|
||||
/* Synchronize UART_IER access against the console. */
|
||||
- spin_lock(&port->lock);
|
||||
+ uart_port_lock(port);
|
||||
up->ier = port->serial_in(port, UART_IER);
|
||||
if (up->ier & (UART_IER_RLSI | UART_IER_RDI)) {
|
||||
port->ops->stop_rx(port);
|
||||
@@ -670,7 +670,7 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id)
|
||||
*/
|
||||
cancel_delayed_work(&up->overrun_backoff);
|
||||
}
|
||||
- spin_unlock(&port->lock);
|
||||
+ uart_port_unlock(port);
|
||||
|
||||
delay = msecs_to_jiffies(up->overrun_backoff_time_ms);
|
||||
schedule_delayed_work(&up->overrun_backoff, delay);
|
||||
@@ -717,10 +717,10 @@ static int omap_8250_startup(struct uart_port *port)
|
||||
}
|
||||
|
||||
/* Synchronize UART_IER access against the console. */
|
||||
- spin_lock_irq(&port->lock);
|
||||
+ uart_port_lock_irq(port);
|
||||
up->ier = UART_IER_RLSI | UART_IER_RDI;
|
||||
serial_out(up, UART_IER, up->ier);
|
||||
- spin_unlock_irq(&port->lock);
|
||||
+ uart_port_unlock_irq(port);
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
up->capabilities |= UART_CAP_RPM;
|
||||
@@ -733,9 +733,9 @@ static int omap_8250_startup(struct uart_port *port)
|
||||
serial_out(up, UART_OMAP_WER, priv->wer);
|
||||
|
||||
if (up->dma && !(priv->habit & UART_HAS_EFR2)) {
|
||||
- spin_lock_irq(&port->lock);
|
||||
+ uart_port_lock_irq(port);
|
||||
up->dma->rx_dma(up);
|
||||
- spin_unlock_irq(&port->lock);
|
||||
+ uart_port_unlock_irq(port);
|
||||
}
|
||||
|
||||
enable_irq(up->port.irq);
|
||||
@@ -761,10 +761,10 @@ static void omap_8250_shutdown(struct uart_port *port)
|
||||
serial_out(up, UART_OMAP_EFR2, 0x0);
|
||||
|
||||
/* Synchronize UART_IER access against the console. */
|
||||
- spin_lock_irq(&port->lock);
|
||||
+ uart_port_lock_irq(port);
|
||||
up->ier = 0;
|
||||
serial_out(up, UART_IER, 0);
|
||||
- spin_unlock_irq(&port->lock);
|
||||
+ uart_port_unlock_irq(port);
|
||||
disable_irq_nosync(up->port.irq);
|
||||
dev_pm_clear_wake_irq(port->dev);
|
||||
|
||||
@@ -789,10 +789,10 @@ static void omap_8250_throttle(struct uart_port *port)
|
||||
|
||||
pm_runtime_get_sync(port->dev);
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
port->ops->stop_rx(port);
|
||||
priv->throttled = true;
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
|
||||
pm_runtime_mark_last_busy(port->dev);
|
||||
pm_runtime_put_autosuspend(port->dev);
|
||||
@@ -807,14 +807,14 @@ static void omap_8250_unthrottle(struct uart_port *port)
|
||||
pm_runtime_get_sync(port->dev);
|
||||
|
||||
/* Synchronize UART_IER access against the console. */
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
priv->throttled = false;
|
||||
if (up->dma)
|
||||
up->dma->rx_dma(up);
|
||||
up->ier |= UART_IER_RLSI | UART_IER_RDI;
|
||||
port->read_status_mask |= UART_LSR_DR;
|
||||
serial_out(up, UART_IER, up->ier);
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
|
||||
pm_runtime_mark_last_busy(port->dev);
|
||||
pm_runtime_put_autosuspend(port->dev);
|
||||
@@ -958,7 +958,7 @@ static void __dma_rx_complete(void *param)
|
||||
unsigned long flags;
|
||||
|
||||
/* Synchronize UART_IER access against the console. */
|
||||
- spin_lock_irqsave(&p->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&p->port, &flags);
|
||||
|
||||
/*
|
||||
* If the tx status is not DMA_COMPLETE, then this is a delayed
|
||||
@@ -967,7 +967,7 @@ static void __dma_rx_complete(void *param)
|
||||
*/
|
||||
if (dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state) !=
|
||||
DMA_COMPLETE) {
|
||||
- spin_unlock_irqrestore(&p->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&p->port, flags);
|
||||
return;
|
||||
}
|
||||
__dma_rx_do_complete(p);
|
||||
@@ -978,7 +978,7 @@ static void __dma_rx_complete(void *param)
|
||||
omap_8250_rx_dma(p);
|
||||
}
|
||||
|
||||
- spin_unlock_irqrestore(&p->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&p->port, flags);
|
||||
}
|
||||
|
||||
static void omap_8250_rx_dma_flush(struct uart_8250_port *p)
|
||||
@@ -1083,7 +1083,7 @@ static void omap_8250_dma_tx_complete(void *param)
|
||||
dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
|
||||
UART_XMIT_SIZE, DMA_TO_DEVICE);
|
||||
|
||||
- spin_lock_irqsave(&p->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&p->port, &flags);
|
||||
|
||||
dma->tx_running = 0;
|
||||
|
||||
@@ -1112,7 +1112,7 @@ static void omap_8250_dma_tx_complete(void *param)
|
||||
serial8250_set_THRI(p);
|
||||
}
|
||||
|
||||
- spin_unlock_irqrestore(&p->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&p->port, flags);
|
||||
}
|
||||
|
||||
static int omap_8250_tx_dma(struct uart_8250_port *p)
|
||||
@@ -1278,7 +1278,7 @@ static int omap_8250_dma_handle_irq(struct uart_port *port)
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
- spin_lock(&port->lock);
|
||||
+ uart_port_lock(port);
|
||||
|
||||
status = serial_port_in(port, UART_LSR);
|
||||
|
||||
@@ -1758,15 +1758,15 @@ static int omap8250_runtime_resume(struct device *dev)
|
||||
up = serial8250_get_port(priv->line);
|
||||
|
||||
if (up && omap8250_lost_context(up)) {
|
||||
- spin_lock_irq(&up->port.lock);
|
||||
+ uart_port_lock_irq(&up->port);
|
||||
omap8250_restore_regs(up);
|
||||
- spin_unlock_irq(&up->port.lock);
|
||||
+ uart_port_unlock_irq(&up->port);
|
||||
}
|
||||
|
||||
if (up && up->dma && up->dma->rxchan && !(priv->habit & UART_HAS_EFR2)) {
|
||||
- spin_lock_irq(&up->port.lock);
|
||||
+ uart_port_lock_irq(&up->port);
|
||||
omap_8250_rx_dma(up);
|
||||
- spin_unlock_irq(&up->port.lock);
|
||||
+ uart_port_unlock_irq(&up->port);
|
||||
}
|
||||
|
||||
priv->latency = priv->calc_latency;
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,71 @@
|
|||
From b52d5efa5d1afba0b8e8c56b3fd6af134643a292 Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:30 +0206
|
||||
Subject: [PATCH 040/195] serial: 8250_pci1xxxx: Use port lock wrappers
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-14-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/8250/8250_pci1xxxx.c | 8 ++++----
|
||||
1 file changed, 4 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/8250/8250_pci1xxxx.c b/drivers/tty/serial/8250/8250_pci1xxxx.c
|
||||
index a3b25779d921..53e238c8cc89 100644
|
||||
--- a/drivers/tty/serial/8250/8250_pci1xxxx.c
|
||||
+++ b/drivers/tty/serial/8250/8250_pci1xxxx.c
|
||||
@@ -225,10 +225,10 @@ static bool pci1xxxx_port_suspend(int line)
|
||||
if (port->suspended == 0 && port->dev) {
|
||||
wakeup_mask = readb(up->port.membase + UART_WAKE_MASK_REG);
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
port->mctrl &= ~TIOCM_OUT2;
|
||||
port->ops->set_mctrl(port, port->mctrl);
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
|
||||
ret = (wakeup_mask & UART_WAKE_SRCS) != UART_WAKE_SRCS;
|
||||
}
|
||||
@@ -251,10 +251,10 @@ static void pci1xxxx_port_resume(int line)
|
||||
writeb(UART_WAKE_SRCS, port->membase + UART_WAKE_REG);
|
||||
|
||||
if (port->suspended == 0) {
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
port->mctrl |= TIOCM_OUT2;
|
||||
port->ops->set_mctrl(port, port->mctrl);
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
mutex_unlock(&tport->mutex);
|
||||
}
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,138 @@
|
|||
From 174bb9e1e1fa3d4ef2c46054621435dbf7a7dc66 Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:31 +0206
|
||||
Subject: [PATCH 041/195] serial: altera_jtaguart: Use port lock wrappers
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-15-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/altera_jtaguart.c | 28 ++++++++++++++--------------
|
||||
1 file changed, 14 insertions(+), 14 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c
|
||||
index 5fab4c978891..7090b251dd4d 100644
|
||||
--- a/drivers/tty/serial/altera_jtaguart.c
|
||||
+++ b/drivers/tty/serial/altera_jtaguart.c
|
||||
@@ -147,14 +147,14 @@ static irqreturn_t altera_jtaguart_interrupt(int irq, void *data)
|
||||
isr = (readl(port->membase + ALTERA_JTAGUART_CONTROL_REG) >>
|
||||
ALTERA_JTAGUART_CONTROL_RI_OFF) & port->read_status_mask;
|
||||
|
||||
- spin_lock(&port->lock);
|
||||
+ uart_port_lock(port);
|
||||
|
||||
if (isr & ALTERA_JTAGUART_CONTROL_RE_MSK)
|
||||
altera_jtaguart_rx_chars(port);
|
||||
if (isr & ALTERA_JTAGUART_CONTROL_WE_MSK)
|
||||
altera_jtaguart_tx_chars(port);
|
||||
|
||||
- spin_unlock(&port->lock);
|
||||
+ uart_port_unlock(port);
|
||||
|
||||
return IRQ_RETVAL(isr);
|
||||
}
|
||||
@@ -180,14 +180,14 @@ static int altera_jtaguart_startup(struct uart_port *port)
|
||||
return ret;
|
||||
}
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
/* Enable RX interrupts now */
|
||||
port->read_status_mask = ALTERA_JTAGUART_CONTROL_RE_MSK;
|
||||
writel(port->read_status_mask,
|
||||
port->membase + ALTERA_JTAGUART_CONTROL_REG);
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -196,14 +196,14 @@ static void altera_jtaguart_shutdown(struct uart_port *port)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
/* Disable all interrupts now */
|
||||
port->read_status_mask = 0;
|
||||
writel(port->read_status_mask,
|
||||
port->membase + ALTERA_JTAGUART_CONTROL_REG);
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
|
||||
free_irq(port->irq, port);
|
||||
}
|
||||
@@ -264,33 +264,33 @@ static void altera_jtaguart_console_putc(struct uart_port *port, unsigned char c
|
||||
unsigned long flags;
|
||||
u32 status;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
while (!altera_jtaguart_tx_space(port, &status)) {
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
|
||||
if ((status & ALTERA_JTAGUART_CONTROL_AC_MSK) == 0) {
|
||||
return; /* no connection activity */
|
||||
}
|
||||
|
||||
cpu_relax();
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
}
|
||||
writel(c, port->membase + ALTERA_JTAGUART_DATA_REG);
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
#else
|
||||
static void altera_jtaguart_console_putc(struct uart_port *port, unsigned char c)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
while (!altera_jtaguart_tx_space(port, NULL)) {
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
cpu_relax();
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
}
|
||||
writel(c, port->membase + ALTERA_JTAGUART_DATA_REG);
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
#endif
|
||||
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,121 @@
|
|||
From 9f6bc0c5b8dabce9436c34fea6a89469b652c41a Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:32 +0206
|
||||
Subject: [PATCH 042/195] serial: altera_uart: Use port lock wrappers
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-16-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/altera_uart.c | 20 ++++++++++----------
|
||||
1 file changed, 10 insertions(+), 10 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
|
||||
index a9c41942190c..77835ac68df2 100644
|
||||
--- a/drivers/tty/serial/altera_uart.c
|
||||
+++ b/drivers/tty/serial/altera_uart.c
|
||||
@@ -164,13 +164,13 @@ static void altera_uart_break_ctl(struct uart_port *port, int break_state)
|
||||
struct altera_uart *pp = container_of(port, struct altera_uart, port);
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
if (break_state == -1)
|
||||
pp->imr |= ALTERA_UART_CONTROL_TRBK_MSK;
|
||||
else
|
||||
pp->imr &= ~ALTERA_UART_CONTROL_TRBK_MSK;
|
||||
altera_uart_update_ctrl_reg(pp);
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
static void altera_uart_set_termios(struct uart_port *port,
|
||||
@@ -187,10 +187,10 @@ static void altera_uart_set_termios(struct uart_port *port,
|
||||
tty_termios_copy_hw(termios, old);
|
||||
tty_termios_encode_baud_rate(termios, baud, baud);
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
uart_update_timeout(port, termios->c_cflag, baud);
|
||||
altera_uart_writel(port, baudclk, ALTERA_UART_DIVISOR_REG);
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
|
||||
/*
|
||||
* FIXME: port->read_status_mask and port->ignore_status_mask
|
||||
@@ -264,12 +264,12 @@ static irqreturn_t altera_uart_interrupt(int irq, void *data)
|
||||
|
||||
isr = altera_uart_readl(port, ALTERA_UART_STATUS_REG) & pp->imr;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
if (isr & ALTERA_UART_STATUS_RRDY_MSK)
|
||||
altera_uart_rx_chars(port);
|
||||
if (isr & ALTERA_UART_STATUS_TRDY_MSK)
|
||||
altera_uart_tx_chars(port);
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
|
||||
return IRQ_RETVAL(isr);
|
||||
}
|
||||
@@ -313,13 +313,13 @@ static int altera_uart_startup(struct uart_port *port)
|
||||
}
|
||||
}
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
/* Enable RX interrupts now */
|
||||
pp->imr = ALTERA_UART_CONTROL_RRDY_MSK;
|
||||
altera_uart_update_ctrl_reg(pp);
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -329,13 +329,13 @@ static void altera_uart_shutdown(struct uart_port *port)
|
||||
struct altera_uart *pp = container_of(port, struct altera_uart, port);
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
/* Disable all interrupts now */
|
||||
pp->imr = 0;
|
||||
altera_uart_update_ctrl_reg(pp);
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
|
||||
if (port->irq)
|
||||
free_irq(port->irq, port);
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -1,59 +0,0 @@
|
|||
From 9d7551c43f81729a91f4b95812918e034efcbeb9 Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Tue, 8 Jan 2013 21:36:51 +0100
|
||||
Subject: [PATCH 42/62] tty/serial/pl011: Make the locking work on RT
|
||||
|
||||
The lock is a sleeping lock and local_irq_save() is not the optimsation
|
||||
we are looking for. Redo it to make it work on -RT and non-RT.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/amba-pl011.c | 17 +++++++++++------
|
||||
1 file changed, 11 insertions(+), 6 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
|
||||
index c74eaf2552c3..38eb30b8491b 100644
|
||||
--- a/drivers/tty/serial/amba-pl011.c
|
||||
+++ b/drivers/tty/serial/amba-pl011.c
|
||||
@@ -2316,18 +2316,24 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
|
||||
{
|
||||
struct uart_amba_port *uap = amba_ports[co->index];
|
||||
unsigned int old_cr = 0, new_cr;
|
||||
- unsigned long flags;
|
||||
+ unsigned long flags = 0;
|
||||
int locked = 1;
|
||||
|
||||
clk_enable(uap->clk);
|
||||
|
||||
- local_irq_save(flags);
|
||||
+ /*
|
||||
+ * local_irq_save(flags);
|
||||
+ *
|
||||
+ * This local_irq_save() is nonsense. If we come in via sysrq
|
||||
+ * handling then interrupts are already disabled. Aside of
|
||||
+ * that the port.sysrq check is racy on SMP regardless.
|
||||
+ */
|
||||
if (uap->port.sysrq)
|
||||
locked = 0;
|
||||
else if (oops_in_progress)
|
||||
- locked = spin_trylock(&uap->port.lock);
|
||||
+ locked = spin_trylock_irqsave(&uap->port.lock, flags);
|
||||
else
|
||||
- spin_lock(&uap->port.lock);
|
||||
+ spin_lock_irqsave(&uap->port.lock, flags);
|
||||
|
||||
/*
|
||||
* First save the CR then disable the interrupts
|
||||
@@ -2353,8 +2359,7 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
|
||||
pl011_write(old_cr, uap, REG_CR);
|
||||
|
||||
if (locked)
|
||||
- spin_unlock(&uap->port.lock);
|
||||
- local_irq_restore(flags);
|
||||
+ spin_unlock_irqrestore(&uap->port.lock, flags);
|
||||
|
||||
clk_disable(uap->clk);
|
||||
}
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,117 @@
|
|||
From c3a76ebbc1bc4735df825ef0639312c01bb0e07c Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:33 +0206
|
||||
Subject: [PATCH 043/195] serial: amba-pl010: Use port lock wrappers
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-17-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/amba-pl010.c | 20 ++++++++++----------
|
||||
1 file changed, 10 insertions(+), 10 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
|
||||
index b5a7404cbacb..eabbf8afc9b5 100644
|
||||
--- a/drivers/tty/serial/amba-pl010.c
|
||||
+++ b/drivers/tty/serial/amba-pl010.c
|
||||
@@ -207,7 +207,7 @@ static irqreturn_t pl010_int(int irq, void *dev_id)
|
||||
unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
|
||||
int handled = 0;
|
||||
|
||||
- spin_lock(&port->lock);
|
||||
+ uart_port_lock(port);
|
||||
|
||||
status = readb(port->membase + UART010_IIR);
|
||||
if (status) {
|
||||
@@ -228,7 +228,7 @@ static irqreturn_t pl010_int(int irq, void *dev_id)
|
||||
handled = 1;
|
||||
}
|
||||
|
||||
- spin_unlock(&port->lock);
|
||||
+ uart_port_unlock(port);
|
||||
|
||||
return IRQ_RETVAL(handled);
|
||||
}
|
||||
@@ -270,14 +270,14 @@ static void pl010_break_ctl(struct uart_port *port, int break_state)
|
||||
unsigned long flags;
|
||||
unsigned int lcr_h;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
lcr_h = readb(port->membase + UART010_LCRH);
|
||||
if (break_state == -1)
|
||||
lcr_h |= UART01x_LCRH_BRK;
|
||||
else
|
||||
lcr_h &= ~UART01x_LCRH_BRK;
|
||||
writel(lcr_h, port->membase + UART010_LCRH);
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
static int pl010_startup(struct uart_port *port)
|
||||
@@ -385,7 +385,7 @@ pl010_set_termios(struct uart_port *port, struct ktermios *termios,
|
||||
if (port->fifosize > 1)
|
||||
lcr_h |= UART01x_LCRH_FEN;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
/*
|
||||
* Update the per-port timeout.
|
||||
@@ -438,22 +438,22 @@ pl010_set_termios(struct uart_port *port, struct ktermios *termios,
|
||||
writel(lcr_h, port->membase + UART010_LCRH);
|
||||
writel(old_cr, port->membase + UART010_CR);
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
static void pl010_set_ldisc(struct uart_port *port, struct ktermios *termios)
|
||||
{
|
||||
if (termios->c_line == N_PPS) {
|
||||
port->flags |= UPF_HARDPPS_CD;
|
||||
- spin_lock_irq(&port->lock);
|
||||
+ uart_port_lock_irq(port);
|
||||
pl010_enable_ms(port);
|
||||
- spin_unlock_irq(&port->lock);
|
||||
+ uart_port_unlock_irq(port);
|
||||
} else {
|
||||
port->flags &= ~UPF_HARDPPS_CD;
|
||||
if (!UART_ENABLE_MS(port, termios->c_cflag)) {
|
||||
- spin_lock_irq(&port->lock);
|
||||
+ uart_port_lock_irq(port);
|
||||
pl010_disable_ms(port);
|
||||
- spin_unlock_irq(&port->lock);
|
||||
+ uart_port_unlock_irq(port);
|
||||
}
|
||||
}
|
||||
}
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,332 @@
|
|||
From 81e1e4da2b424a5f718ce4cfae82035f8de62531 Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:34 +0206
|
||||
Subject: [PATCH 044/195] serial: amba-pl011: Use port lock wrappers
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-18-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/amba-pl011.c | 72 ++++++++++++++++-----------------
|
||||
1 file changed, 36 insertions(+), 36 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
|
||||
index cd3913b933c7..aec67d740e44 100644
|
||||
--- a/drivers/tty/serial/amba-pl011.c
|
||||
+++ b/drivers/tty/serial/amba-pl011.c
|
||||
@@ -347,9 +347,9 @@ static int pl011_fifo_to_tty(struct uart_amba_port *uap)
|
||||
flag = TTY_FRAME;
|
||||
}
|
||||
|
||||
- spin_unlock(&uap->port.lock);
|
||||
+ uart_port_unlock(&uap->port);
|
||||
sysrq = uart_handle_sysrq_char(&uap->port, ch & 255);
|
||||
- spin_lock(&uap->port.lock);
|
||||
+ uart_port_lock(&uap->port);
|
||||
|
||||
if (!sysrq)
|
||||
uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
|
||||
@@ -544,7 +544,7 @@ static void pl011_dma_tx_callback(void *data)
|
||||
unsigned long flags;
|
||||
u16 dmacr;
|
||||
|
||||
- spin_lock_irqsave(&uap->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&uap->port, &flags);
|
||||
if (uap->dmatx.queued)
|
||||
dma_unmap_single(dmatx->chan->device->dev, dmatx->dma,
|
||||
dmatx->len, DMA_TO_DEVICE);
|
||||
@@ -565,7 +565,7 @@ static void pl011_dma_tx_callback(void *data)
|
||||
if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
|
||||
uart_circ_empty(&uap->port.state->xmit)) {
|
||||
uap->dmatx.queued = false;
|
||||
- spin_unlock_irqrestore(&uap->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&uap->port, flags);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -576,7 +576,7 @@ static void pl011_dma_tx_callback(void *data)
|
||||
*/
|
||||
pl011_start_tx_pio(uap);
|
||||
|
||||
- spin_unlock_irqrestore(&uap->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&uap->port, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1004,7 +1004,7 @@ static void pl011_dma_rx_callback(void *data)
|
||||
* routine to flush out the secondary DMA buffer while
|
||||
* we immediately trigger the next DMA job.
|
||||
*/
|
||||
- spin_lock_irq(&uap->port.lock);
|
||||
+ uart_port_lock_irq(&uap->port);
|
||||
/*
|
||||
* Rx data can be taken by the UART interrupts during
|
||||
* the DMA irq handler. So we check the residue here.
|
||||
@@ -1020,7 +1020,7 @@ static void pl011_dma_rx_callback(void *data)
|
||||
ret = pl011_dma_rx_trigger_dma(uap);
|
||||
|
||||
pl011_dma_rx_chars(uap, pending, lastbuf, false);
|
||||
- spin_unlock_irq(&uap->port.lock);
|
||||
+ uart_port_unlock_irq(&uap->port);
|
||||
/*
|
||||
* Do this check after we picked the DMA chars so we don't
|
||||
* get some IRQ immediately from RX.
|
||||
@@ -1086,11 +1086,11 @@ static void pl011_dma_rx_poll(struct timer_list *t)
|
||||
if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
|
||||
> uap->dmarx.poll_timeout) {
|
||||
|
||||
- spin_lock_irqsave(&uap->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&uap->port, &flags);
|
||||
pl011_dma_rx_stop(uap);
|
||||
uap->im |= UART011_RXIM;
|
||||
pl011_write(uap->im, uap, REG_IMSC);
|
||||
- spin_unlock_irqrestore(&uap->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&uap->port, flags);
|
||||
|
||||
uap->dmarx.running = false;
|
||||
dmaengine_terminate_all(rxchan);
|
||||
@@ -1186,10 +1186,10 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
|
||||
while (pl011_read(uap, REG_FR) & uap->vendor->fr_busy)
|
||||
cpu_relax();
|
||||
|
||||
- spin_lock_irq(&uap->port.lock);
|
||||
+ uart_port_lock_irq(&uap->port);
|
||||
uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
|
||||
pl011_write(uap->dmacr, uap, REG_DMACR);
|
||||
- spin_unlock_irq(&uap->port.lock);
|
||||
+ uart_port_unlock_irq(&uap->port);
|
||||
|
||||
if (uap->using_tx_dma) {
|
||||
/* In theory, this should already be done by pl011_dma_flush_buffer */
|
||||
@@ -1370,9 +1370,9 @@ static void pl011_throttle_rx(struct uart_port *port)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
pl011_stop_rx(port);
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
static void pl011_enable_ms(struct uart_port *port)
|
||||
@@ -1390,7 +1390,7 @@ __acquires(&uap->port.lock)
|
||||
{
|
||||
pl011_fifo_to_tty(uap);
|
||||
|
||||
- spin_unlock(&uap->port.lock);
|
||||
+ uart_port_unlock(&uap->port);
|
||||
tty_flip_buffer_push(&uap->port.state->port);
|
||||
/*
|
||||
* If we were temporarily out of DMA mode for a while,
|
||||
@@ -1415,7 +1415,7 @@ __acquires(&uap->port.lock)
|
||||
#endif
|
||||
}
|
||||
}
|
||||
- spin_lock(&uap->port.lock);
|
||||
+ uart_port_lock(&uap->port);
|
||||
}
|
||||
|
||||
static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
|
||||
@@ -1551,7 +1551,7 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
|
||||
unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
|
||||
int handled = 0;
|
||||
|
||||
- spin_lock_irqsave(&uap->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&uap->port, &flags);
|
||||
status = pl011_read(uap, REG_RIS) & uap->im;
|
||||
if (status) {
|
||||
do {
|
||||
@@ -1581,7 +1581,7 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
|
||||
handled = 1;
|
||||
}
|
||||
|
||||
- spin_unlock_irqrestore(&uap->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&uap->port, flags);
|
||||
|
||||
return IRQ_RETVAL(handled);
|
||||
}
|
||||
@@ -1653,14 +1653,14 @@ static void pl011_break_ctl(struct uart_port *port, int break_state)
|
||||
unsigned long flags;
|
||||
unsigned int lcr_h;
|
||||
|
||||
- spin_lock_irqsave(&uap->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&uap->port, &flags);
|
||||
lcr_h = pl011_read(uap, REG_LCRH_TX);
|
||||
if (break_state == -1)
|
||||
lcr_h |= UART01x_LCRH_BRK;
|
||||
else
|
||||
lcr_h &= ~UART01x_LCRH_BRK;
|
||||
pl011_write(lcr_h, uap, REG_LCRH_TX);
|
||||
- spin_unlock_irqrestore(&uap->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&uap->port, flags);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_CONSOLE_POLL
|
||||
@@ -1799,7 +1799,7 @@ static void pl011_enable_interrupts(struct uart_amba_port *uap)
|
||||
unsigned long flags;
|
||||
unsigned int i;
|
||||
|
||||
- spin_lock_irqsave(&uap->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&uap->port, &flags);
|
||||
|
||||
/* Clear out any spuriously appearing RX interrupts */
|
||||
pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);
|
||||
@@ -1821,7 +1821,7 @@ static void pl011_enable_interrupts(struct uart_amba_port *uap)
|
||||
if (!pl011_dma_rx_running(uap))
|
||||
uap->im |= UART011_RXIM;
|
||||
pl011_write(uap->im, uap, REG_IMSC);
|
||||
- spin_unlock_irqrestore(&uap->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&uap->port, flags);
|
||||
}
|
||||
|
||||
static void pl011_unthrottle_rx(struct uart_port *port)
|
||||
@@ -1829,7 +1829,7 @@ static void pl011_unthrottle_rx(struct uart_port *port)
|
||||
struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port);
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&uap->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&uap->port, &flags);
|
||||
|
||||
uap->im = UART011_RTIM;
|
||||
if (!pl011_dma_rx_running(uap))
|
||||
@@ -1837,7 +1837,7 @@ static void pl011_unthrottle_rx(struct uart_port *port)
|
||||
|
||||
pl011_write(uap->im, uap, REG_IMSC);
|
||||
|
||||
- spin_unlock_irqrestore(&uap->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&uap->port, flags);
|
||||
}
|
||||
|
||||
static int pl011_startup(struct uart_port *port)
|
||||
@@ -1857,7 +1857,7 @@ static int pl011_startup(struct uart_port *port)
|
||||
|
||||
pl011_write(uap->vendor->ifls, uap, REG_IFLS);
|
||||
|
||||
- spin_lock_irq(&uap->port.lock);
|
||||
+ uart_port_lock_irq(&uap->port);
|
||||
|
||||
cr = pl011_read(uap, REG_CR);
|
||||
cr &= UART011_CR_RTS | UART011_CR_DTR;
|
||||
@@ -1868,7 +1868,7 @@ static int pl011_startup(struct uart_port *port)
|
||||
|
||||
pl011_write(cr, uap, REG_CR);
|
||||
|
||||
- spin_unlock_irq(&uap->port.lock);
|
||||
+ uart_port_unlock_irq(&uap->port);
|
||||
|
||||
/*
|
||||
* initialise the old status of the modem signals
|
||||
@@ -1929,12 +1929,12 @@ static void pl011_disable_uart(struct uart_amba_port *uap)
|
||||
unsigned int cr;
|
||||
|
||||
uap->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
|
||||
- spin_lock_irq(&uap->port.lock);
|
||||
+ uart_port_lock_irq(&uap->port);
|
||||
cr = pl011_read(uap, REG_CR);
|
||||
cr &= UART011_CR_RTS | UART011_CR_DTR;
|
||||
cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
|
||||
pl011_write(cr, uap, REG_CR);
|
||||
- spin_unlock_irq(&uap->port.lock);
|
||||
+ uart_port_unlock_irq(&uap->port);
|
||||
|
||||
/*
|
||||
* disable break condition and fifos
|
||||
@@ -1946,14 +1946,14 @@ static void pl011_disable_uart(struct uart_amba_port *uap)
|
||||
|
||||
static void pl011_disable_interrupts(struct uart_amba_port *uap)
|
||||
{
|
||||
- spin_lock_irq(&uap->port.lock);
|
||||
+ uart_port_lock_irq(&uap->port);
|
||||
|
||||
/* mask all interrupts and clear all pending ones */
|
||||
uap->im = 0;
|
||||
pl011_write(uap->im, uap, REG_IMSC);
|
||||
pl011_write(0xffff, uap, REG_ICR);
|
||||
|
||||
- spin_unlock_irq(&uap->port.lock);
|
||||
+ uart_port_unlock_irq(&uap->port);
|
||||
}
|
||||
|
||||
static void pl011_shutdown(struct uart_port *port)
|
||||
@@ -2098,7 +2098,7 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
|
||||
|
||||
bits = tty_get_frame_size(termios->c_cflag);
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
/*
|
||||
* Update the per-port timeout.
|
||||
@@ -2172,7 +2172,7 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
|
||||
old_cr |= UART011_CR_RXE;
|
||||
pl011_write(old_cr, uap, REG_CR);
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
static void
|
||||
@@ -2190,10 +2190,10 @@ sbsa_uart_set_termios(struct uart_port *port, struct ktermios *termios,
|
||||
termios->c_cflag &= ~(CMSPAR | CRTSCTS);
|
||||
termios->c_cflag |= CS8 | CLOCAL;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
uart_update_timeout(port, CS8, uap->fixed_baud);
|
||||
pl011_setup_status_masks(port, termios);
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
static const char *pl011_type(struct uart_port *port)
|
||||
@@ -2332,9 +2332,9 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
|
||||
if (uap->port.sysrq)
|
||||
locked = 0;
|
||||
else if (oops_in_progress)
|
||||
- locked = spin_trylock(&uap->port.lock);
|
||||
+ locked = uart_port_trylock(&uap->port);
|
||||
else
|
||||
- spin_lock(&uap->port.lock);
|
||||
+ uart_port_lock(&uap->port);
|
||||
|
||||
/*
|
||||
* First save the CR then disable the interrupts
|
||||
@@ -2360,7 +2360,7 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
|
||||
pl011_write(old_cr, uap, REG_CR);
|
||||
|
||||
if (locked)
|
||||
- spin_unlock(&uap->port.lock);
|
||||
+ uart_port_unlock(&uap->port);
|
||||
local_irq_restore(flags);
|
||||
|
||||
clk_disable(uap->clk);
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,81 @@
|
|||
From 0418ef3fdcdb25846f9cddf7a69ce8c097a483be Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:35 +0206
|
||||
Subject: [PATCH 045/195] serial: apb: Use port lock wrappers
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-19-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/apbuart.c | 8 ++++----
|
||||
1 file changed, 4 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/apbuart.c b/drivers/tty/serial/apbuart.c
|
||||
index d3cb341f2c55..364599f256db 100644
|
||||
--- a/drivers/tty/serial/apbuart.c
|
||||
+++ b/drivers/tty/serial/apbuart.c
|
||||
@@ -133,7 +133,7 @@ static irqreturn_t apbuart_int(int irq, void *dev_id)
|
||||
struct uart_port *port = dev_id;
|
||||
unsigned int status;
|
||||
|
||||
- spin_lock(&port->lock);
|
||||
+ uart_port_lock(port);
|
||||
|
||||
status = UART_GET_STATUS(port);
|
||||
if (status & UART_STATUS_DR)
|
||||
@@ -141,7 +141,7 @@ static irqreturn_t apbuart_int(int irq, void *dev_id)
|
||||
if (status & UART_STATUS_THE)
|
||||
apbuart_tx_chars(port);
|
||||
|
||||
- spin_unlock(&port->lock);
|
||||
+ uart_port_unlock(port);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
@@ -228,7 +228,7 @@ static void apbuart_set_termios(struct uart_port *port,
|
||||
if (termios->c_cflag & CRTSCTS)
|
||||
cr |= UART_CTRL_FL;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
/* Update the per-port timeout. */
|
||||
uart_update_timeout(port, termios->c_cflag, baud);
|
||||
@@ -251,7 +251,7 @@ static void apbuart_set_termios(struct uart_port *port,
|
||||
UART_PUT_SCAL(port, quot);
|
||||
UART_PUT_CTRL(port, cr);
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
static const char *apbuart_type(struct uart_port *port)
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,149 @@
|
|||
From 44c925a7d696a264d3a2a82719ebf8edb6ed867d Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:36 +0206
|
||||
Subject: [PATCH 046/195] serial: ar933x: Use port lock wrappers
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-20-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/ar933x_uart.c | 26 +++++++++++++-------------
|
||||
1 file changed, 13 insertions(+), 13 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
|
||||
index 924c1a89347c..ffd234673177 100644
|
||||
--- a/drivers/tty/serial/ar933x_uart.c
|
||||
+++ b/drivers/tty/serial/ar933x_uart.c
|
||||
@@ -133,9 +133,9 @@ static unsigned int ar933x_uart_tx_empty(struct uart_port *port)
|
||||
unsigned long flags;
|
||||
unsigned int rdata;
|
||||
|
||||
- spin_lock_irqsave(&up->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&up->port, &flags);
|
||||
rdata = ar933x_uart_read(up, AR933X_UART_DATA_REG);
|
||||
- spin_unlock_irqrestore(&up->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&up->port, flags);
|
||||
|
||||
return (rdata & AR933X_UART_DATA_TX_CSR) ? 0 : TIOCSER_TEMT;
|
||||
}
|
||||
@@ -220,14 +220,14 @@ static void ar933x_uart_break_ctl(struct uart_port *port, int break_state)
|
||||
container_of(port, struct ar933x_uart_port, port);
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&up->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&up->port, &flags);
|
||||
if (break_state == -1)
|
||||
ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
|
||||
AR933X_UART_CS_TX_BREAK);
|
||||
else
|
||||
ar933x_uart_rmw_clear(up, AR933X_UART_CS_REG,
|
||||
AR933X_UART_CS_TX_BREAK);
|
||||
- spin_unlock_irqrestore(&up->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&up->port, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -318,7 +318,7 @@ static void ar933x_uart_set_termios(struct uart_port *port,
|
||||
* Ok, we're now changing the port state. Do it with
|
||||
* interrupts disabled.
|
||||
*/
|
||||
- spin_lock_irqsave(&up->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&up->port, &flags);
|
||||
|
||||
/* disable the UART */
|
||||
ar933x_uart_rmw_clear(up, AR933X_UART_CS_REG,
|
||||
@@ -352,7 +352,7 @@ static void ar933x_uart_set_termios(struct uart_port *port,
|
||||
AR933X_UART_CS_IF_MODE_M << AR933X_UART_CS_IF_MODE_S,
|
||||
AR933X_UART_CS_IF_MODE_DCE << AR933X_UART_CS_IF_MODE_S);
|
||||
|
||||
- spin_unlock_irqrestore(&up->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&up->port, flags);
|
||||
|
||||
if (tty_termios_baud_rate(new))
|
||||
tty_termios_encode_baud_rate(new, baud, baud);
|
||||
@@ -450,7 +450,7 @@ static irqreturn_t ar933x_uart_interrupt(int irq, void *dev_id)
|
||||
if ((status & AR933X_UART_CS_HOST_INT) == 0)
|
||||
return IRQ_NONE;
|
||||
|
||||
- spin_lock(&up->port.lock);
|
||||
+ uart_port_lock(&up->port);
|
||||
|
||||
status = ar933x_uart_read(up, AR933X_UART_INT_REG);
|
||||
status &= ar933x_uart_read(up, AR933X_UART_INT_EN_REG);
|
||||
@@ -468,7 +468,7 @@ static irqreturn_t ar933x_uart_interrupt(int irq, void *dev_id)
|
||||
ar933x_uart_tx_chars(up);
|
||||
}
|
||||
|
||||
- spin_unlock(&up->port.lock);
|
||||
+ uart_port_unlock(&up->port);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
@@ -485,7 +485,7 @@ static int ar933x_uart_startup(struct uart_port *port)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
- spin_lock_irqsave(&up->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&up->port, &flags);
|
||||
|
||||
/* Enable HOST interrupts */
|
||||
ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
|
||||
@@ -498,7 +498,7 @@ static int ar933x_uart_startup(struct uart_port *port)
|
||||
/* Enable RX interrupts */
|
||||
ar933x_uart_start_rx_interrupt(up);
|
||||
|
||||
- spin_unlock_irqrestore(&up->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&up->port, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -632,9 +632,9 @@ static void ar933x_uart_console_write(struct console *co, const char *s,
|
||||
if (up->port.sysrq)
|
||||
locked = 0;
|
||||
else if (oops_in_progress)
|
||||
- locked = spin_trylock(&up->port.lock);
|
||||
+ locked = uart_port_trylock(&up->port);
|
||||
else
|
||||
- spin_lock(&up->port.lock);
|
||||
+ uart_port_lock(&up->port);
|
||||
|
||||
/*
|
||||
* First save the IER then disable the interrupts
|
||||
@@ -654,7 +654,7 @@ static void ar933x_uart_console_write(struct console *co, const char *s,
|
||||
ar933x_uart_write(up, AR933X_UART_INT_REG, AR933X_UART_INT_ALLINTS);
|
||||
|
||||
if (locked)
|
||||
- spin_unlock(&up->port.lock);
|
||||
+ uart_port_unlock(&up->port);
|
||||
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,102 @@
|
|||
From 0898d04709fe45ddfbc0268f1bc4e1d6e7ef2c13 Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:37 +0206
|
||||
Subject: [PATCH 047/195] serial: arc_uart: Use port lock wrappers
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-21-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/arc_uart.c | 16 ++++++++--------
|
||||
1 file changed, 8 insertions(+), 8 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
|
||||
index ad4ae19b6ce3..1aa5b2b49c26 100644
|
||||
--- a/drivers/tty/serial/arc_uart.c
|
||||
+++ b/drivers/tty/serial/arc_uart.c
|
||||
@@ -279,9 +279,9 @@ static irqreturn_t arc_serial_isr(int irq, void *dev_id)
|
||||
if (status & RXIENB) {
|
||||
|
||||
/* already in ISR, no need of xx_irqsave */
|
||||
- spin_lock(&port->lock);
|
||||
+ uart_port_lock(port);
|
||||
arc_serial_rx_chars(port, status);
|
||||
- spin_unlock(&port->lock);
|
||||
+ uart_port_unlock(port);
|
||||
}
|
||||
|
||||
if ((status & TXIENB) && (status & TXEMPTY)) {
|
||||
@@ -291,12 +291,12 @@ static irqreturn_t arc_serial_isr(int irq, void *dev_id)
|
||||
*/
|
||||
UART_TX_IRQ_DISABLE(port);
|
||||
|
||||
- spin_lock(&port->lock);
|
||||
+ uart_port_lock(port);
|
||||
|
||||
if (!uart_tx_stopped(port))
|
||||
arc_serial_tx_chars(port);
|
||||
|
||||
- spin_unlock(&port->lock);
|
||||
+ uart_port_unlock(port);
|
||||
}
|
||||
|
||||
return IRQ_HANDLED;
|
||||
@@ -366,7 +366,7 @@ arc_serial_set_termios(struct uart_port *port, struct ktermios *new,
|
||||
uartl = hw_val & 0xFF;
|
||||
uarth = (hw_val >> 8) & 0xFF;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
UART_ALL_IRQ_DISABLE(port);
|
||||
|
||||
@@ -391,7 +391,7 @@ arc_serial_set_termios(struct uart_port *port, struct ktermios *new,
|
||||
|
||||
uart_update_timeout(port, new->c_cflag, baud);
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
static const char *arc_serial_type(struct uart_port *port)
|
||||
@@ -521,9 +521,9 @@ static void arc_serial_console_write(struct console *co, const char *s,
|
||||
struct uart_port *port = &arc_uart_ports[co->index].port;
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
uart_console_write(port, s, count, arc_serial_console_putchar);
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
static struct console arc_console = {
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,124 @@
|
|||
From 46a1206089037fef9ea2f392d727c159d1466d61 Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:38 +0206
|
||||
Subject: [PATCH 048/195] serial: atmel: Use port lock wrappers
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-22-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/atmel_serial.c | 24 ++++++++++++------------
|
||||
1 file changed, 12 insertions(+), 12 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
|
||||
index 88cdafa5ac54..1946fafc3f3e 100644
|
||||
--- a/drivers/tty/serial/atmel_serial.c
|
||||
+++ b/drivers/tty/serial/atmel_serial.c
|
||||
@@ -861,7 +861,7 @@ static void atmel_complete_tx_dma(void *arg)
|
||||
struct dma_chan *chan = atmel_port->chan_tx;
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
if (chan)
|
||||
dmaengine_terminate_all(chan);
|
||||
@@ -893,7 +893,7 @@ static void atmel_complete_tx_dma(void *arg)
|
||||
atmel_port->tx_done_mask);
|
||||
}
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
static void atmel_release_tx_dma(struct uart_port *port)
|
||||
@@ -1711,9 +1711,9 @@ static void atmel_tasklet_rx_func(struct tasklet_struct *t)
|
||||
struct uart_port *port = &atmel_port->uart;
|
||||
|
||||
/* The interrupt handler does not take the lock */
|
||||
- spin_lock(&port->lock);
|
||||
+ uart_port_lock(port);
|
||||
atmel_port->schedule_rx(port);
|
||||
- spin_unlock(&port->lock);
|
||||
+ uart_port_unlock(port);
|
||||
}
|
||||
|
||||
static void atmel_tasklet_tx_func(struct tasklet_struct *t)
|
||||
@@ -1723,9 +1723,9 @@ static void atmel_tasklet_tx_func(struct tasklet_struct *t)
|
||||
struct uart_port *port = &atmel_port->uart;
|
||||
|
||||
/* The interrupt handler does not take the lock */
|
||||
- spin_lock(&port->lock);
|
||||
+ uart_port_lock(port);
|
||||
atmel_port->schedule_tx(port);
|
||||
- spin_unlock(&port->lock);
|
||||
+ uart_port_unlock(port);
|
||||
}
|
||||
|
||||
static void atmel_init_property(struct atmel_uart_port *atmel_port,
|
||||
@@ -2175,7 +2175,7 @@ static void atmel_set_termios(struct uart_port *port,
|
||||
} else
|
||||
mode |= ATMEL_US_PAR_NONE;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
port->read_status_mask = ATMEL_US_OVRE;
|
||||
if (termios->c_iflag & INPCK)
|
||||
@@ -2377,22 +2377,22 @@ static void atmel_set_termios(struct uart_port *port,
|
||||
else
|
||||
atmel_disable_ms(port);
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
static void atmel_set_ldisc(struct uart_port *port, struct ktermios *termios)
|
||||
{
|
||||
if (termios->c_line == N_PPS) {
|
||||
port->flags |= UPF_HARDPPS_CD;
|
||||
- spin_lock_irq(&port->lock);
|
||||
+ uart_port_lock_irq(port);
|
||||
atmel_enable_ms(port);
|
||||
- spin_unlock_irq(&port->lock);
|
||||
+ uart_port_unlock_irq(port);
|
||||
} else {
|
||||
port->flags &= ~UPF_HARDPPS_CD;
|
||||
if (!UART_ENABLE_MS(port, termios->c_cflag)) {
|
||||
- spin_lock_irq(&port->lock);
|
||||
+ uart_port_lock_irq(port);
|
||||
atmel_disable_ms(port);
|
||||
- spin_unlock_irq(&port->lock);
|
||||
+ uart_port_unlock_irq(port);
|
||||
}
|
||||
}
|
||||
}
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,133 @@
|
|||
From 0bce8cca5301c02cad41bebb779749e2ffa36424 Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:39 +0206
|
||||
Subject: [PATCH 049/195] serial: bcm63xx-uart: Use port lock wrappers
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-23-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/bcm63xx_uart.c | 22 +++++++++++-----------
|
||||
1 file changed, 11 insertions(+), 11 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
|
||||
index 0dd8cceb837c..4a08fd5ee61b 100644
|
||||
--- a/drivers/tty/serial/bcm63xx_uart.c
|
||||
+++ b/drivers/tty/serial/bcm63xx_uart.c
|
||||
@@ -201,7 +201,7 @@ static void bcm_uart_break_ctl(struct uart_port *port, int ctl)
|
||||
unsigned long flags;
|
||||
unsigned int val;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
val = bcm_uart_readl(port, UART_CTL_REG);
|
||||
if (ctl)
|
||||
@@ -210,7 +210,7 @@ static void bcm_uart_break_ctl(struct uart_port *port, int ctl)
|
||||
val &= ~UART_CTL_XMITBRK_MASK;
|
||||
bcm_uart_writel(port, val, UART_CTL_REG);
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -332,7 +332,7 @@ static irqreturn_t bcm_uart_interrupt(int irq, void *dev_id)
|
||||
unsigned int irqstat;
|
||||
|
||||
port = dev_id;
|
||||
- spin_lock(&port->lock);
|
||||
+ uart_port_lock(port);
|
||||
|
||||
irqstat = bcm_uart_readl(port, UART_IR_REG);
|
||||
if (irqstat & UART_RX_INT_STAT)
|
||||
@@ -353,7 +353,7 @@ static irqreturn_t bcm_uart_interrupt(int irq, void *dev_id)
|
||||
estat & UART_EXTINP_DCD_MASK);
|
||||
}
|
||||
|
||||
- spin_unlock(&port->lock);
|
||||
+ uart_port_unlock(port);
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
@@ -451,9 +451,9 @@ static void bcm_uart_shutdown(struct uart_port *port)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
bcm_uart_writel(port, 0, UART_IR_REG);
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
|
||||
bcm_uart_disable(port);
|
||||
bcm_uart_flush(port);
|
||||
@@ -470,7 +470,7 @@ static void bcm_uart_set_termios(struct uart_port *port, struct ktermios *new,
|
||||
unsigned long flags;
|
||||
int tries;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
/* Drain the hot tub fully before we power it off for the winter. */
|
||||
for (tries = 3; !bcm_uart_tx_empty(port) && tries; tries--)
|
||||
@@ -546,7 +546,7 @@ static void bcm_uart_set_termios(struct uart_port *port, struct ktermios *new,
|
||||
|
||||
uart_update_timeout(port, new->c_cflag, baud);
|
||||
bcm_uart_enable(port);
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -712,9 +712,9 @@ static void bcm_console_write(struct console *co, const char *s,
|
||||
/* bcm_uart_interrupt() already took the lock */
|
||||
locked = 0;
|
||||
} else if (oops_in_progress) {
|
||||
- locked = spin_trylock(&port->lock);
|
||||
+ locked = uart_port_trylock(port);
|
||||
} else {
|
||||
- spin_lock(&port->lock);
|
||||
+ uart_port_lock(port);
|
||||
locked = 1;
|
||||
}
|
||||
|
||||
@@ -725,7 +725,7 @@ static void bcm_console_write(struct console *co, const char *s,
|
||||
wait_for_xmitr(port);
|
||||
|
||||
if (locked)
|
||||
- spin_unlock(&port->lock);
|
||||
+ uart_port_unlock(port);
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,75 @@
|
|||
From 1d8309a42134af8335590a7b58e24aa6ba8172bc Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:40 +0206
|
||||
Subject: [PATCH 050/195] serial: cpm_uart: Use port lock wrappers
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-24-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/cpm_uart.c | 8 ++++----
|
||||
1 file changed, 4 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/cpm_uart.c b/drivers/tty/serial/cpm_uart.c
|
||||
index 626423022d62..be4af6eda4c2 100644
|
||||
--- a/drivers/tty/serial/cpm_uart.c
|
||||
+++ b/drivers/tty/serial/cpm_uart.c
|
||||
@@ -569,7 +569,7 @@ static void cpm_uart_set_termios(struct uart_port *port,
|
||||
if ((termios->c_cflag & CREAD) == 0)
|
||||
port->read_status_mask &= ~BD_SC_EMPTY;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
if (IS_SMC(pinfo)) {
|
||||
unsigned int bits = tty_get_frame_size(termios->c_cflag);
|
||||
@@ -609,7 +609,7 @@ static void cpm_uart_set_termios(struct uart_port *port,
|
||||
clk_set_rate(pinfo->clk, baud);
|
||||
else
|
||||
cpm_setbrg(pinfo->brg - 1, baud);
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
static const char *cpm_uart_type(struct uart_port *port)
|
||||
@@ -1386,9 +1386,9 @@ static void cpm_uart_console_write(struct console *co, const char *s,
|
||||
cpm_uart_early_write(pinfo, s, count, true);
|
||||
local_irq_restore(flags);
|
||||
} else {
|
||||
- spin_lock_irqsave(&pinfo->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&pinfo->port, &flags);
|
||||
cpm_uart_early_write(pinfo, s, count, true);
|
||||
- spin_unlock_irqrestore(&pinfo->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&pinfo->port, flags);
|
||||
}
|
||||
}
|
||||
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,118 @@
|
|||
From ab7a1a85b420f839afe9b6ac94e420c0016f47a7 Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:41 +0206
|
||||
Subject: [PATCH 051/195] serial: digicolor: Use port lock wrappers
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Acked-by: Baruch Siach <baruch@tkos.co.il>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-25-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/digicolor-usart.c | 18 +++++++++---------
|
||||
1 file changed, 9 insertions(+), 9 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/digicolor-usart.c b/drivers/tty/serial/digicolor-usart.c
|
||||
index 128b5479e813..5004125f3045 100644
|
||||
--- a/drivers/tty/serial/digicolor-usart.c
|
||||
+++ b/drivers/tty/serial/digicolor-usart.c
|
||||
@@ -133,7 +133,7 @@ static void digicolor_uart_rx(struct uart_port *port)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
while (1) {
|
||||
u8 status, ch, ch_flag;
|
||||
@@ -172,7 +172,7 @@ static void digicolor_uart_rx(struct uart_port *port)
|
||||
ch_flag);
|
||||
}
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
|
||||
tty_flip_buffer_push(&port->state->port);
|
||||
}
|
||||
@@ -185,7 +185,7 @@ static void digicolor_uart_tx(struct uart_port *port)
|
||||
if (digicolor_uart_tx_full(port))
|
||||
return;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
if (port->x_char) {
|
||||
writeb_relaxed(port->x_char, port->membase + UA_EMI_REC);
|
||||
@@ -211,7 +211,7 @@ static void digicolor_uart_tx(struct uart_port *port)
|
||||
uart_write_wakeup(port);
|
||||
|
||||
out:
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
static irqreturn_t digicolor_uart_int(int irq, void *dev_id)
|
||||
@@ -333,7 +333,7 @@ static void digicolor_uart_set_termios(struct uart_port *port,
|
||||
port->ignore_status_mask |= UA_STATUS_OVERRUN_ERR
|
||||
| UA_STATUS_PARITY_ERR | UA_STATUS_FRAME_ERR;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
uart_update_timeout(port, termios->c_cflag, baud);
|
||||
|
||||
@@ -341,7 +341,7 @@ static void digicolor_uart_set_termios(struct uart_port *port,
|
||||
writeb_relaxed(divisor & 0xff, port->membase + UA_HBAUD_LO);
|
||||
writeb_relaxed(divisor >> 8, port->membase + UA_HBAUD_HI);
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
static const char *digicolor_uart_type(struct uart_port *port)
|
||||
@@ -398,14 +398,14 @@ static void digicolor_uart_console_write(struct console *co, const char *c,
|
||||
int locked = 1;
|
||||
|
||||
if (oops_in_progress)
|
||||
- locked = spin_trylock_irqsave(&port->lock, flags);
|
||||
+ locked = uart_port_trylock_irqsave(port, &flags);
|
||||
else
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
uart_console_write(port, c, n, digicolor_uart_console_putchar);
|
||||
|
||||
if (locked)
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
|
||||
/* Wait for transmitter to become empty */
|
||||
do {
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -1,20 +0,0 @@
|
|||
From cc2be4ec82b6c49240b861a11de8c4336501dc09 Mon Sep 17 00:00:00 2001
|
||||
From: Clark Williams <clrkwllms@kernel.org>
|
||||
Date: Fri, 18 Aug 2023 10:45:35 -0500
|
||||
Subject: [PATCH 52/62] 'Linux 6.1.46-rt13 REBASE'
|
||||
|
||||
Signed-off-by: Clark Williams <clrkwllms@kernel.org>
|
||||
---
|
||||
localversion-rt | 2 +-
|
||||
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||
|
||||
diff --git a/localversion-rt b/localversion-rt
|
||||
index 045478966e9f..9f7d0bdbffb1 100644
|
||||
--- a/localversion-rt
|
||||
+++ b/localversion-rt
|
||||
@@ -1 +1 @@
|
||||
--rt7
|
||||
+-rt13
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,166 @@
|
|||
From 3579237b515dfb4878cbc1eb1d2313d6b0fb2b7d Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:42 +0206
|
||||
Subject: [PATCH 052/195] serial: dz: Use port lock wrappers
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-26-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/dz.c | 32 ++++++++++++++++----------------
|
||||
1 file changed, 16 insertions(+), 16 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/dz.c b/drivers/tty/serial/dz.c
|
||||
index 667f52e83277..6df7af9edc1c 100644
|
||||
--- a/drivers/tty/serial/dz.c
|
||||
+++ b/drivers/tty/serial/dz.c
|
||||
@@ -268,9 +268,9 @@ static inline void dz_transmit_chars(struct dz_mux *mux)
|
||||
}
|
||||
/* If nothing to do or stopped or hardware stopped. */
|
||||
if (uart_circ_empty(xmit) || uart_tx_stopped(&dport->port)) {
|
||||
- spin_lock(&dport->port.lock);
|
||||
+ uart_port_lock(&dport->port);
|
||||
dz_stop_tx(&dport->port);
|
||||
- spin_unlock(&dport->port.lock);
|
||||
+ uart_port_unlock(&dport->port);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -287,9 +287,9 @@ static inline void dz_transmit_chars(struct dz_mux *mux)
|
||||
|
||||
/* Are we are done. */
|
||||
if (uart_circ_empty(xmit)) {
|
||||
- spin_lock(&dport->port.lock);
|
||||
+ uart_port_lock(&dport->port);
|
||||
dz_stop_tx(&dport->port);
|
||||
- spin_unlock(&dport->port.lock);
|
||||
+ uart_port_unlock(&dport->port);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -415,14 +415,14 @@ static int dz_startup(struct uart_port *uport)
|
||||
return ret;
|
||||
}
|
||||
|
||||
- spin_lock_irqsave(&dport->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&dport->port, &flags);
|
||||
|
||||
/* Enable interrupts. */
|
||||
tmp = dz_in(dport, DZ_CSR);
|
||||
tmp |= DZ_RIE | DZ_TIE;
|
||||
dz_out(dport, DZ_CSR, tmp);
|
||||
|
||||
- spin_unlock_irqrestore(&dport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&dport->port, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -443,9 +443,9 @@ static void dz_shutdown(struct uart_port *uport)
|
||||
int irq_guard;
|
||||
u16 tmp;
|
||||
|
||||
- spin_lock_irqsave(&dport->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&dport->port, &flags);
|
||||
dz_stop_tx(&dport->port);
|
||||
- spin_unlock_irqrestore(&dport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&dport->port, flags);
|
||||
|
||||
irq_guard = atomic_add_return(-1, &mux->irq_guard);
|
||||
if (!irq_guard) {
|
||||
@@ -491,14 +491,14 @@ static void dz_break_ctl(struct uart_port *uport, int break_state)
|
||||
unsigned long flags;
|
||||
unsigned short tmp, mask = 1 << dport->port.line;
|
||||
|
||||
- spin_lock_irqsave(&uport->lock, flags);
|
||||
+ uart_port_lock_irqsave(uport, &flags);
|
||||
tmp = dz_in(dport, DZ_TCR);
|
||||
if (break_state)
|
||||
tmp |= mask;
|
||||
else
|
||||
tmp &= ~mask;
|
||||
dz_out(dport, DZ_TCR, tmp);
|
||||
- spin_unlock_irqrestore(&uport->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(uport, flags);
|
||||
}
|
||||
|
||||
static int dz_encode_baud_rate(unsigned int baud)
|
||||
@@ -608,7 +608,7 @@ static void dz_set_termios(struct uart_port *uport, struct ktermios *termios,
|
||||
if (termios->c_cflag & CREAD)
|
||||
cflag |= DZ_RXENAB;
|
||||
|
||||
- spin_lock_irqsave(&dport->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&dport->port, &flags);
|
||||
|
||||
uart_update_timeout(uport, termios->c_cflag, baud);
|
||||
|
||||
@@ -631,7 +631,7 @@ static void dz_set_termios(struct uart_port *uport, struct ktermios *termios,
|
||||
if (termios->c_iflag & IGNBRK)
|
||||
dport->port.ignore_status_mask |= DZ_BREAK;
|
||||
|
||||
- spin_unlock_irqrestore(&dport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&dport->port, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -645,12 +645,12 @@ static void dz_pm(struct uart_port *uport, unsigned int state,
|
||||
struct dz_port *dport = to_dport(uport);
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&dport->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&dport->port, &flags);
|
||||
if (state < 3)
|
||||
dz_start_tx(&dport->port);
|
||||
else
|
||||
dz_stop_tx(&dport->port);
|
||||
- spin_unlock_irqrestore(&dport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&dport->port, flags);
|
||||
}
|
||||
|
||||
|
||||
@@ -811,7 +811,7 @@ static void dz_console_putchar(struct uart_port *uport, unsigned char ch)
|
||||
unsigned short csr, tcr, trdy, mask;
|
||||
int loops = 10000;
|
||||
|
||||
- spin_lock_irqsave(&dport->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&dport->port, &flags);
|
||||
csr = dz_in(dport, DZ_CSR);
|
||||
dz_out(dport, DZ_CSR, csr & ~DZ_TIE);
|
||||
tcr = dz_in(dport, DZ_TCR);
|
||||
@@ -819,7 +819,7 @@ static void dz_console_putchar(struct uart_port *uport, unsigned char ch)
|
||||
mask = tcr;
|
||||
dz_out(dport, DZ_TCR, mask);
|
||||
iob();
|
||||
- spin_unlock_irqrestore(&dport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&dport->port, flags);
|
||||
|
||||
do {
|
||||
trdy = dz_in(dport, DZ_CSR);
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -1,92 +0,0 @@
|
|||
From be70bbd4d08f00c845633614925c0b126f11a30a Mon Sep 17 00:00:00 2001
|
||||
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Date: Fri, 10 Mar 2023 17:29:05 +0100
|
||||
Subject: [PATCH 53/62] io-mapping: don't disable preempt on RT in
|
||||
io_mapping_map_atomic_wc().
|
||||
|
||||
io_mapping_map_atomic_wc() disables preemption and pagefaults for
|
||||
historical reasons. The conversion to io_mapping_map_local_wc(), which
|
||||
only disables migration, cannot be done wholesale because quite some call
|
||||
sites need to be updated to accommodate with the changed semantics.
|
||||
|
||||
On PREEMPT_RT enabled kernels the io_mapping_map_atomic_wc() semantics are
|
||||
problematic due to the implicit disabling of preemption which makes it
|
||||
impossible to acquire 'sleeping' spinlocks within the mapped atomic
|
||||
sections.
|
||||
|
||||
PREEMPT_RT replaces the preempt_disable() with a migrate_disable() for
|
||||
more than a decade. It could be argued that this is a justification to do
|
||||
this unconditionally, but PREEMPT_RT covers only a limited number of
|
||||
architectures and it disables some functionality which limits the coverage
|
||||
further.
|
||||
|
||||
Limit the replacement to PREEMPT_RT for now. This is also done
|
||||
kmap_atomic().
|
||||
|
||||
Link: https://lkml.kernel.org/r/20230310162905.O57Pj7hh@linutronix.de
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Reported-by: Richard Weinberger <richard.weinberger@gmail.com>
|
||||
Link: https://lore.kernel.org/CAFLxGvw0WMxaMqYqJ5WgvVSbKHq2D2xcXTOgMCpgq9nDC-MWTQ@mail.gmail.com
|
||||
Cc: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
|
||||
(cherry picked from commit 7eb16f23b9a415f062db22739e59bb144e0b24ab)
|
||||
Signed-off-by: Clark Williams <clark.williams@gmail.com>
|
||||
---
|
||||
include/linux/io-mapping.h | 20 ++++++++++++++++----
|
||||
1 file changed, 16 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
|
||||
index 66a774d2710e..b08532b8fba7 100644
|
||||
--- a/include/linux/io-mapping.h
|
||||
+++ b/include/linux/io-mapping.h
|
||||
@@ -69,7 +69,10 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
|
||||
|
||||
BUG_ON(offset >= mapping->size);
|
||||
phys_addr = mapping->base + offset;
|
||||
- preempt_disable();
|
||||
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
|
||||
+ preempt_disable();
|
||||
+ else
|
||||
+ migrate_disable();
|
||||
pagefault_disable();
|
||||
return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
|
||||
}
|
||||
@@ -79,7 +82,10 @@ io_mapping_unmap_atomic(void __iomem *vaddr)
|
||||
{
|
||||
kunmap_local_indexed((void __force *)vaddr);
|
||||
pagefault_enable();
|
||||
- preempt_enable();
|
||||
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
|
||||
+ preempt_enable();
|
||||
+ else
|
||||
+ migrate_enable();
|
||||
}
|
||||
|
||||
static inline void __iomem *
|
||||
@@ -162,7 +168,10 @@ static inline void __iomem *
|
||||
io_mapping_map_atomic_wc(struct io_mapping *mapping,
|
||||
unsigned long offset)
|
||||
{
|
||||
- preempt_disable();
|
||||
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
|
||||
+ preempt_disable();
|
||||
+ else
|
||||
+ migrate_disable();
|
||||
pagefault_disable();
|
||||
return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
|
||||
}
|
||||
@@ -172,7 +181,10 @@ io_mapping_unmap_atomic(void __iomem *vaddr)
|
||||
{
|
||||
io_mapping_unmap(vaddr);
|
||||
pagefault_enable();
|
||||
- preempt_enable();
|
||||
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
|
||||
+ preempt_enable();
|
||||
+ else
|
||||
+ migrate_enable();
|
||||
}
|
||||
|
||||
static inline void __iomem *
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,148 @@
|
|||
From ab1c17a33ff558e1044149c683ec559daca8b962 Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:43 +0206
|
||||
Subject: [PATCH 053/195] serial: linflexuart: Use port lock wrappers
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-27-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/fsl_linflexuart.c | 26 +++++++++++++-------------
|
||||
1 file changed, 13 insertions(+), 13 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/fsl_linflexuart.c b/drivers/tty/serial/fsl_linflexuart.c
|
||||
index 249cb380c3c6..7fa809a405e8 100644
|
||||
--- a/drivers/tty/serial/fsl_linflexuart.c
|
||||
+++ b/drivers/tty/serial/fsl_linflexuart.c
|
||||
@@ -203,7 +203,7 @@ static irqreturn_t linflex_txint(int irq, void *dev_id)
|
||||
struct circ_buf *xmit = &sport->state->xmit;
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&sport->lock, flags);
|
||||
+ uart_port_lock_irqsave(sport, &flags);
|
||||
|
||||
if (sport->x_char) {
|
||||
linflex_put_char(sport, sport->x_char);
|
||||
@@ -217,7 +217,7 @@ static irqreturn_t linflex_txint(int irq, void *dev_id)
|
||||
|
||||
linflex_transmit_buffer(sport);
|
||||
out:
|
||||
- spin_unlock_irqrestore(&sport->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(sport, flags);
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
@@ -230,7 +230,7 @@ static irqreturn_t linflex_rxint(int irq, void *dev_id)
|
||||
unsigned char rx;
|
||||
bool brk;
|
||||
|
||||
- spin_lock_irqsave(&sport->lock, flags);
|
||||
+ uart_port_lock_irqsave(sport, &flags);
|
||||
|
||||
status = readl(sport->membase + UARTSR);
|
||||
while (status & LINFLEXD_UARTSR_RMB) {
|
||||
@@ -266,7 +266,7 @@ static irqreturn_t linflex_rxint(int irq, void *dev_id)
|
||||
}
|
||||
}
|
||||
|
||||
- spin_unlock_irqrestore(&sport->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(sport, flags);
|
||||
|
||||
tty_flip_buffer_push(port);
|
||||
|
||||
@@ -369,11 +369,11 @@ static int linflex_startup(struct uart_port *port)
|
||||
int ret = 0;
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
linflex_setup_watermark(port);
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
|
||||
ret = devm_request_irq(port->dev, port->irq, linflex_int, 0,
|
||||
DRIVER_NAME, port);
|
||||
@@ -386,14 +386,14 @@ static void linflex_shutdown(struct uart_port *port)
|
||||
unsigned long ier;
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
/* disable interrupts */
|
||||
ier = readl(port->membase + LINIER);
|
||||
ier &= ~(LINFLEXD_LINIER_DRIE | LINFLEXD_LINIER_DTIE);
|
||||
writel(ier, port->membase + LINIER);
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
|
||||
devm_free_irq(port->dev, port->irq, port);
|
||||
}
|
||||
@@ -474,7 +474,7 @@ linflex_set_termios(struct uart_port *port, struct ktermios *termios,
|
||||
cr &= ~LINFLEXD_UARTCR_PCE;
|
||||
}
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
port->read_status_mask = 0;
|
||||
|
||||
@@ -507,7 +507,7 @@ linflex_set_termios(struct uart_port *port, struct ktermios *termios,
|
||||
|
||||
writel(cr1, port->membase + LINCR1);
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
static const char *linflex_type(struct uart_port *port)
|
||||
@@ -646,14 +646,14 @@ linflex_console_write(struct console *co, const char *s, unsigned int count)
|
||||
if (sport->sysrq)
|
||||
locked = 0;
|
||||
else if (oops_in_progress)
|
||||
- locked = spin_trylock_irqsave(&sport->lock, flags);
|
||||
+ locked = uart_port_trylock_irqsave(sport, &flags);
|
||||
else
|
||||
- spin_lock_irqsave(&sport->lock, flags);
|
||||
+ uart_port_lock_irqsave(sport, &flags);
|
||||
|
||||
linflex_string_write(sport, s, count);
|
||||
|
||||
if (locked)
|
||||
- spin_unlock_irqrestore(&sport->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(sport, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -1,62 +0,0 @@
|
|||
From 23249660cee29a01660c4518a3046dfba2b7199c Mon Sep 17 00:00:00 2001
|
||||
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Date: Tue, 21 Mar 2023 17:11:40 +0100
|
||||
Subject: [PATCH 54/62] locking/rwbase: Mitigate indefinite writer starvation
|
||||
|
||||
On PREEMPT_RT, rw_semaphore and rwlock_t locks are unfair to writers.
|
||||
Readers can indefinitely acquire the lock unless the writer fully acquired
|
||||
the lock, which might never happen if there is always a reader in the
|
||||
critical section owning the lock.
|
||||
|
||||
Mel Gorman reported that since LTP-20220121 the dio_truncate test case
|
||||
went from having 1 reader to having 16 readers and that number of readers
|
||||
is sufficient to prevent the down_write ever succeeding while readers
|
||||
exist. Eventually the test is killed after 30 minutes as a failure.
|
||||
|
||||
Mel proposed a timeout to limit how long a writer can be blocked until
|
||||
the reader is forced into the slowpath.
|
||||
|
||||
Thomas argued that there is no added value by providing this timeout. From
|
||||
a PREEMPT_RT point of view, there are no critical rw_semaphore or rwlock_t
|
||||
locks left where the reader must be preferred.
|
||||
|
||||
Mitigate indefinite writer starvation by forcing the READER into the
|
||||
slowpath once the WRITER attempts to acquire the lock.
|
||||
|
||||
Reported-by: Mel Gorman <mgorman@techsingularity.net>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: Ingo Molnar <mingo@kernel.org>
|
||||
Acked-by: Mel Gorman <mgorman@techsingularity.net>
|
||||
Link: https://lore.kernel.org/877cwbq4cq.ffs@tglx
|
||||
Link: https://lore.kernel.org/r/20230321161140.HMcQEhHb@linutronix.de
|
||||
Cc: Linus Torvalds <torvalds@linux-foundation.org>
|
||||
(cherry picked from commit 286deb7ec03d941664ac3ffaff58814b454adf65)
|
||||
Signed-off-by: Clark Williams <clark.williams@gmail.com>
|
||||
---
|
||||
kernel/locking/rwbase_rt.c | 9 ---------
|
||||
1 file changed, 9 deletions(-)
|
||||
|
||||
diff --git a/kernel/locking/rwbase_rt.c b/kernel/locking/rwbase_rt.c
|
||||
index c201aadb9301..25ec0239477c 100644
|
||||
--- a/kernel/locking/rwbase_rt.c
|
||||
+++ b/kernel/locking/rwbase_rt.c
|
||||
@@ -72,15 +72,6 @@ static int __sched __rwbase_read_lock(struct rwbase_rt *rwb,
|
||||
int ret;
|
||||
|
||||
raw_spin_lock_irq(&rtm->wait_lock);
|
||||
- /*
|
||||
- * Allow readers, as long as the writer has not completely
|
||||
- * acquired the semaphore for write.
|
||||
- */
|
||||
- if (atomic_read(&rwb->readers) != WRITER_BIAS) {
|
||||
- atomic_inc(&rwb->readers);
|
||||
- raw_spin_unlock_irq(&rtm->wait_lock);
|
||||
- return 0;
|
||||
- }
|
||||
|
||||
/*
|
||||
* Call into the slow lock path with the rtmutex->wait_lock
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,394 @@
|
|||
From be3a24b837786995c78967d477f7015686b79880 Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:44 +0206
|
||||
Subject: [PATCH 054/195] serial: fsl_lpuart: Use port lock wrappers
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-28-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/fsl_lpuart.c | 88 ++++++++++++++++-----------------
|
||||
1 file changed, 44 insertions(+), 44 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
|
||||
index f72e1340b47d..6d0cfb2e86b4 100644
|
||||
--- a/drivers/tty/serial/fsl_lpuart.c
|
||||
+++ b/drivers/tty/serial/fsl_lpuart.c
|
||||
@@ -532,9 +532,9 @@ static void lpuart_dma_tx_complete(void *arg)
|
||||
struct dma_chan *chan = sport->dma_tx_chan;
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&sport->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&sport->port, &flags);
|
||||
if (!sport->dma_tx_in_progress) {
|
||||
- spin_unlock_irqrestore(&sport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&sport->port, flags);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -543,7 +543,7 @@ static void lpuart_dma_tx_complete(void *arg)
|
||||
|
||||
uart_xmit_advance(&sport->port, sport->dma_tx_bytes);
|
||||
sport->dma_tx_in_progress = false;
|
||||
- spin_unlock_irqrestore(&sport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&sport->port, flags);
|
||||
|
||||
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
|
||||
uart_write_wakeup(&sport->port);
|
||||
@@ -553,12 +553,12 @@ static void lpuart_dma_tx_complete(void *arg)
|
||||
return;
|
||||
}
|
||||
|
||||
- spin_lock_irqsave(&sport->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&sport->port, &flags);
|
||||
|
||||
if (!lpuart_stopped_or_empty(&sport->port))
|
||||
lpuart_dma_tx(sport);
|
||||
|
||||
- spin_unlock_irqrestore(&sport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&sport->port, flags);
|
||||
}
|
||||
|
||||
static dma_addr_t lpuart_dma_datareg_addr(struct lpuart_port *sport)
|
||||
@@ -651,7 +651,7 @@ static int lpuart_poll_init(struct uart_port *port)
|
||||
|
||||
sport->port.fifosize = 0;
|
||||
|
||||
- spin_lock_irqsave(&sport->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&sport->port, &flags);
|
||||
/* Disable Rx & Tx */
|
||||
writeb(0, sport->port.membase + UARTCR2);
|
||||
|
||||
@@ -675,7 +675,7 @@ static int lpuart_poll_init(struct uart_port *port)
|
||||
|
||||
/* Enable Rx and Tx */
|
||||
writeb(UARTCR2_RE | UARTCR2_TE, sport->port.membase + UARTCR2);
|
||||
- spin_unlock_irqrestore(&sport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&sport->port, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -703,7 +703,7 @@ static int lpuart32_poll_init(struct uart_port *port)
|
||||
|
||||
sport->port.fifosize = 0;
|
||||
|
||||
- spin_lock_irqsave(&sport->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&sport->port, &flags);
|
||||
|
||||
/* Disable Rx & Tx */
|
||||
lpuart32_write(&sport->port, 0, UARTCTRL);
|
||||
@@ -724,7 +724,7 @@ static int lpuart32_poll_init(struct uart_port *port)
|
||||
|
||||
/* Enable Rx and Tx */
|
||||
lpuart32_write(&sport->port, UARTCTRL_RE | UARTCTRL_TE, UARTCTRL);
|
||||
- spin_unlock_irqrestore(&sport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&sport->port, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -879,9 +879,9 @@ static unsigned int lpuart32_tx_empty(struct uart_port *port)
|
||||
|
||||
static void lpuart_txint(struct lpuart_port *sport)
|
||||
{
|
||||
- spin_lock(&sport->port.lock);
|
||||
+ uart_port_lock(&sport->port);
|
||||
lpuart_transmit_buffer(sport);
|
||||
- spin_unlock(&sport->port.lock);
|
||||
+ uart_port_unlock(&sport->port);
|
||||
}
|
||||
|
||||
static void lpuart_rxint(struct lpuart_port *sport)
|
||||
@@ -890,7 +890,7 @@ static void lpuart_rxint(struct lpuart_port *sport)
|
||||
struct tty_port *port = &sport->port.state->port;
|
||||
unsigned char rx, sr;
|
||||
|
||||
- spin_lock(&sport->port.lock);
|
||||
+ uart_port_lock(&sport->port);
|
||||
|
||||
while (!(readb(sport->port.membase + UARTSFIFO) & UARTSFIFO_RXEMPT)) {
|
||||
flg = TTY_NORMAL;
|
||||
@@ -956,9 +956,9 @@ static void lpuart_rxint(struct lpuart_port *sport)
|
||||
|
||||
static void lpuart32_txint(struct lpuart_port *sport)
|
||||
{
|
||||
- spin_lock(&sport->port.lock);
|
||||
+ uart_port_lock(&sport->port);
|
||||
lpuart32_transmit_buffer(sport);
|
||||
- spin_unlock(&sport->port.lock);
|
||||
+ uart_port_unlock(&sport->port);
|
||||
}
|
||||
|
||||
static void lpuart32_rxint(struct lpuart_port *sport)
|
||||
@@ -968,7 +968,7 @@ static void lpuart32_rxint(struct lpuart_port *sport)
|
||||
unsigned long rx, sr;
|
||||
bool is_break;
|
||||
|
||||
- spin_lock(&sport->port.lock);
|
||||
+ uart_port_lock(&sport->port);
|
||||
|
||||
while (!(lpuart32_read(&sport->port, UARTFIFO) & UARTFIFO_RXEMPT)) {
|
||||
flg = TTY_NORMAL;
|
||||
@@ -1170,12 +1170,12 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
|
||||
|
||||
async_tx_ack(sport->dma_rx_desc);
|
||||
|
||||
- spin_lock_irqsave(&sport->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&sport->port, &flags);
|
||||
|
||||
dmastat = dmaengine_tx_status(chan, sport->dma_rx_cookie, &state);
|
||||
if (dmastat == DMA_ERROR) {
|
||||
dev_err(sport->port.dev, "Rx DMA transfer failed!\n");
|
||||
- spin_unlock_irqrestore(&sport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&sport->port, flags);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -1244,7 +1244,7 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
|
||||
dma_sync_sg_for_device(chan->device->dev, &sport->rx_sgl, 1,
|
||||
DMA_FROM_DEVICE);
|
||||
|
||||
- spin_unlock_irqrestore(&sport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&sport->port, flags);
|
||||
|
||||
tty_flip_buffer_push(port);
|
||||
if (!sport->dma_idle_int)
|
||||
@@ -1335,9 +1335,9 @@ static void lpuart_timer_func(struct timer_list *t)
|
||||
mod_timer(&sport->lpuart_timer,
|
||||
jiffies + sport->dma_rx_timeout);
|
||||
|
||||
- if (spin_trylock_irqsave(&sport->port.lock, flags)) {
|
||||
+ if (uart_port_trylock_irqsave(&sport->port, &flags)) {
|
||||
sport->last_residue = state.residue;
|
||||
- spin_unlock_irqrestore(&sport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&sport->port, flags);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1802,14 +1802,14 @@ static void lpuart_hw_setup(struct lpuart_port *sport)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&sport->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&sport->port, &flags);
|
||||
|
||||
lpuart_setup_watermark_enable(sport);
|
||||
|
||||
lpuart_rx_dma_startup(sport);
|
||||
lpuart_tx_dma_startup(sport);
|
||||
|
||||
- spin_unlock_irqrestore(&sport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&sport->port, flags);
|
||||
}
|
||||
|
||||
static int lpuart_startup(struct uart_port *port)
|
||||
@@ -1859,7 +1859,7 @@ static void lpuart32_hw_setup(struct lpuart_port *sport)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&sport->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&sport->port, &flags);
|
||||
|
||||
lpuart32_hw_disable(sport);
|
||||
|
||||
@@ -1869,7 +1869,7 @@ static void lpuart32_hw_setup(struct lpuart_port *sport)
|
||||
lpuart32_setup_watermark_enable(sport);
|
||||
lpuart32_configure(sport);
|
||||
|
||||
- spin_unlock_irqrestore(&sport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&sport->port, flags);
|
||||
}
|
||||
|
||||
static int lpuart32_startup(struct uart_port *port)
|
||||
@@ -1932,7 +1932,7 @@ static void lpuart_shutdown(struct uart_port *port)
|
||||
unsigned char temp;
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
/* disable Rx/Tx and interrupts */
|
||||
temp = readb(port->membase + UARTCR2);
|
||||
@@ -1940,7 +1940,7 @@ static void lpuart_shutdown(struct uart_port *port)
|
||||
UARTCR2_TIE | UARTCR2_TCIE | UARTCR2_RIE);
|
||||
writeb(temp, port->membase + UARTCR2);
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
|
||||
lpuart_dma_shutdown(sport);
|
||||
}
|
||||
@@ -1952,7 +1952,7 @@ static void lpuart32_shutdown(struct uart_port *port)
|
||||
unsigned long temp;
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
/* clear status */
|
||||
temp = lpuart32_read(&sport->port, UARTSTAT);
|
||||
@@ -1969,7 +1969,7 @@ static void lpuart32_shutdown(struct uart_port *port)
|
||||
UARTCTRL_TIE | UARTCTRL_TCIE | UARTCTRL_RIE | UARTCTRL_SBK);
|
||||
lpuart32_write(port, temp, UARTCTRL);
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
|
||||
lpuart_dma_shutdown(sport);
|
||||
}
|
||||
@@ -2069,7 +2069,7 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
|
||||
if (old && sport->lpuart_dma_rx_use)
|
||||
lpuart_dma_rx_free(&sport->port);
|
||||
|
||||
- spin_lock_irqsave(&sport->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&sport->port, &flags);
|
||||
|
||||
sport->port.read_status_mask = 0;
|
||||
if (termios->c_iflag & INPCK)
|
||||
@@ -2124,7 +2124,7 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
|
||||
sport->lpuart_dma_rx_use = false;
|
||||
}
|
||||
|
||||
- spin_unlock_irqrestore(&sport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&sport->port, flags);
|
||||
}
|
||||
|
||||
static void __lpuart32_serial_setbrg(struct uart_port *port,
|
||||
@@ -2304,7 +2304,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
|
||||
if (old && sport->lpuart_dma_rx_use)
|
||||
lpuart_dma_rx_free(&sport->port);
|
||||
|
||||
- spin_lock_irqsave(&sport->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&sport->port, &flags);
|
||||
|
||||
sport->port.read_status_mask = 0;
|
||||
if (termios->c_iflag & INPCK)
|
||||
@@ -2359,7 +2359,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
|
||||
sport->lpuart_dma_rx_use = false;
|
||||
}
|
||||
|
||||
- spin_unlock_irqrestore(&sport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&sport->port, flags);
|
||||
}
|
||||
|
||||
static const char *lpuart_type(struct uart_port *port)
|
||||
@@ -2477,9 +2477,9 @@ lpuart_console_write(struct console *co, const char *s, unsigned int count)
|
||||
int locked = 1;
|
||||
|
||||
if (oops_in_progress)
|
||||
- locked = spin_trylock_irqsave(&sport->port.lock, flags);
|
||||
+ locked = uart_port_trylock_irqsave(&sport->port, &flags);
|
||||
else
|
||||
- spin_lock_irqsave(&sport->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&sport->port, &flags);
|
||||
|
||||
/* first save CR2 and then disable interrupts */
|
||||
cr2 = old_cr2 = readb(sport->port.membase + UARTCR2);
|
||||
@@ -2495,7 +2495,7 @@ lpuart_console_write(struct console *co, const char *s, unsigned int count)
|
||||
writeb(old_cr2, sport->port.membase + UARTCR2);
|
||||
|
||||
if (locked)
|
||||
- spin_unlock_irqrestore(&sport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&sport->port, flags);
|
||||
}
|
||||
|
||||
static void
|
||||
@@ -2507,9 +2507,9 @@ lpuart32_console_write(struct console *co, const char *s, unsigned int count)
|
||||
int locked = 1;
|
||||
|
||||
if (oops_in_progress)
|
||||
- locked = spin_trylock_irqsave(&sport->port.lock, flags);
|
||||
+ locked = uart_port_trylock_irqsave(&sport->port, &flags);
|
||||
else
|
||||
- spin_lock_irqsave(&sport->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&sport->port, &flags);
|
||||
|
||||
/* first save CR2 and then disable interrupts */
|
||||
cr = old_cr = lpuart32_read(&sport->port, UARTCTRL);
|
||||
@@ -2525,7 +2525,7 @@ lpuart32_console_write(struct console *co, const char *s, unsigned int count)
|
||||
lpuart32_write(&sport->port, old_cr, UARTCTRL);
|
||||
|
||||
if (locked)
|
||||
- spin_unlock_irqrestore(&sport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&sport->port, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -3089,7 +3089,7 @@ static int lpuart_suspend(struct device *dev)
|
||||
uart_suspend_port(&lpuart_reg, &sport->port);
|
||||
|
||||
if (lpuart_uport_is_active(sport)) {
|
||||
- spin_lock_irqsave(&sport->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&sport->port, &flags);
|
||||
if (lpuart_is_32(sport)) {
|
||||
/* disable Rx/Tx and interrupts */
|
||||
temp = lpuart32_read(&sport->port, UARTCTRL);
|
||||
@@ -3101,7 +3101,7 @@ static int lpuart_suspend(struct device *dev)
|
||||
temp &= ~(UARTCR2_TE | UARTCR2_TIE | UARTCR2_TCIE);
|
||||
writeb(temp, sport->port.membase + UARTCR2);
|
||||
}
|
||||
- spin_unlock_irqrestore(&sport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&sport->port, flags);
|
||||
|
||||
if (sport->lpuart_dma_rx_use) {
|
||||
/*
|
||||
@@ -3114,7 +3114,7 @@ static int lpuart_suspend(struct device *dev)
|
||||
lpuart_dma_rx_free(&sport->port);
|
||||
|
||||
/* Disable Rx DMA to use UART port as wakeup source */
|
||||
- spin_lock_irqsave(&sport->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&sport->port, &flags);
|
||||
if (lpuart_is_32(sport)) {
|
||||
temp = lpuart32_read(&sport->port, UARTBAUD);
|
||||
lpuart32_write(&sport->port, temp & ~UARTBAUD_RDMAE,
|
||||
@@ -3123,11 +3123,11 @@ static int lpuart_suspend(struct device *dev)
|
||||
writeb(readb(sport->port.membase + UARTCR5) &
|
||||
~UARTCR5_RDMAS, sport->port.membase + UARTCR5);
|
||||
}
|
||||
- spin_unlock_irqrestore(&sport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&sport->port, flags);
|
||||
}
|
||||
|
||||
if (sport->lpuart_dma_tx_use) {
|
||||
- spin_lock_irqsave(&sport->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&sport->port, &flags);
|
||||
if (lpuart_is_32(sport)) {
|
||||
temp = lpuart32_read(&sport->port, UARTBAUD);
|
||||
temp &= ~UARTBAUD_TDMAE;
|
||||
@@ -3137,7 +3137,7 @@ static int lpuart_suspend(struct device *dev)
|
||||
temp &= ~UARTCR5_TDMAS;
|
||||
writeb(temp, sport->port.membase + UARTCR5);
|
||||
}
|
||||
- spin_unlock_irqrestore(&sport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&sport->port, flags);
|
||||
sport->dma_tx_in_progress = false;
|
||||
dmaengine_terminate_sync(sport->dma_tx_chan);
|
||||
}
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -1,107 +0,0 @@
|
|||
From f30dbde350cc4692abd4d2cb48a88913d94edf7d Mon Sep 17 00:00:00 2001
|
||||
From: Paolo Abeni <pabeni@redhat.com>
|
||||
Date: Mon, 8 May 2023 08:17:44 +0200
|
||||
Subject: [PATCH 55/62] revert: "softirq: Let ksoftirqd do its job"
|
||||
|
||||
Due to the mentioned commit, when the ksoftirqd processes take charge
|
||||
of softirq processing, the system can experience high latencies.
|
||||
|
||||
In the past a few workarounds have been implemented for specific
|
||||
side-effects of the above:
|
||||
|
||||
commit 1ff688209e2e ("watchdog: core: make sure the watchdog_worker is not deferred")
|
||||
commit 8d5755b3f77b ("watchdog: softdog: fire watchdog even if softirqs do not get to run")
|
||||
commit 217f69743681 ("net: busy-poll: allow preemption in sk_busy_loop()")
|
||||
commit 3c53776e29f8 ("Mark HI and TASKLET softirq synchronous")
|
||||
|
||||
but the latency problem still exists in real-life workloads, see the
|
||||
link below.
|
||||
|
||||
The reverted commit intended to solve a live-lock scenario that can now
|
||||
be addressed with the NAPI threaded mode, introduced with commit
|
||||
29863d41bb6e ("net: implement threaded-able napi poll loop support"),
|
||||
and nowadays in a pretty stable status.
|
||||
|
||||
While a complete solution to put softirq processing under nice resource
|
||||
control would be preferable, that has proven to be a very hard task. In
|
||||
the short term, remove the main pain point, and also simplify a bit the
|
||||
current softirq implementation.
|
||||
|
||||
Note that this change also reverts commit 3c53776e29f8 ("Mark HI and
|
||||
TASKLET softirq synchronous") and commit 1342d8080f61 ("softirq: Don't
|
||||
skip softirq execution when softirq thread is parking"), which are
|
||||
direct follow-ups of the feature commit. A single change is preferred to
|
||||
avoid known bad intermediate states introduced by a patch series
|
||||
reverting them individually.
|
||||
|
||||
Link: https://lore.kernel.org/netdev/305d7742212cbe98621b16be782b0562f1012cb6.camel@redhat.com/
|
||||
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
|
||||
Tested-by: Jason Xing <kerneljasonxing@gmail.com>
|
||||
Reviewed-by: Jakub Kicinski <kuba@kernel.org>
|
||||
Reviewed-by: Eric Dumazet <edumazet@google.com>
|
||||
Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/57e66b364f1b6f09c9bc0316742c3b14f4ce83bd.1683526542.git.pabeni@redhat.com
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
(cherry picked from commit b8a04a538ed4755dc97c403ee3b8dd882955c98c)
|
||||
Signed-off-by: Clark Williams <clark.williams@gmail.com>
|
||||
---
|
||||
kernel/softirq.c | 22 ++--------------------
|
||||
1 file changed, 2 insertions(+), 20 deletions(-)
|
||||
|
||||
diff --git a/kernel/softirq.c b/kernel/softirq.c
|
||||
index 82f3e68fbe22..af9e879bbbf7 100644
|
||||
--- a/kernel/softirq.c
|
||||
+++ b/kernel/softirq.c
|
||||
@@ -80,21 +80,6 @@ static void wakeup_softirqd(void)
|
||||
wake_up_process(tsk);
|
||||
}
|
||||
|
||||
-/*
|
||||
- * If ksoftirqd is scheduled, we do not want to process pending softirqs
|
||||
- * right now. Let ksoftirqd handle this at its own rate, to get fairness,
|
||||
- * unless we're doing some of the synchronous softirqs.
|
||||
- */
|
||||
-#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
|
||||
-static bool ksoftirqd_running(unsigned long pending)
|
||||
-{
|
||||
- struct task_struct *tsk = __this_cpu_read(ksoftirqd);
|
||||
-
|
||||
- if (pending & SOFTIRQ_NOW_MASK)
|
||||
- return false;
|
||||
- return tsk && task_is_running(tsk) && !__kthread_should_park(tsk);
|
||||
-}
|
||||
-
|
||||
#ifdef CONFIG_TRACE_IRQFLAGS
|
||||
DEFINE_PER_CPU(int, hardirqs_enabled);
|
||||
DEFINE_PER_CPU(int, hardirq_context);
|
||||
@@ -236,7 +221,7 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
|
||||
goto out;
|
||||
|
||||
pending = local_softirq_pending();
|
||||
- if (!pending || ksoftirqd_running(pending))
|
||||
+ if (!pending)
|
||||
goto out;
|
||||
|
||||
/*
|
||||
@@ -432,9 +417,6 @@ static inline bool should_wake_ksoftirqd(void)
|
||||
|
||||
static inline void invoke_softirq(void)
|
||||
{
|
||||
- if (ksoftirqd_running(local_softirq_pending()))
|
||||
- return;
|
||||
-
|
||||
if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
|
||||
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
|
||||
/*
|
||||
@@ -468,7 +450,7 @@ asmlinkage __visible void do_softirq(void)
|
||||
|
||||
pending = local_softirq_pending();
|
||||
|
||||
- if (pending && !ksoftirqd_running(pending))
|
||||
+ if (pending)
|
||||
do_softirq_own_stack();
|
||||
|
||||
local_irq_restore(flags);
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,156 @@
|
|||
From c05570c9a89bbc1bdcdf29372853012b7f78d8f6 Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:45 +0206
|
||||
Subject: [PATCH 055/195] serial: icom: Use port lock wrappers
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-29-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/icom.c | 26 +++++++++++++-------------
|
||||
1 file changed, 13 insertions(+), 13 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/icom.c b/drivers/tty/serial/icom.c
|
||||
index 819f957b6b84..a75eafbcbea3 100644
|
||||
--- a/drivers/tty/serial/icom.c
|
||||
+++ b/drivers/tty/serial/icom.c
|
||||
@@ -929,7 +929,7 @@ static inline void check_modem_status(struct icom_port *icom_port)
|
||||
char delta_status;
|
||||
unsigned char status;
|
||||
|
||||
- spin_lock(&icom_port->uart_port.lock);
|
||||
+ uart_port_lock(&icom_port->uart_port);
|
||||
|
||||
/*modem input register */
|
||||
status = readb(&icom_port->dram->isr);
|
||||
@@ -951,7 +951,7 @@ static inline void check_modem_status(struct icom_port *icom_port)
|
||||
port.delta_msr_wait);
|
||||
old_status = status;
|
||||
}
|
||||
- spin_unlock(&icom_port->uart_port.lock);
|
||||
+ uart_port_unlock(&icom_port->uart_port);
|
||||
}
|
||||
|
||||
static void xmit_interrupt(u16 port_int_reg, struct icom_port *icom_port)
|
||||
@@ -1093,7 +1093,7 @@ static void process_interrupt(u16 port_int_reg,
|
||||
struct icom_port *icom_port)
|
||||
{
|
||||
|
||||
- spin_lock(&icom_port->uart_port.lock);
|
||||
+ uart_port_lock(&icom_port->uart_port);
|
||||
trace(icom_port, "INTERRUPT", port_int_reg);
|
||||
|
||||
if (port_int_reg & (INT_XMIT_COMPLETED | INT_XMIT_DISABLED))
|
||||
@@ -1102,7 +1102,7 @@ static void process_interrupt(u16 port_int_reg,
|
||||
if (port_int_reg & INT_RCV_COMPLETED)
|
||||
recv_interrupt(port_int_reg, icom_port);
|
||||
|
||||
- spin_unlock(&icom_port->uart_port.lock);
|
||||
+ uart_port_unlock(&icom_port->uart_port);
|
||||
}
|
||||
|
||||
static irqreturn_t icom_interrupt(int irq, void *dev_id)
|
||||
@@ -1186,14 +1186,14 @@ static unsigned int icom_tx_empty(struct uart_port *port)
|
||||
int ret;
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
if (le16_to_cpu(icom_port->statStg->xmit[0].flags) &
|
||||
SA_FLAGS_READY_TO_XMIT)
|
||||
ret = TIOCSER_TEMT;
|
||||
else
|
||||
ret = 0;
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -1276,7 +1276,7 @@ static void icom_send_xchar(struct uart_port *port, char ch)
|
||||
|
||||
/* wait .1 sec to send char */
|
||||
for (index = 0; index < 10; index++) {
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
xdata = readb(&icom_port->dram->xchar);
|
||||
if (xdata == 0x00) {
|
||||
trace(icom_port, "QUICK_WRITE", 0);
|
||||
@@ -1284,10 +1284,10 @@ static void icom_send_xchar(struct uart_port *port, char ch)
|
||||
|
||||
/* flush write operation */
|
||||
xdata = readb(&icom_port->dram->xchar);
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
break;
|
||||
}
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
msleep(10);
|
||||
}
|
||||
}
|
||||
@@ -1307,7 +1307,7 @@ static void icom_break(struct uart_port *port, int break_state)
|
||||
unsigned char cmdReg;
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
trace(icom_port, "BREAK", 0);
|
||||
cmdReg = readb(&icom_port->dram->CmdReg);
|
||||
if (break_state == -1) {
|
||||
@@ -1315,7 +1315,7 @@ static void icom_break(struct uart_port *port, int break_state)
|
||||
} else {
|
||||
writeb(cmdReg & ~CMD_SND_BREAK, &icom_port->dram->CmdReg);
|
||||
}
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
static int icom_open(struct uart_port *port)
|
||||
@@ -1365,7 +1365,7 @@ static void icom_set_termios(struct uart_port *port, struct ktermios *termios,
|
||||
unsigned long offset;
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
trace(icom_port, "CHANGE_SPEED", 0);
|
||||
|
||||
cflag = termios->c_cflag;
|
||||
@@ -1516,7 +1516,7 @@ static void icom_set_termios(struct uart_port *port, struct ktermios *termios,
|
||||
trace(icom_port, "XR_ENAB", 0);
|
||||
writeb(CMD_XMIT_RCV_ENABLE, &icom_port->dram->CmdReg);
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
static const char *icom_type(struct uart_port *port)
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -1,176 +0,0 @@
|
|||
From 4937a4dc4d39cff4badeb727c8d6c1ecf4422a0d Mon Sep 17 00:00:00 2001
|
||||
From: Peter Zijlstra <peterz@infradead.org>
|
||||
Date: Tue, 25 Apr 2023 17:03:13 +0200
|
||||
Subject: [PATCH 56/62] debugobjects,locking: Annotate debug_object_fill_pool()
|
||||
wait type violation
|
||||
|
||||
There is an explicit wait-type violation in debug_object_fill_pool()
|
||||
for PREEMPT_RT=n kernels which allows them to more easily fill the
|
||||
object pool and reduce the chance of allocation failures.
|
||||
|
||||
Lockdep's wait-type checks are designed to check the PREEMPT_RT
|
||||
locking rules even for PREEMPT_RT=n kernels and object to this, so
|
||||
create a lockdep annotation to allow this to stand.
|
||||
|
||||
Specifically, create a 'lock' type that overrides the inner wait-type
|
||||
while it is held -- allowing one to temporarily raise it, such that
|
||||
the violation is hidden.
|
||||
|
||||
Reported-by: Vlastimil Babka <vbabka@suse.cz>
|
||||
Reported-by: Qi Zheng <zhengqi.arch@bytedance.com>
|
||||
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
|
||||
Tested-by: Qi Zheng <zhengqi.arch@bytedance.com>
|
||||
Link: https://lkml.kernel.org/r/20230429100614.GA1489784@hirez.programming.kicks-ass.net
|
||||
(cherry picked from commit 0cce06ba859a515bd06224085d3addb870608b6d)
|
||||
Signed-off-by: Clark Williams <clark.williams@gmail.com>
|
||||
---
|
||||
include/linux/lockdep.h | 14 ++++++++++++++
|
||||
include/linux/lockdep_types.h | 1 +
|
||||
kernel/locking/lockdep.c | 28 +++++++++++++++++++++-------
|
||||
lib/debugobjects.c | 15 +++++++++++++--
|
||||
4 files changed, 49 insertions(+), 9 deletions(-)
|
||||
|
||||
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
|
||||
index 1023f349af71..a3329fb49b33 100644
|
||||
--- a/include/linux/lockdep.h
|
||||
+++ b/include/linux/lockdep.h
|
||||
@@ -339,6 +339,16 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
|
||||
#define lockdep_repin_lock(l,c) lock_repin_lock(&(l)->dep_map, (c))
|
||||
#define lockdep_unpin_lock(l,c) lock_unpin_lock(&(l)->dep_map, (c))
|
||||
|
||||
+/*
|
||||
+ * Must use lock_map_aquire_try() with override maps to avoid
|
||||
+ * lockdep thinking they participate in the block chain.
|
||||
+ */
|
||||
+#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type) \
|
||||
+ struct lockdep_map _name = { \
|
||||
+ .name = #_name "-wait-type-override", \
|
||||
+ .wait_type_inner = _wait_type, \
|
||||
+ .lock_type = LD_LOCK_WAIT_OVERRIDE, }
|
||||
+
|
||||
#else /* !CONFIG_LOCKDEP */
|
||||
|
||||
static inline void lockdep_init_task(struct task_struct *task)
|
||||
@@ -427,6 +437,9 @@ extern int lockdep_is_held(const void *);
|
||||
#define lockdep_repin_lock(l, c) do { (void)(l); (void)(c); } while (0)
|
||||
#define lockdep_unpin_lock(l, c) do { (void)(l); (void)(c); } while (0)
|
||||
|
||||
+#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type) \
|
||||
+ struct lockdep_map __maybe_unused _name = {}
|
||||
+
|
||||
#endif /* !LOCKDEP */
|
||||
|
||||
enum xhlock_context_t {
|
||||
@@ -551,6 +564,7 @@ do { \
|
||||
#define rwsem_release(l, i) lock_release(l, i)
|
||||
|
||||
#define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
|
||||
+#define lock_map_acquire_try(l) lock_acquire_exclusive(l, 0, 1, NULL, _THIS_IP_)
|
||||
#define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
|
||||
#define lock_map_acquire_tryread(l) lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
|
||||
#define lock_map_release(l) lock_release(l, _THIS_IP_)
|
||||
diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h
|
||||
index d22430840b53..59f4fb1626ea 100644
|
||||
--- a/include/linux/lockdep_types.h
|
||||
+++ b/include/linux/lockdep_types.h
|
||||
@@ -33,6 +33,7 @@ enum lockdep_wait_type {
|
||||
enum lockdep_lock_type {
|
||||
LD_LOCK_NORMAL = 0, /* normal, catch all */
|
||||
LD_LOCK_PERCPU, /* percpu */
|
||||
+ LD_LOCK_WAIT_OVERRIDE, /* annotation */
|
||||
LD_LOCK_MAX,
|
||||
};
|
||||
|
||||
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
|
||||
index 3b38303ed27b..a046e03c7ead 100644
|
||||
--- a/kernel/locking/lockdep.c
|
||||
+++ b/kernel/locking/lockdep.c
|
||||
@@ -2245,6 +2245,9 @@ static inline bool usage_match(struct lock_list *entry, void *mask)
|
||||
|
||||
static inline bool usage_skip(struct lock_list *entry, void *mask)
|
||||
{
|
||||
+ if (entry->class->lock_type == LD_LOCK_NORMAL)
|
||||
+ return false;
|
||||
+
|
||||
/*
|
||||
* Skip local_lock() for irq inversion detection.
|
||||
*
|
||||
@@ -2271,14 +2274,16 @@ static inline bool usage_skip(struct lock_list *entry, void *mask)
|
||||
* As a result, we will skip local_lock(), when we search for irq
|
||||
* inversion bugs.
|
||||
*/
|
||||
- if (entry->class->lock_type == LD_LOCK_PERCPU) {
|
||||
- if (DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG))
|
||||
- return false;
|
||||
+ if (entry->class->lock_type == LD_LOCK_PERCPU &&
|
||||
+ DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG))
|
||||
+ return false;
|
||||
|
||||
- return true;
|
||||
- }
|
||||
+ /*
|
||||
+ * Skip WAIT_OVERRIDE for irq inversion detection -- it's not actually
|
||||
+ * a lock and only used to override the wait_type.
|
||||
+ */
|
||||
|
||||
- return false;
|
||||
+ return true;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -4745,7 +4750,8 @@ static int check_wait_context(struct task_struct *curr, struct held_lock *next)
|
||||
|
||||
for (; depth < curr->lockdep_depth; depth++) {
|
||||
struct held_lock *prev = curr->held_locks + depth;
|
||||
- u8 prev_inner = hlock_class(prev)->wait_type_inner;
|
||||
+ struct lock_class *class = hlock_class(prev);
|
||||
+ u8 prev_inner = class->wait_type_inner;
|
||||
|
||||
if (prev_inner) {
|
||||
/*
|
||||
@@ -4755,6 +4761,14 @@ static int check_wait_context(struct task_struct *curr, struct held_lock *next)
|
||||
* Also due to trylocks.
|
||||
*/
|
||||
curr_inner = min(curr_inner, prev_inner);
|
||||
+
|
||||
+ /*
|
||||
+ * Allow override for annotations -- this is typically
|
||||
+ * only valid/needed for code that only exists when
|
||||
+ * CONFIG_PREEMPT_RT=n.
|
||||
+ */
|
||||
+ if (unlikely(class->lock_type == LD_LOCK_WAIT_OVERRIDE))
|
||||
+ curr_inner = prev_inner;
|
||||
}
|
||||
}
|
||||
|
||||
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
|
||||
index dacb80c22c4f..3c9e00e207dc 100644
|
||||
--- a/lib/debugobjects.c
|
||||
+++ b/lib/debugobjects.c
|
||||
@@ -600,10 +600,21 @@ static void debug_objects_fill_pool(void)
|
||||
{
|
||||
/*
|
||||
* On RT enabled kernels the pool refill must happen in preemptible
|
||||
- * context:
|
||||
+ * context -- for !RT kernels we rely on the fact that spinlock_t and
|
||||
+ * raw_spinlock_t are basically the same type and this lock-type
|
||||
+ * inversion works just fine.
|
||||
*/
|
||||
- if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
|
||||
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
|
||||
+ /*
|
||||
+ * Annotate away the spinlock_t inside raw_spinlock_t warning
|
||||
+ * by temporarily raising the wait-type to WAIT_SLEEP, matching
|
||||
+ * the preemptible() condition above.
|
||||
+ */
|
||||
+ static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
|
||||
+ lock_map_acquire_try(&fill_pool_map);
|
||||
fill_pool();
|
||||
+ lock_map_release(&fill_pool_map);
|
||||
+ }
|
||||
}
|
||||
|
||||
static void
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,359 @@
|
|||
From cc5463fe75395f9912053e3256c5c78f7313c094 Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:46 +0206
|
||||
Subject: [PATCH 056/195] serial: imx: Use port lock wrappers
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-30-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/imx.c | 84 ++++++++++++++++++++--------------------
|
||||
1 file changed, 42 insertions(+), 42 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
|
||||
index cd36251ba1c0..4123cb76c324 100644
|
||||
--- a/drivers/tty/serial/imx.c
|
||||
+++ b/drivers/tty/serial/imx.c
|
||||
@@ -575,7 +575,7 @@ static void imx_uart_dma_tx_callback(void *data)
|
||||
unsigned long flags;
|
||||
u32 ucr1;
|
||||
|
||||
- spin_lock_irqsave(&sport->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&sport->port, &flags);
|
||||
|
||||
dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
|
||||
|
||||
@@ -600,7 +600,7 @@ static void imx_uart_dma_tx_callback(void *data)
|
||||
imx_uart_writel(sport, ucr4, UCR4);
|
||||
}
|
||||
|
||||
- spin_unlock_irqrestore(&sport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&sport->port, flags);
|
||||
}
|
||||
|
||||
/* called with port.lock taken and irqs off */
|
||||
@@ -766,11 +766,11 @@ static irqreturn_t imx_uart_rtsint(int irq, void *dev_id)
|
||||
struct imx_port *sport = dev_id;
|
||||
irqreturn_t ret;
|
||||
|
||||
- spin_lock(&sport->port.lock);
|
||||
+ uart_port_lock(&sport->port);
|
||||
|
||||
ret = __imx_uart_rtsint(irq, dev_id);
|
||||
|
||||
- spin_unlock(&sport->port.lock);
|
||||
+ uart_port_unlock(&sport->port);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@@ -779,9 +779,9 @@ static irqreturn_t imx_uart_txint(int irq, void *dev_id)
|
||||
{
|
||||
struct imx_port *sport = dev_id;
|
||||
|
||||
- spin_lock(&sport->port.lock);
|
||||
+ uart_port_lock(&sport->port);
|
||||
imx_uart_transmit_buffer(sport);
|
||||
- spin_unlock(&sport->port.lock);
|
||||
+ uart_port_unlock(&sport->port);
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
@@ -895,11 +895,11 @@ static irqreturn_t imx_uart_rxint(int irq, void *dev_id)
|
||||
struct imx_port *sport = dev_id;
|
||||
irqreturn_t ret;
|
||||
|
||||
- spin_lock(&sport->port.lock);
|
||||
+ uart_port_lock(&sport->port);
|
||||
|
||||
ret = __imx_uart_rxint(irq, dev_id);
|
||||
|
||||
- spin_unlock(&sport->port.lock);
|
||||
+ uart_port_unlock(&sport->port);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@@ -962,7 +962,7 @@ static irqreturn_t imx_uart_int(int irq, void *dev_id)
|
||||
unsigned int usr1, usr2, ucr1, ucr2, ucr3, ucr4;
|
||||
irqreturn_t ret = IRQ_NONE;
|
||||
|
||||
- spin_lock(&sport->port.lock);
|
||||
+ uart_port_lock(&sport->port);
|
||||
|
||||
usr1 = imx_uart_readl(sport, USR1);
|
||||
usr2 = imx_uart_readl(sport, USR2);
|
||||
@@ -1032,7 +1032,7 @@ static irqreturn_t imx_uart_int(int irq, void *dev_id)
|
||||
ret = IRQ_HANDLED;
|
||||
}
|
||||
|
||||
- spin_unlock(&sport->port.lock);
|
||||
+ uart_port_unlock(&sport->port);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@@ -1115,7 +1115,7 @@ static void imx_uart_break_ctl(struct uart_port *port, int break_state)
|
||||
unsigned long flags;
|
||||
u32 ucr1;
|
||||
|
||||
- spin_lock_irqsave(&sport->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&sport->port, &flags);
|
||||
|
||||
ucr1 = imx_uart_readl(sport, UCR1) & ~UCR1_SNDBRK;
|
||||
|
||||
@@ -1124,7 +1124,7 @@ static void imx_uart_break_ctl(struct uart_port *port, int break_state)
|
||||
|
||||
imx_uart_writel(sport, ucr1, UCR1);
|
||||
|
||||
- spin_unlock_irqrestore(&sport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&sport->port, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1137,9 +1137,9 @@ static void imx_uart_timeout(struct timer_list *t)
|
||||
unsigned long flags;
|
||||
|
||||
if (sport->port.state) {
|
||||
- spin_lock_irqsave(&sport->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&sport->port, &flags);
|
||||
imx_uart_mctrl_check(sport);
|
||||
- spin_unlock_irqrestore(&sport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&sport->port, flags);
|
||||
|
||||
mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT);
|
||||
}
|
||||
@@ -1169,9 +1169,9 @@ static void imx_uart_dma_rx_callback(void *data)
|
||||
status = dmaengine_tx_status(chan, sport->rx_cookie, &state);
|
||||
|
||||
if (status == DMA_ERROR) {
|
||||
- spin_lock(&sport->port.lock);
|
||||
+ uart_port_lock(&sport->port);
|
||||
imx_uart_clear_rx_errors(sport);
|
||||
- spin_unlock(&sport->port.lock);
|
||||
+ uart_port_unlock(&sport->port);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -1200,9 +1200,9 @@ static void imx_uart_dma_rx_callback(void *data)
|
||||
r_bytes = rx_ring->head - rx_ring->tail;
|
||||
|
||||
/* If we received something, check for 0xff flood */
|
||||
- spin_lock(&sport->port.lock);
|
||||
+ uart_port_lock(&sport->port);
|
||||
imx_uart_check_flood(sport, imx_uart_readl(sport, USR2));
|
||||
- spin_unlock(&sport->port.lock);
|
||||
+ uart_port_unlock(&sport->port);
|
||||
|
||||
if (!(sport->port.ignore_status_mask & URXD_DUMMY_READ)) {
|
||||
|
||||
@@ -1460,7 +1460,7 @@ static int imx_uart_startup(struct uart_port *port)
|
||||
if (!uart_console(port) && imx_uart_dma_init(sport) == 0)
|
||||
dma_is_inited = 1;
|
||||
|
||||
- spin_lock_irqsave(&sport->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&sport->port, &flags);
|
||||
|
||||
/* Reset fifo's and state machines */
|
||||
imx_uart_soft_reset(sport);
|
||||
@@ -1533,7 +1533,7 @@ static int imx_uart_startup(struct uart_port *port)
|
||||
|
||||
imx_uart_disable_loopback_rs485(sport);
|
||||
|
||||
- spin_unlock_irqrestore(&sport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&sport->port, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -1558,21 +1558,21 @@ static void imx_uart_shutdown(struct uart_port *port)
|
||||
sport->dma_is_rxing = 0;
|
||||
}
|
||||
|
||||
- spin_lock_irqsave(&sport->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&sport->port, &flags);
|
||||
imx_uart_stop_tx(port);
|
||||
imx_uart_stop_rx(port);
|
||||
imx_uart_disable_dma(sport);
|
||||
- spin_unlock_irqrestore(&sport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&sport->port, flags);
|
||||
imx_uart_dma_exit(sport);
|
||||
}
|
||||
|
||||
mctrl_gpio_disable_ms(sport->gpios);
|
||||
|
||||
- spin_lock_irqsave(&sport->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&sport->port, &flags);
|
||||
ucr2 = imx_uart_readl(sport, UCR2);
|
||||
ucr2 &= ~(UCR2_TXEN | UCR2_ATEN);
|
||||
imx_uart_writel(sport, ucr2, UCR2);
|
||||
- spin_unlock_irqrestore(&sport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&sport->port, flags);
|
||||
|
||||
/*
|
||||
* Stop our timer.
|
||||
@@ -1583,7 +1583,7 @@ static void imx_uart_shutdown(struct uart_port *port)
|
||||
* Disable all interrupts, port and break condition.
|
||||
*/
|
||||
|
||||
- spin_lock_irqsave(&sport->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&sport->port, &flags);
|
||||
|
||||
ucr1 = imx_uart_readl(sport, UCR1);
|
||||
ucr1 &= ~(UCR1_TRDYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_RXDMAEN |
|
||||
@@ -1605,7 +1605,7 @@ static void imx_uart_shutdown(struct uart_port *port)
|
||||
ucr4 &= ~UCR4_TCEN;
|
||||
imx_uart_writel(sport, ucr4, UCR4);
|
||||
|
||||
- spin_unlock_irqrestore(&sport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&sport->port, flags);
|
||||
|
||||
clk_disable_unprepare(sport->clk_per);
|
||||
clk_disable_unprepare(sport->clk_ipg);
|
||||
@@ -1668,7 +1668,7 @@ imx_uart_set_termios(struct uart_port *port, struct ktermios *termios,
|
||||
baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
|
||||
quot = uart_get_divisor(port, baud);
|
||||
|
||||
- spin_lock_irqsave(&sport->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&sport->port, &flags);
|
||||
|
||||
/*
|
||||
* Read current UCR2 and save it for future use, then clear all the bits
|
||||
@@ -1796,7 +1796,7 @@ imx_uart_set_termios(struct uart_port *port, struct ktermios *termios,
|
||||
if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
|
||||
imx_uart_enable_ms(&sport->port);
|
||||
|
||||
- spin_unlock_irqrestore(&sport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&sport->port, flags);
|
||||
}
|
||||
|
||||
static const char *imx_uart_type(struct uart_port *port)
|
||||
@@ -1858,7 +1858,7 @@ static int imx_uart_poll_init(struct uart_port *port)
|
||||
|
||||
imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
|
||||
|
||||
- spin_lock_irqsave(&sport->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&sport->port, &flags);
|
||||
|
||||
/*
|
||||
* Be careful about the order of enabling bits here. First enable the
|
||||
@@ -1886,7 +1886,7 @@ static int imx_uart_poll_init(struct uart_port *port)
|
||||
imx_uart_writel(sport, ucr1 | UCR1_RRDYEN, UCR1);
|
||||
imx_uart_writel(sport, ucr2 | UCR2_ATEN, UCR2);
|
||||
|
||||
- spin_unlock_irqrestore(&sport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&sport->port, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -2005,9 +2005,9 @@ imx_uart_console_write(struct console *co, const char *s, unsigned int count)
|
||||
if (sport->port.sysrq)
|
||||
locked = 0;
|
||||
else if (oops_in_progress)
|
||||
- locked = spin_trylock_irqsave(&sport->port.lock, flags);
|
||||
+ locked = uart_port_trylock_irqsave(&sport->port, &flags);
|
||||
else
|
||||
- spin_lock_irqsave(&sport->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&sport->port, &flags);
|
||||
|
||||
/*
|
||||
* First, save UCR1/2/3 and then disable interrupts
|
||||
@@ -2035,7 +2035,7 @@ imx_uart_console_write(struct console *co, const char *s, unsigned int count)
|
||||
imx_uart_ucrs_restore(sport, &old_ucr);
|
||||
|
||||
if (locked)
|
||||
- spin_unlock_irqrestore(&sport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&sport->port, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -2193,10 +2193,10 @@ static enum hrtimer_restart imx_trigger_start_tx(struct hrtimer *t)
|
||||
struct imx_port *sport = container_of(t, struct imx_port, trigger_start_tx);
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&sport->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&sport->port, &flags);
|
||||
if (sport->tx_state == WAIT_AFTER_RTS)
|
||||
imx_uart_start_tx(&sport->port);
|
||||
- spin_unlock_irqrestore(&sport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&sport->port, flags);
|
||||
|
||||
return HRTIMER_NORESTART;
|
||||
}
|
||||
@@ -2206,10 +2206,10 @@ static enum hrtimer_restart imx_trigger_stop_tx(struct hrtimer *t)
|
||||
struct imx_port *sport = container_of(t, struct imx_port, trigger_stop_tx);
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&sport->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&sport->port, &flags);
|
||||
if (sport->tx_state == WAIT_AFTER_SEND)
|
||||
imx_uart_stop_tx(&sport->port);
|
||||
- spin_unlock_irqrestore(&sport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&sport->port, flags);
|
||||
|
||||
return HRTIMER_NORESTART;
|
||||
}
|
||||
@@ -2476,9 +2476,9 @@ static void imx_uart_restore_context(struct imx_port *sport)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&sport->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&sport->port, &flags);
|
||||
if (!sport->context_saved) {
|
||||
- spin_unlock_irqrestore(&sport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&sport->port, flags);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -2493,7 +2493,7 @@ static void imx_uart_restore_context(struct imx_port *sport)
|
||||
imx_uart_writel(sport, sport->saved_reg[2], UCR3);
|
||||
imx_uart_writel(sport, sport->saved_reg[3], UCR4);
|
||||
sport->context_saved = false;
|
||||
- spin_unlock_irqrestore(&sport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&sport->port, flags);
|
||||
}
|
||||
|
||||
static void imx_uart_save_context(struct imx_port *sport)
|
||||
@@ -2501,7 +2501,7 @@ static void imx_uart_save_context(struct imx_port *sport)
|
||||
unsigned long flags;
|
||||
|
||||
/* Save necessary regs */
|
||||
- spin_lock_irqsave(&sport->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&sport->port, &flags);
|
||||
sport->saved_reg[0] = imx_uart_readl(sport, UCR1);
|
||||
sport->saved_reg[1] = imx_uart_readl(sport, UCR2);
|
||||
sport->saved_reg[2] = imx_uart_readl(sport, UCR3);
|
||||
@@ -2513,7 +2513,7 @@ static void imx_uart_save_context(struct imx_port *sport)
|
||||
sport->saved_reg[8] = imx_uart_readl(sport, UBMR);
|
||||
sport->saved_reg[9] = imx_uart_readl(sport, IMX21_UTS);
|
||||
sport->context_saved = true;
|
||||
- spin_unlock_irqrestore(&sport->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&sport->port, flags);
|
||||
}
|
||||
|
||||
static void imx_uart_enable_wakeup(struct imx_port *sport, bool on)
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -1,51 +0,0 @@
|
|||
From 65a646612737cf862237312fd88afc0b0b6954d8 Mon Sep 17 00:00:00 2001
|
||||
From: Wander Lairson Costa <wander@redhat.com>
|
||||
Date: Wed, 14 Jun 2023 09:23:22 -0300
|
||||
Subject: [PATCH 57/62] sched: avoid false lockdep splat in put_task_struct()
|
||||
|
||||
In put_task_struct(), a spin_lock is indirectly acquired under the kernel
|
||||
stock. When running the kernel in real-time (RT) configuration, the
|
||||
operation is dispatched to a preemptible context call to ensure
|
||||
guaranteed preemption. However, if PROVE_RAW_LOCK_NESTING is enabled
|
||||
and __put_task_struct() is called while holding a raw_spinlock, lockdep
|
||||
incorrectly reports an "Invalid lock context" in the stock kernel.
|
||||
|
||||
This false splat occurs because lockdep is unaware of the different
|
||||
route taken under RT. To address this issue, override the inner wait
|
||||
type to prevent the false lockdep splat.
|
||||
|
||||
Signed-off-by: Wander Lairson Costa <wander@redhat.com>
|
||||
Suggested-by: Oleg Nesterov <oleg@redhat.com>
|
||||
Suggested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Suggested-by: Peter Zijlstra <peterz@infradead.org>
|
||||
Cc: Steven Rostedt <rostedt@goodmis.org>
|
||||
Cc: Luis Goncalves <lgoncalv@redhat.com>
|
||||
Link: https://lore.kernel.org/r/20230614122323.37957-3-wander@redhat.com
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
(cherry picked from commit a5e446e728e89d5f5c5e427cc919bc7813c64c28)
|
||||
Signed-off-by: Clark Williams <clark.williams@gmail.com>
|
||||
---
|
||||
include/linux/sched/task.h | 6 +++++-
|
||||
1 file changed, 5 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
|
||||
index 7291fb6399d2..de7ebd2bf3ba 100644
|
||||
--- a/include/linux/sched/task.h
|
||||
+++ b/include/linux/sched/task.h
|
||||
@@ -141,8 +141,12 @@ static inline void put_task_struct(struct task_struct *t)
|
||||
*/
|
||||
if (IS_ENABLED(CONFIG_PREEMPT_RT) && !preemptible())
|
||||
call_rcu(&t->rcu, __put_task_struct_rcu_cb);
|
||||
- else
|
||||
+ else {
|
||||
+ static DEFINE_WAIT_OVERRIDE_MAP(put_task_map, LD_WAIT_SLEEP);
|
||||
+ lock_map_acquire_try(&put_task_map);
|
||||
__put_task_struct(t);
|
||||
+ lock_map_release(&put_task_map);
|
||||
+ }
|
||||
}
|
||||
|
||||
static inline void put_task_struct_many(struct task_struct *t, int nr)
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,190 @@
|
|||
From 7701e267b929a56a55983404c00d4736ffadb847 Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:47 +0206
|
||||
Subject: [PATCH 057/195] serial: ip22zilog: Use port lock wrappers
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-31-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/ip22zilog.c | 36 +++++++++++++++++-----------------
|
||||
1 file changed, 18 insertions(+), 18 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c
|
||||
index 845ff706bc59..320b29cd4683 100644
|
||||
--- a/drivers/tty/serial/ip22zilog.c
|
||||
+++ b/drivers/tty/serial/ip22zilog.c
|
||||
@@ -432,7 +432,7 @@ static irqreturn_t ip22zilog_interrupt(int irq, void *dev_id)
|
||||
unsigned char r3;
|
||||
bool push = false;
|
||||
|
||||
- spin_lock(&up->port.lock);
|
||||
+ uart_port_lock(&up->port);
|
||||
r3 = read_zsreg(channel, R3);
|
||||
|
||||
/* Channel A */
|
||||
@@ -448,7 +448,7 @@ static irqreturn_t ip22zilog_interrupt(int irq, void *dev_id)
|
||||
if (r3 & CHATxIP)
|
||||
ip22zilog_transmit_chars(up, channel);
|
||||
}
|
||||
- spin_unlock(&up->port.lock);
|
||||
+ uart_port_unlock(&up->port);
|
||||
|
||||
if (push)
|
||||
tty_flip_buffer_push(&up->port.state->port);
|
||||
@@ -458,7 +458,7 @@ static irqreturn_t ip22zilog_interrupt(int irq, void *dev_id)
|
||||
channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
|
||||
push = false;
|
||||
|
||||
- spin_lock(&up->port.lock);
|
||||
+ uart_port_lock(&up->port);
|
||||
if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
|
||||
writeb(RES_H_IUS, &channel->control);
|
||||
ZSDELAY();
|
||||
@@ -471,7 +471,7 @@ static irqreturn_t ip22zilog_interrupt(int irq, void *dev_id)
|
||||
if (r3 & CHBTxIP)
|
||||
ip22zilog_transmit_chars(up, channel);
|
||||
}
|
||||
- spin_unlock(&up->port.lock);
|
||||
+ uart_port_unlock(&up->port);
|
||||
|
||||
if (push)
|
||||
tty_flip_buffer_push(&up->port.state->port);
|
||||
@@ -504,11 +504,11 @@ static unsigned int ip22zilog_tx_empty(struct uart_port *port)
|
||||
unsigned char status;
|
||||
unsigned int ret;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
status = ip22zilog_read_channel_status(port);
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
|
||||
if (status & Tx_BUF_EMP)
|
||||
ret = TIOCSER_TEMT;
|
||||
@@ -664,7 +664,7 @@ static void ip22zilog_break_ctl(struct uart_port *port, int break_state)
|
||||
else
|
||||
clear_bits |= SND_BRK;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
new_reg = (up->curregs[R5] | set_bits) & ~clear_bits;
|
||||
if (new_reg != up->curregs[R5]) {
|
||||
@@ -674,7 +674,7 @@ static void ip22zilog_break_ctl(struct uart_port *port, int break_state)
|
||||
write_zsreg(channel, R5, up->curregs[R5]);
|
||||
}
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
static void __ip22zilog_reset(struct uart_ip22zilog_port *up)
|
||||
@@ -735,9 +735,9 @@ static int ip22zilog_startup(struct uart_port *port)
|
||||
if (ZS_IS_CONS(up))
|
||||
return 0;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
__ip22zilog_startup(up);
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -775,7 +775,7 @@ static void ip22zilog_shutdown(struct uart_port *port)
|
||||
if (ZS_IS_CONS(up))
|
||||
return;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
channel = ZILOG_CHANNEL_FROM_PORT(port);
|
||||
|
||||
@@ -788,7 +788,7 @@ static void ip22zilog_shutdown(struct uart_port *port)
|
||||
up->curregs[R5] &= ~SND_BRK;
|
||||
ip22zilog_maybe_update_regs(up, channel);
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
/* Shared by TTY driver and serial console setup. The port lock is held
|
||||
@@ -880,7 +880,7 @@ ip22zilog_set_termios(struct uart_port *port, struct ktermios *termios,
|
||||
|
||||
baud = uart_get_baud_rate(port, termios, old, 1200, 76800);
|
||||
|
||||
- spin_lock_irqsave(&up->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&up->port, &flags);
|
||||
|
||||
brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR);
|
||||
|
||||
@@ -894,7 +894,7 @@ ip22zilog_set_termios(struct uart_port *port, struct ktermios *termios,
|
||||
ip22zilog_maybe_update_regs(up, ZILOG_CHANNEL_FROM_PORT(port));
|
||||
uart_update_timeout(port, termios->c_cflag, baud);
|
||||
|
||||
- spin_unlock_irqrestore(&up->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&up->port, flags);
|
||||
}
|
||||
|
||||
static const char *ip22zilog_type(struct uart_port *port)
|
||||
@@ -1016,10 +1016,10 @@ ip22zilog_console_write(struct console *con, const char *s, unsigned int count)
|
||||
struct uart_ip22zilog_port *up = &ip22zilog_port_table[con->index];
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&up->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&up->port, &flags);
|
||||
uart_console_write(&up->port, s, count, ip22zilog_put_char);
|
||||
udelay(2);
|
||||
- spin_unlock_irqrestore(&up->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&up->port, flags);
|
||||
}
|
||||
|
||||
static int __init ip22zilog_console_setup(struct console *con, char *options)
|
||||
@@ -1034,13 +1034,13 @@ static int __init ip22zilog_console_setup(struct console *con, char *options)
|
||||
|
||||
printk(KERN_INFO "Console: ttyS%d (IP22-Zilog)\n", con->index);
|
||||
|
||||
- spin_lock_irqsave(&up->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&up->port, &flags);
|
||||
|
||||
up->curregs[R15] |= BRKIE;
|
||||
|
||||
__ip22zilog_startup(up);
|
||||
|
||||
- spin_unlock_irqrestore(&up->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&up->port, flags);
|
||||
|
||||
if (options)
|
||||
uart_parse_options(options, &baud, &parity, &bits, &flow);
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -1,96 +0,0 @@
|
|||
From 295da0c59de4794e746f56b1567e3188f9addc00 Mon Sep 17 00:00:00 2001
|
||||
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Date: Fri, 23 Jun 2023 22:15:17 +0200
|
||||
Subject: [PATCH 58/62] mm/page_alloc: Use write_seqlock_irqsave() instead
|
||||
write_seqlock() + local_irq_save().
|
||||
|
||||
__build_all_zonelists() acquires zonelist_update_seq by first disabling
|
||||
interrupts via local_irq_save() and then acquiring the seqlock with
|
||||
write_seqlock(). This is troublesome and leads to problems on
|
||||
PREEMPT_RT. The problem is that the inner spinlock_t becomes a sleeping
|
||||
lock on PREEMPT_RT and must not be acquired with disabled interrupts.
|
||||
|
||||
The API provides write_seqlock_irqsave() which does the right thing in
|
||||
one step.
|
||||
printk_deferred_enter() has to be invoked in non-migrate-able context to
|
||||
ensure that deferred printing is enabled and disabled on the same CPU.
|
||||
This is the case after zonelist_update_seq has been acquired.
|
||||
|
||||
There was discussion on the first submission that the order should be:
|
||||
local_irq_disable();
|
||||
printk_deferred_enter();
|
||||
write_seqlock();
|
||||
|
||||
to avoid pitfalls like having an unaccounted printk() coming from
|
||||
write_seqlock_irqsave() before printk_deferred_enter() is invoked. The
|
||||
only origin of such a printk() can be a lockdep splat because the
|
||||
lockdep annotation happens after the sequence count is incremented.
|
||||
This is exceptional and subject to change.
|
||||
|
||||
It was also pointed that PREEMPT_RT can be affected by the printk
|
||||
problem since its write_seqlock_irqsave() does not really disable
|
||||
interrupts. This isn't the case because PREEMPT_RT's printk
|
||||
implementation differs from the mainline implementation in two important
|
||||
aspects:
|
||||
- Printing happens in a dedicated threads and not at during the
|
||||
invocation of printk().
|
||||
- In emergency cases where synchronous printing is used, a different
|
||||
driver is used which does not use tty_port::lock.
|
||||
|
||||
Acquire zonelist_update_seq with write_seqlock_irqsave() and then defer
|
||||
printk output.
|
||||
|
||||
Fixes: 1007843a91909 ("mm/page_alloc: fix potential deadlock on zonelist_update_seq seqlock")
|
||||
Acked-by: Michal Hocko <mhocko@suse.com>
|
||||
Reviewed-by: David Hildenbrand <david@redhat.com>
|
||||
Link: https://lore.kernel.org/r/20230623201517.yw286Knb@linutronix.de
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
(cherry picked from commit 4d1139baae8bc4fff3728d1d204bdb04c13dbe10)
|
||||
Signed-off-by: Clark Williams <clark.williams@gmail.com>
|
||||
---
|
||||
mm/page_alloc.c | 15 ++++++---------
|
||||
1 file changed, 6 insertions(+), 9 deletions(-)
|
||||
|
||||
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
|
||||
index 4583f8a42d91..835b69a64f4f 100644
|
||||
--- a/mm/page_alloc.c
|
||||
+++ b/mm/page_alloc.c
|
||||
@@ -6588,19 +6588,17 @@ static void __build_all_zonelists(void *data)
|
||||
unsigned long flags;
|
||||
|
||||
/*
|
||||
- * Explicitly disable this CPU's interrupts before taking seqlock
|
||||
- * to prevent any IRQ handler from calling into the page allocator
|
||||
- * (e.g. GFP_ATOMIC) that could hit zonelist_iter_begin and livelock.
|
||||
+ * The zonelist_update_seq must be acquired with irqsave because the
|
||||
+ * reader can be invoked from IRQ with GFP_ATOMIC.
|
||||
*/
|
||||
- local_irq_save(flags);
|
||||
+ write_seqlock_irqsave(&zonelist_update_seq, flags);
|
||||
/*
|
||||
- * Explicitly disable this CPU's synchronous printk() before taking
|
||||
- * seqlock to prevent any printk() from trying to hold port->lock, for
|
||||
+ * Also disable synchronous printk() to prevent any printk() from
|
||||
+ * trying to hold port->lock, for
|
||||
* tty_insert_flip_string_and_push_buffer() on other CPU might be
|
||||
* calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held.
|
||||
*/
|
||||
printk_deferred_enter();
|
||||
- write_seqlock(&zonelist_update_seq);
|
||||
|
||||
#ifdef CONFIG_NUMA
|
||||
memset(node_load, 0, sizeof(node_load));
|
||||
@@ -6637,9 +6635,8 @@ static void __build_all_zonelists(void *data)
|
||||
#endif
|
||||
}
|
||||
|
||||
- write_sequnlock(&zonelist_update_seq);
|
||||
printk_deferred_exit();
|
||||
- local_irq_restore(flags);
|
||||
+ write_sequnlock_irqrestore(&zonelist_update_seq, flags);
|
||||
}
|
||||
|
||||
static noinline void __init
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,131 @@
|
|||
From 05c4ac5f565e1ee2b28fe1c1d1fa490d7fa6b766 Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:48 +0206
|
||||
Subject: [PATCH 058/195] serial: jsm: Use port lock wrappers
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-32-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/jsm/jsm_neo.c | 4 ++--
|
||||
drivers/tty/serial/jsm/jsm_tty.c | 16 ++++++++--------
|
||||
2 files changed, 10 insertions(+), 10 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/jsm/jsm_neo.c b/drivers/tty/serial/jsm/jsm_neo.c
|
||||
index 0c78f66276cd..2bd640428970 100644
|
||||
--- a/drivers/tty/serial/jsm/jsm_neo.c
|
||||
+++ b/drivers/tty/serial/jsm/jsm_neo.c
|
||||
@@ -816,9 +816,9 @@ static void neo_parse_isr(struct jsm_board *brd, u32 port)
|
||||
/* Parse any modem signal changes */
|
||||
jsm_dbg(INTR, &ch->ch_bd->pci_dev,
|
||||
"MOD_STAT: sending to parse_modem_sigs\n");
|
||||
- spin_lock_irqsave(&ch->uart_port.lock, lock_flags);
|
||||
+ uart_port_lock_irqsave(&ch->uart_port, &lock_flags);
|
||||
neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr));
|
||||
- spin_unlock_irqrestore(&ch->uart_port.lock, lock_flags);
|
||||
+ uart_port_unlock_irqrestore(&ch->uart_port, lock_flags);
|
||||
}
|
||||
}
|
||||
|
||||
diff --git a/drivers/tty/serial/jsm/jsm_tty.c b/drivers/tty/serial/jsm/jsm_tty.c
|
||||
index 222afc270c88..ce0fef7e2c66 100644
|
||||
--- a/drivers/tty/serial/jsm/jsm_tty.c
|
||||
+++ b/drivers/tty/serial/jsm/jsm_tty.c
|
||||
@@ -152,14 +152,14 @@ static void jsm_tty_send_xchar(struct uart_port *port, char ch)
|
||||
container_of(port, struct jsm_channel, uart_port);
|
||||
struct ktermios *termios;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, lock_flags);
|
||||
+ uart_port_lock_irqsave(port, &lock_flags);
|
||||
termios = &port->state->port.tty->termios;
|
||||
if (ch == termios->c_cc[VSTART])
|
||||
channel->ch_bd->bd_ops->send_start_character(channel);
|
||||
|
||||
if (ch == termios->c_cc[VSTOP])
|
||||
channel->ch_bd->bd_ops->send_stop_character(channel);
|
||||
- spin_unlock_irqrestore(&port->lock, lock_flags);
|
||||
+ uart_port_unlock_irqrestore(port, lock_flags);
|
||||
}
|
||||
|
||||
static void jsm_tty_stop_rx(struct uart_port *port)
|
||||
@@ -176,13 +176,13 @@ static void jsm_tty_break(struct uart_port *port, int break_state)
|
||||
struct jsm_channel *channel =
|
||||
container_of(port, struct jsm_channel, uart_port);
|
||||
|
||||
- spin_lock_irqsave(&port->lock, lock_flags);
|
||||
+ uart_port_lock_irqsave(port, &lock_flags);
|
||||
if (break_state == -1)
|
||||
channel->ch_bd->bd_ops->send_break(channel);
|
||||
else
|
||||
channel->ch_bd->bd_ops->clear_break(channel);
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, lock_flags);
|
||||
+ uart_port_unlock_irqrestore(port, lock_flags);
|
||||
}
|
||||
|
||||
static int jsm_tty_open(struct uart_port *port)
|
||||
@@ -241,7 +241,7 @@ static int jsm_tty_open(struct uart_port *port)
|
||||
channel->ch_cached_lsr = 0;
|
||||
channel->ch_stops_sent = 0;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, lock_flags);
|
||||
+ uart_port_lock_irqsave(port, &lock_flags);
|
||||
termios = &port->state->port.tty->termios;
|
||||
channel->ch_c_cflag = termios->c_cflag;
|
||||
channel->ch_c_iflag = termios->c_iflag;
|
||||
@@ -261,7 +261,7 @@ static int jsm_tty_open(struct uart_port *port)
|
||||
jsm_carrier(channel);
|
||||
|
||||
channel->ch_open_count++;
|
||||
- spin_unlock_irqrestore(&port->lock, lock_flags);
|
||||
+ uart_port_unlock_irqrestore(port, lock_flags);
|
||||
|
||||
jsm_dbg(OPEN, &channel->ch_bd->pci_dev, "finish\n");
|
||||
return 0;
|
||||
@@ -307,7 +307,7 @@ static void jsm_tty_set_termios(struct uart_port *port,
|
||||
struct jsm_channel *channel =
|
||||
container_of(port, struct jsm_channel, uart_port);
|
||||
|
||||
- spin_lock_irqsave(&port->lock, lock_flags);
|
||||
+ uart_port_lock_irqsave(port, &lock_flags);
|
||||
channel->ch_c_cflag = termios->c_cflag;
|
||||
channel->ch_c_iflag = termios->c_iflag;
|
||||
channel->ch_c_oflag = termios->c_oflag;
|
||||
@@ -317,7 +317,7 @@ static void jsm_tty_set_termios(struct uart_port *port,
|
||||
|
||||
channel->ch_bd->bd_ops->param(channel);
|
||||
jsm_carrier(channel);
|
||||
- spin_unlock_irqrestore(&port->lock, lock_flags);
|
||||
+ uart_port_unlock_irqrestore(port, lock_flags);
|
||||
}
|
||||
|
||||
static const char *jsm_tty_type(struct uart_port *port)
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -1,119 +0,0 @@
|
|||
From 88ec5664a42344c84c246d13dd726a4cbe2d9e8d Mon Sep 17 00:00:00 2001
|
||||
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Date: Wed, 14 Jun 2023 10:34:30 +0200
|
||||
Subject: [PATCH 59/62] bpf: Remove in_atomic() from bpf_link_put().
|
||||
|
||||
bpf_free_inode() is invoked as a RCU callback. Usually RCU callbacks are
|
||||
invoked within softirq context. By setting rcutree.use_softirq=0 boot
|
||||
option the RCU callbacks will be invoked in a per-CPU kthread with
|
||||
bottom halves disabled which implies a RCU read section.
|
||||
|
||||
On PREEMPT_RT the context remains fully preemptible. The RCU read
|
||||
section however does not allow schedule() invocation. The latter happens
|
||||
in mutex_lock() performed by bpf_trampoline_unlink_prog() originated
|
||||
from bpf_link_put().
|
||||
|
||||
It was pointed out that the bpf_link_put() invocation should not be
|
||||
delayed if originated from close(). It was also pointed out that other
|
||||
invocations from within a syscall should also avoid the workqueue.
|
||||
Everyone else should use workqueue by default to remain safe in the
|
||||
future (while auditing the code, every caller was preemptible except for
|
||||
the RCU case).
|
||||
|
||||
Let bpf_link_put() use the worker unconditionally. Add
|
||||
bpf_link_put_direct() which will directly free the resources and is used
|
||||
by close() and from within __sys_bpf().
|
||||
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
|
||||
Link: https://lore.kernel.org/bpf/20230614083430.oENawF8f@linutronix.de
|
||||
(cherry picked from commit ab5d47bd41b1db82c295b0e751e2b822b43a4b5a)
|
||||
Signed-off-by: Clark Williams <clark.williams@gmail.com>
|
||||
---
|
||||
kernel/bpf/syscall.c | 29 ++++++++++++++++-------------
|
||||
1 file changed, 16 insertions(+), 13 deletions(-)
|
||||
|
||||
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
|
||||
index c0915e2424f1..f8ba6e0a5c08 100644
|
||||
--- a/kernel/bpf/syscall.c
|
||||
+++ b/kernel/bpf/syscall.c
|
||||
@@ -2732,28 +2732,31 @@ static void bpf_link_put_deferred(struct work_struct *work)
|
||||
bpf_link_free(link);
|
||||
}
|
||||
|
||||
-/* bpf_link_put can be called from atomic context, but ensures that resources
|
||||
- * are freed from process context
|
||||
+/* bpf_link_put might be called from atomic context. It needs to be called
|
||||
+ * from sleepable context in order to acquire sleeping locks during the process.
|
||||
*/
|
||||
void bpf_link_put(struct bpf_link *link)
|
||||
{
|
||||
if (!atomic64_dec_and_test(&link->refcnt))
|
||||
return;
|
||||
|
||||
- if (in_atomic()) {
|
||||
- INIT_WORK(&link->work, bpf_link_put_deferred);
|
||||
- schedule_work(&link->work);
|
||||
- } else {
|
||||
- bpf_link_free(link);
|
||||
- }
|
||||
+ INIT_WORK(&link->work, bpf_link_put_deferred);
|
||||
+ schedule_work(&link->work);
|
||||
}
|
||||
EXPORT_SYMBOL(bpf_link_put);
|
||||
|
||||
+static void bpf_link_put_direct(struct bpf_link *link)
|
||||
+{
|
||||
+ if (!atomic64_dec_and_test(&link->refcnt))
|
||||
+ return;
|
||||
+ bpf_link_free(link);
|
||||
+}
|
||||
+
|
||||
static int bpf_link_release(struct inode *inode, struct file *filp)
|
||||
{
|
||||
struct bpf_link *link = filp->private_data;
|
||||
|
||||
- bpf_link_put(link);
|
||||
+ bpf_link_put_direct(link);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -4674,7 +4677,7 @@ static int link_update(union bpf_attr *attr)
|
||||
if (ret)
|
||||
bpf_prog_put(new_prog);
|
||||
out_put_link:
|
||||
- bpf_link_put(link);
|
||||
+ bpf_link_put_direct(link);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -4697,7 +4700,7 @@ static int link_detach(union bpf_attr *attr)
|
||||
else
|
||||
ret = -EOPNOTSUPP;
|
||||
|
||||
- bpf_link_put(link);
|
||||
+ bpf_link_put_direct(link);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -4767,7 +4770,7 @@ static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
|
||||
|
||||
fd = bpf_link_new_fd(link);
|
||||
if (fd < 0)
|
||||
- bpf_link_put(link);
|
||||
+ bpf_link_put_direct(link);
|
||||
|
||||
return fd;
|
||||
}
|
||||
@@ -4844,7 +4847,7 @@ static int bpf_iter_create(union bpf_attr *attr)
|
||||
return PTR_ERR(link);
|
||||
|
||||
err = bpf_iter_new_fd(link);
|
||||
- bpf_link_put(link);
|
||||
+ bpf_link_put_direct(link);
|
||||
|
||||
return err;
|
||||
}
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,115 @@
|
|||
From f874ed9f1662ee8fabfebd26a21932420fe8519b Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:49 +0206
|
||||
Subject: [PATCH 059/195] serial: liteuart: Use port lock wrappers
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Acked-by: Gabriel Somlo <gsomlo@gmail.com>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-33-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/liteuart.c | 20 ++++++++++----------
|
||||
1 file changed, 10 insertions(+), 10 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/liteuart.c b/drivers/tty/serial/liteuart.c
|
||||
index d881cdd2a58f..a25ab1efe38f 100644
|
||||
--- a/drivers/tty/serial/liteuart.c
|
||||
+++ b/drivers/tty/serial/liteuart.c
|
||||
@@ -139,13 +139,13 @@ static irqreturn_t liteuart_interrupt(int irq, void *data)
|
||||
* if polling, the context would be "in_serving_softirq", so use
|
||||
* irq[save|restore] spin_lock variants to cover all possibilities
|
||||
*/
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
isr = litex_read8(port->membase + OFF_EV_PENDING) & uart->irq_reg;
|
||||
if (isr & EV_RX)
|
||||
liteuart_rx_chars(port);
|
||||
if (isr & EV_TX)
|
||||
liteuart_tx_chars(port);
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
|
||||
return IRQ_RETVAL(isr);
|
||||
}
|
||||
@@ -195,10 +195,10 @@ static int liteuart_startup(struct uart_port *port)
|
||||
}
|
||||
}
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
/* only enabling rx irqs during startup */
|
||||
liteuart_update_irq_reg(port, true, EV_RX);
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
|
||||
if (!port->irq) {
|
||||
timer_setup(&uart->timer, liteuart_timer, 0);
|
||||
@@ -213,9 +213,9 @@ static void liteuart_shutdown(struct uart_port *port)
|
||||
struct liteuart_port *uart = to_liteuart_port(port);
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
liteuart_update_irq_reg(port, false, EV_RX | EV_TX);
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
|
||||
if (port->irq)
|
||||
free_irq(port->irq, port);
|
||||
@@ -229,13 +229,13 @@ static void liteuart_set_termios(struct uart_port *port, struct ktermios *new,
|
||||
unsigned int baud;
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
/* update baudrate */
|
||||
baud = uart_get_baud_rate(port, new, old, 0, 460800);
|
||||
uart_update_timeout(port, new->c_cflag, baud);
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
static const char *liteuart_type(struct uart_port *port)
|
||||
@@ -382,9 +382,9 @@ static void liteuart_console_write(struct console *co, const char *s,
|
||||
uart = (struct liteuart_port *)xa_load(&liteuart_array, co->index);
|
||||
port = &uart->port;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
uart_console_write(port, s, count, liteuart_putchar);
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
static int liteuart_console_setup(struct console *co, char *options)
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -1,114 +0,0 @@
|
|||
From 9bd94469a4ea8355cd39603280261eeff2cd0579 Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 1 Jun 2023 20:58:47 +0200
|
||||
Subject: [PATCH 60/62] posix-timers: Ensure timer ID search-loop limit is
|
||||
valid
|
||||
|
||||
posix_timer_add() tries to allocate a posix timer ID by starting from the
|
||||
cached ID which was stored by the last successful allocation.
|
||||
|
||||
This is done in a loop searching the ID space for a free slot one by
|
||||
one. The loop has to terminate when the search wrapped around to the
|
||||
starting point.
|
||||
|
||||
But that's racy vs. establishing the starting point. That is read out
|
||||
lockless, which leads to the following problem:
|
||||
|
||||
CPU0 CPU1
|
||||
posix_timer_add()
|
||||
start = sig->posix_timer_id;
|
||||
lock(hash_lock);
|
||||
... posix_timer_add()
|
||||
if (++sig->posix_timer_id < 0)
|
||||
start = sig->posix_timer_id;
|
||||
sig->posix_timer_id = 0;
|
||||
|
||||
So CPU1 can observe a negative start value, i.e. -1, and the loop break
|
||||
never happens because the condition can never be true:
|
||||
|
||||
if (sig->posix_timer_id == start)
|
||||
break;
|
||||
|
||||
While this is unlikely to ever turn into an endless loop as the ID space is
|
||||
huge (INT_MAX), the racy read of the start value caught the attention of
|
||||
KCSAN and Dmitry unearthed that incorrectness.
|
||||
|
||||
Rewrite it so that all id operations are under the hash lock.
|
||||
|
||||
Reported-by: syzbot+5c54bd3eb218bb595aa9@syzkaller.appspotmail.com
|
||||
Reported-by: Dmitry Vyukov <dvyukov@google.com>
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
|
||||
Link: https://lore.kernel.org/r/87bkhzdn6g.ffs@tglx
|
||||
|
||||
(cherry picked from commit 8ce8849dd1e78dadcee0ec9acbd259d239b7069f)
|
||||
Signed-off-by: Clark Williams <clark.williams@gmail.com>
|
||||
---
|
||||
include/linux/sched/signal.h | 2 +-
|
||||
kernel/time/posix-timers.c | 31 ++++++++++++++++++-------------
|
||||
2 files changed, 19 insertions(+), 14 deletions(-)
|
||||
|
||||
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
|
||||
index 20099268fa25..669e8cff40c7 100644
|
||||
--- a/include/linux/sched/signal.h
|
||||
+++ b/include/linux/sched/signal.h
|
||||
@@ -135,7 +135,7 @@ struct signal_struct {
|
||||
#ifdef CONFIG_POSIX_TIMERS
|
||||
|
||||
/* POSIX.1b Interval Timers */
|
||||
- int posix_timer_id;
|
||||
+ unsigned int next_posix_timer_id;
|
||||
struct list_head posix_timers;
|
||||
|
||||
/* ITIMER_REAL timer for the process */
|
||||
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
|
||||
index ed3c4a954398..2d6cf93ca370 100644
|
||||
--- a/kernel/time/posix-timers.c
|
||||
+++ b/kernel/time/posix-timers.c
|
||||
@@ -140,25 +140,30 @@ static struct k_itimer *posix_timer_by_id(timer_t id)
|
||||
static int posix_timer_add(struct k_itimer *timer)
|
||||
{
|
||||
struct signal_struct *sig = current->signal;
|
||||
- int first_free_id = sig->posix_timer_id;
|
||||
struct hlist_head *head;
|
||||
- int ret = -ENOENT;
|
||||
+ unsigned int cnt, id;
|
||||
|
||||
- do {
|
||||
+ /*
|
||||
+ * FIXME: Replace this by a per signal struct xarray once there is
|
||||
+ * a plan to handle the resulting CRIU regression gracefully.
|
||||
+ */
|
||||
+ for (cnt = 0; cnt <= INT_MAX; cnt++) {
|
||||
spin_lock(&hash_lock);
|
||||
- head = &posix_timers_hashtable[hash(sig, sig->posix_timer_id)];
|
||||
- if (!__posix_timers_find(head, sig, sig->posix_timer_id)) {
|
||||
+ id = sig->next_posix_timer_id;
|
||||
+
|
||||
+ /* Write the next ID back. Clamp it to the positive space */
|
||||
+ sig->next_posix_timer_id = (id + 1) & INT_MAX;
|
||||
+
|
||||
+ head = &posix_timers_hashtable[hash(sig, id)];
|
||||
+ if (!__posix_timers_find(head, sig, id)) {
|
||||
hlist_add_head_rcu(&timer->t_hash, head);
|
||||
- ret = sig->posix_timer_id;
|
||||
+ spin_unlock(&hash_lock);
|
||||
+ return id;
|
||||
}
|
||||
- if (++sig->posix_timer_id < 0)
|
||||
- sig->posix_timer_id = 0;
|
||||
- if ((sig->posix_timer_id == first_free_id) && (ret == -ENOENT))
|
||||
- /* Loop over all possible ids completed */
|
||||
- ret = -EAGAIN;
|
||||
spin_unlock(&hash_lock);
|
||||
- } while (ret == -ENOENT);
|
||||
- return ret;
|
||||
+ }
|
||||
+ /* POSIX return code when no timer ID could be allocated */
|
||||
+ return -EAGAIN;
|
||||
}
|
||||
|
||||
static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,153 @@
|
|||
From 9e8eb2f943add1f6eedbdb30e247d9be86bf2f8c Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:50 +0206
|
||||
Subject: [PATCH 060/195] serial: lpc32xx_hs: Use port lock wrappers
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-34-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/lpc32xx_hs.c | 26 +++++++++++++-------------
|
||||
1 file changed, 13 insertions(+), 13 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/lpc32xx_hs.c b/drivers/tty/serial/lpc32xx_hs.c
|
||||
index b38fe4728c26..5149a947b7fe 100644
|
||||
--- a/drivers/tty/serial/lpc32xx_hs.c
|
||||
+++ b/drivers/tty/serial/lpc32xx_hs.c
|
||||
@@ -140,15 +140,15 @@ static void lpc32xx_hsuart_console_write(struct console *co, const char *s,
|
||||
if (up->port.sysrq)
|
||||
locked = 0;
|
||||
else if (oops_in_progress)
|
||||
- locked = spin_trylock(&up->port.lock);
|
||||
+ locked = uart_port_trylock(&up->port);
|
||||
else
|
||||
- spin_lock(&up->port.lock);
|
||||
+ uart_port_lock(&up->port);
|
||||
|
||||
uart_console_write(&up->port, s, count, lpc32xx_hsuart_console_putchar);
|
||||
wait_for_xmit_empty(&up->port);
|
||||
|
||||
if (locked)
|
||||
- spin_unlock(&up->port.lock);
|
||||
+ uart_port_unlock(&up->port);
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
@@ -298,7 +298,7 @@ static irqreturn_t serial_lpc32xx_interrupt(int irq, void *dev_id)
|
||||
struct tty_port *tport = &port->state->port;
|
||||
u32 status;
|
||||
|
||||
- spin_lock(&port->lock);
|
||||
+ uart_port_lock(port);
|
||||
|
||||
/* Read UART status and clear latched interrupts */
|
||||
status = readl(LPC32XX_HSUART_IIR(port->membase));
|
||||
@@ -333,7 +333,7 @@ static irqreturn_t serial_lpc32xx_interrupt(int irq, void *dev_id)
|
||||
__serial_lpc32xx_tx(port);
|
||||
}
|
||||
|
||||
- spin_unlock(&port->lock);
|
||||
+ uart_port_unlock(port);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
@@ -404,14 +404,14 @@ static void serial_lpc32xx_break_ctl(struct uart_port *port,
|
||||
unsigned long flags;
|
||||
u32 tmp;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
tmp = readl(LPC32XX_HSUART_CTRL(port->membase));
|
||||
if (break_state != 0)
|
||||
tmp |= LPC32XX_HSU_BREAK;
|
||||
else
|
||||
tmp &= ~LPC32XX_HSU_BREAK;
|
||||
writel(tmp, LPC32XX_HSUART_CTRL(port->membase));
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
/* port->lock is not held. */
|
||||
@@ -421,7 +421,7 @@ static int serial_lpc32xx_startup(struct uart_port *port)
|
||||
unsigned long flags;
|
||||
u32 tmp;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
__serial_uart_flush(port);
|
||||
|
||||
@@ -441,7 +441,7 @@ static int serial_lpc32xx_startup(struct uart_port *port)
|
||||
|
||||
lpc32xx_loopback_set(port->mapbase, 0); /* get out of loopback mode */
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
|
||||
retval = request_irq(port->irq, serial_lpc32xx_interrupt,
|
||||
0, MODNAME, port);
|
||||
@@ -458,7 +458,7 @@ static void serial_lpc32xx_shutdown(struct uart_port *port)
|
||||
u32 tmp;
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
tmp = LPC32XX_HSU_TX_TL8B | LPC32XX_HSU_RX_TL32B |
|
||||
LPC32XX_HSU_OFFSET(20) | LPC32XX_HSU_TMO_INACT_4B;
|
||||
@@ -466,7 +466,7 @@ static void serial_lpc32xx_shutdown(struct uart_port *port)
|
||||
|
||||
lpc32xx_loopback_set(port->mapbase, 1); /* go to loopback mode */
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
|
||||
free_irq(port->irq, port);
|
||||
}
|
||||
@@ -491,7 +491,7 @@ static void serial_lpc32xx_set_termios(struct uart_port *port,
|
||||
|
||||
quot = __serial_get_clock_div(port->uartclk, baud);
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
/* Ignore characters? */
|
||||
tmp = readl(LPC32XX_HSUART_CTRL(port->membase));
|
||||
@@ -505,7 +505,7 @@ static void serial_lpc32xx_set_termios(struct uart_port *port,
|
||||
|
||||
uart_update_timeout(port, termios->c_cflag, baud);
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
|
||||
/* Don't rewrite B0 */
|
||||
if (tty_termios_baud_rate(termios))
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,122 @@
|
|||
From 294b8734d1b7df5b8bd169179926820cf601ee49 Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:51 +0206
|
||||
Subject: [PATCH 061/195] serial: ma35d1: Use port lock wrappers
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-35-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/ma35d1_serial.c | 22 +++++++++++-----------
|
||||
1 file changed, 11 insertions(+), 11 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/ma35d1_serial.c b/drivers/tty/serial/ma35d1_serial.c
|
||||
index 69da24565b99..73910c54d6be 100644
|
||||
--- a/drivers/tty/serial/ma35d1_serial.c
|
||||
+++ b/drivers/tty/serial/ma35d1_serial.c
|
||||
@@ -269,16 +269,16 @@ static void receive_chars(struct uart_ma35d1_port *up)
|
||||
if (uart_handle_sysrq_char(&up->port, ch))
|
||||
continue;
|
||||
|
||||
- spin_lock(&up->port.lock);
|
||||
+ uart_port_lock(&up->port);
|
||||
uart_insert_char(&up->port, fsr, MA35_FSR_RX_OVER_IF, ch, flag);
|
||||
- spin_unlock(&up->port.lock);
|
||||
+ uart_port_unlock(&up->port);
|
||||
|
||||
fsr = serial_in(up, MA35_FSR_REG);
|
||||
} while (!(fsr & MA35_FSR_RX_EMPTY) && (max_count-- > 0));
|
||||
|
||||
- spin_lock(&up->port.lock);
|
||||
+ uart_port_lock(&up->port);
|
||||
tty_flip_buffer_push(&up->port.state->port);
|
||||
- spin_unlock(&up->port.lock);
|
||||
+ uart_port_unlock(&up->port);
|
||||
}
|
||||
|
||||
static irqreturn_t ma35d1serial_interrupt(int irq, void *dev_id)
|
||||
@@ -364,14 +364,14 @@ static void ma35d1serial_break_ctl(struct uart_port *port, int break_state)
|
||||
unsigned long flags;
|
||||
u32 lcr;
|
||||
|
||||
- spin_lock_irqsave(&up->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&up->port, &flags);
|
||||
lcr = serial_in(up, MA35_LCR_REG);
|
||||
if (break_state != 0)
|
||||
lcr |= MA35_LCR_BREAK;
|
||||
else
|
||||
lcr &= ~MA35_LCR_BREAK;
|
||||
serial_out(up, MA35_LCR_REG, lcr);
|
||||
- spin_unlock_irqrestore(&up->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&up->port, flags);
|
||||
}
|
||||
|
||||
static int ma35d1serial_startup(struct uart_port *port)
|
||||
@@ -441,7 +441,7 @@ static void ma35d1serial_set_termios(struct uart_port *port,
|
||||
* Ok, we're now changing the port state. Do it with
|
||||
* interrupts disabled.
|
||||
*/
|
||||
- spin_lock_irqsave(&up->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&up->port, &flags);
|
||||
|
||||
up->port.read_status_mask = MA35_FSR_RX_OVER_IF;
|
||||
if (termios->c_iflag & INPCK)
|
||||
@@ -475,7 +475,7 @@ static void ma35d1serial_set_termios(struct uart_port *port,
|
||||
|
||||
serial_out(up, MA35_LCR_REG, lcr);
|
||||
|
||||
- spin_unlock_irqrestore(&up->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&up->port, flags);
|
||||
}
|
||||
|
||||
static const char *ma35d1serial_type(struct uart_port *port)
|
||||
@@ -568,9 +568,9 @@ static void ma35d1serial_console_write(struct console *co, const char *s, u32 co
|
||||
if (up->port.sysrq)
|
||||
locked = 0;
|
||||
else if (oops_in_progress)
|
||||
- locked = spin_trylock_irqsave(&up->port.lock, flags);
|
||||
+ locked = uart_port_trylock_irqsave(&up->port, &flags);
|
||||
else
|
||||
- spin_lock_irqsave(&up->port.lock, flags);
|
||||
+ uart_port_lock_irqsave(&up->port, &flags);
|
||||
|
||||
/*
|
||||
* First save the IER then disable the interrupts
|
||||
@@ -584,7 +584,7 @@ static void ma35d1serial_console_write(struct console *co, const char *s, u32 co
|
||||
serial_out(up, MA35_IER_REG, ier);
|
||||
|
||||
if (locked)
|
||||
- spin_unlock_irqrestore(&up->port.lock, flags);
|
||||
+ uart_port_unlock_irqrestore(&up->port, flags);
|
||||
}
|
||||
|
||||
static int __init ma35d1serial_console_setup(struct console *co, char *options)
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,132 @@
|
|||
From 6c3f1909ab8c0fc94df3475a77d308b8ad7f7a98 Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:52 +0206
|
||||
Subject: [PATCH 062/195] serial: mcf: Use port lock wrappers
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-36-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/mcf.c | 20 ++++++++++----------
|
||||
1 file changed, 10 insertions(+), 10 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c
|
||||
index 1666ce012e5e..91b15243f6c6 100644
|
||||
--- a/drivers/tty/serial/mcf.c
|
||||
+++ b/drivers/tty/serial/mcf.c
|
||||
@@ -135,12 +135,12 @@ static void mcf_break_ctl(struct uart_port *port, int break_state)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
if (break_state == -1)
|
||||
writeb(MCFUART_UCR_CMDBREAKSTART, port->membase + MCFUART_UCR);
|
||||
else
|
||||
writeb(MCFUART_UCR_CMDBREAKSTOP, port->membase + MCFUART_UCR);
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
/****************************************************************************/
|
||||
@@ -150,7 +150,7 @@ static int mcf_startup(struct uart_port *port)
|
||||
struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
/* Reset UART, get it into known state... */
|
||||
writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
|
||||
@@ -164,7 +164,7 @@ static int mcf_startup(struct uart_port *port)
|
||||
pp->imr = MCFUART_UIR_RXREADY;
|
||||
writeb(pp->imr, port->membase + MCFUART_UIMR);
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -176,7 +176,7 @@ static void mcf_shutdown(struct uart_port *port)
|
||||
struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
/* Disable all interrupts now */
|
||||
pp->imr = 0;
|
||||
@@ -186,7 +186,7 @@ static void mcf_shutdown(struct uart_port *port)
|
||||
writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
|
||||
writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR);
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
/****************************************************************************/
|
||||
@@ -252,7 +252,7 @@ static void mcf_set_termios(struct uart_port *port, struct ktermios *termios,
|
||||
mr2 |= MCFUART_MR2_TXCTS;
|
||||
}
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
if (port->rs485.flags & SER_RS485_ENABLED) {
|
||||
dev_dbg(port->dev, "Setting UART to RS485\n");
|
||||
mr2 |= MCFUART_MR2_TXRTS;
|
||||
@@ -273,7 +273,7 @@ static void mcf_set_termios(struct uart_port *port, struct ktermios *termios,
|
||||
port->membase + MCFUART_UCSR);
|
||||
writeb(MCFUART_UCR_RXENABLE | MCFUART_UCR_TXENABLE,
|
||||
port->membase + MCFUART_UCR);
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
/****************************************************************************/
|
||||
@@ -350,7 +350,7 @@ static irqreturn_t mcf_interrupt(int irq, void *data)
|
||||
|
||||
isr = readb(port->membase + MCFUART_UISR) & pp->imr;
|
||||
|
||||
- spin_lock(&port->lock);
|
||||
+ uart_port_lock(port);
|
||||
if (isr & MCFUART_UIR_RXREADY) {
|
||||
mcf_rx_chars(pp);
|
||||
ret = IRQ_HANDLED;
|
||||
@@ -359,7 +359,7 @@ static irqreturn_t mcf_interrupt(int irq, void *data)
|
||||
mcf_tx_chars(pp);
|
||||
ret = IRQ_HANDLED;
|
||||
}
|
||||
- spin_unlock(&port->lock);
|
||||
+ uart_port_unlock(port);
|
||||
|
||||
return ret;
|
||||
}
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,81 @@
|
|||
From ebfe5facac5e1f8837c7fd7f4502baed9832eccd Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:53 +0206
|
||||
Subject: [PATCH 063/195] serial: men_z135_uart: Use port lock wrappers
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-37-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/men_z135_uart.c | 8 ++++----
|
||||
1 file changed, 4 insertions(+), 4 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/men_z135_uart.c b/drivers/tty/serial/men_z135_uart.c
|
||||
index d2502aaa3e8c..8048fa542fc4 100644
|
||||
--- a/drivers/tty/serial/men_z135_uart.c
|
||||
+++ b/drivers/tty/serial/men_z135_uart.c
|
||||
@@ -392,7 +392,7 @@ static irqreturn_t men_z135_intr(int irq, void *data)
|
||||
if (!irq_id)
|
||||
goto out;
|
||||
|
||||
- spin_lock(&port->lock);
|
||||
+ uart_port_lock(port);
|
||||
/* It's save to write to IIR[7:6] RXC[9:8] */
|
||||
iowrite8(irq_id, port->membase + MEN_Z135_STAT_REG);
|
||||
|
||||
@@ -418,7 +418,7 @@ static irqreturn_t men_z135_intr(int irq, void *data)
|
||||
handled = true;
|
||||
}
|
||||
|
||||
- spin_unlock(&port->lock);
|
||||
+ uart_port_unlock(port);
|
||||
out:
|
||||
return IRQ_RETVAL(handled);
|
||||
}
|
||||
@@ -708,7 +708,7 @@ static void men_z135_set_termios(struct uart_port *port,
|
||||
|
||||
baud = uart_get_baud_rate(port, termios, old, 0, uart_freq / 16);
|
||||
|
||||
- spin_lock_irq(&port->lock);
|
||||
+ uart_port_lock_irq(port);
|
||||
if (tty_termios_baud_rate(termios))
|
||||
tty_termios_encode_baud_rate(termios, baud, baud);
|
||||
|
||||
@@ -716,7 +716,7 @@ static void men_z135_set_termios(struct uart_port *port,
|
||||
iowrite32(bd_reg, port->membase + MEN_Z135_BAUD_REG);
|
||||
|
||||
uart_update_timeout(port, termios->c_cflag, baud);
|
||||
- spin_unlock_irq(&port->lock);
|
||||
+ uart_port_unlock_irq(port);
|
||||
}
|
||||
|
||||
static const char *men_z135_type(struct uart_port *port)
|
||||
--
|
||||
2.43.0
|
||||
|
|
@ -0,0 +1,173 @@
|
|||
From 348c0f05fb287f150e7eaa783c40cb73910f589d Mon Sep 17 00:00:00 2001
|
||||
From: Thomas Gleixner <tglx@linutronix.de>
|
||||
Date: Thu, 14 Sep 2023 20:43:54 +0206
|
||||
Subject: [PATCH 064/195] serial: meson: Use port lock wrappers
|
||||
|
||||
When a serial port is used for kernel console output, then all
|
||||
modifications to the UART registers which are done from other contexts,
|
||||
e.g. getty, termios, are interference points for the kernel console.
|
||||
|
||||
So far this has been ignored and the printk output is based on the
|
||||
principle of hope. The rework of the console infrastructure which aims to
|
||||
support threaded and atomic consoles, requires to mark sections which
|
||||
modify the UART registers as unsafe. This allows the atomic write function
|
||||
to make informed decisions and eventually to restore operational state. It
|
||||
also allows to prevent the regular UART code from modifying UART registers
|
||||
while printk output is in progress.
|
||||
|
||||
All modifications of UART registers are guarded by the UART port lock,
|
||||
which provides an obvious synchronization point with the console
|
||||
infrastructure.
|
||||
|
||||
To avoid adding this functionality to all UART drivers, wrap the
|
||||
spin_[un]lock*() invocations for uart_port::lock into helper functions
|
||||
which just contain the spin_[un]lock*() invocations for now. In a
|
||||
subsequent step these helpers will gain the console synchronization
|
||||
mechanisms.
|
||||
|
||||
Converted with coccinelle. No functional change.
|
||||
|
||||
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
||||
Acked-by: Neil Armstrong <neil.armstrong@linaro.org>
|
||||
Signed-off-by: John Ogness <john.ogness@linutronix.de>
|
||||
Link: https://lore.kernel.org/r/20230914183831.587273-38-john.ogness@linutronix.de
|
||||
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
||||
---
|
||||
drivers/tty/serial/meson_uart.c | 30 +++++++++++++++---------------
|
||||
1 file changed, 15 insertions(+), 15 deletions(-)
|
||||
|
||||
diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
|
||||
index 9388b9ddea3b..4c1d2089a0bb 100644
|
||||
--- a/drivers/tty/serial/meson_uart.c
|
||||
+++ b/drivers/tty/serial/meson_uart.c
|
||||
@@ -129,14 +129,14 @@ static void meson_uart_shutdown(struct uart_port *port)
|
||||
|
||||
free_irq(port->irq, port);
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
val = readl(port->membase + AML_UART_CONTROL);
|
||||
val &= ~AML_UART_RX_EN;
|
||||
val &= ~(AML_UART_RX_INT_EN | AML_UART_TX_INT_EN);
|
||||
writel(val, port->membase + AML_UART_CONTROL);
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
static void meson_uart_start_tx(struct uart_port *port)
|
||||
@@ -238,7 +238,7 @@ static irqreturn_t meson_uart_interrupt(int irq, void *dev_id)
|
||||
{
|
||||
struct uart_port *port = (struct uart_port *)dev_id;
|
||||
|
||||
- spin_lock(&port->lock);
|
||||
+ uart_port_lock(port);
|
||||
|
||||
if (!(readl(port->membase + AML_UART_STATUS) & AML_UART_RX_EMPTY))
|
||||
meson_receive_chars(port);
|
||||
@@ -248,7 +248,7 @@ static irqreturn_t meson_uart_interrupt(int irq, void *dev_id)
|
||||
meson_uart_start_tx(port);
|
||||
}
|
||||
|
||||
- spin_unlock(&port->lock);
|
||||
+ uart_port_unlock(port);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
@@ -284,7 +284,7 @@ static int meson_uart_startup(struct uart_port *port)
|
||||
u32 val;
|
||||
int ret = 0;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
val = readl(port->membase + AML_UART_CONTROL);
|
||||
val |= AML_UART_CLEAR_ERR;
|
||||
@@ -301,7 +301,7 @@ static int meson_uart_startup(struct uart_port *port)
|
||||
val = (AML_UART_RECV_IRQ(1) | AML_UART_XMIT_IRQ(port->fifosize / 2));
|
||||
writel(val, port->membase + AML_UART_MISC);
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
|
||||
ret = request_irq(port->irq, meson_uart_interrupt, 0,
|
||||
port->name, port);
|
||||
@@ -341,7 +341,7 @@ static void meson_uart_set_termios(struct uart_port *port,
|
||||
unsigned long flags;
|
||||
u32 val;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
cflags = termios->c_cflag;
|
||||
iflags = termios->c_iflag;
|
||||
@@ -405,7 +405,7 @@ static void meson_uart_set_termios(struct uart_port *port,
|
||||
AML_UART_FRAME_ERR;
|
||||
|
||||
uart_update_timeout(port, termios->c_cflag, baud);
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
static int meson_uart_verify_port(struct uart_port *port,
|
||||
@@ -464,14 +464,14 @@ static int meson_uart_poll_get_char(struct uart_port *port)
|
||||
u32 c;
|
||||
unsigned long flags;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
if (readl(port->membase + AML_UART_STATUS) & AML_UART_RX_EMPTY)
|
||||
c = NO_POLL_CHAR;
|
||||
else
|
||||
c = readl(port->membase + AML_UART_RFIFO);
|
||||
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
|
||||
return c;
|
||||
}
|
||||
@@ -482,7 +482,7 @@ static void meson_uart_poll_put_char(struct uart_port *port, unsigned char c)
|
||||
u32 reg;
|
||||
int ret;
|
||||
|
||||
- spin_lock_irqsave(&port->lock, flags);
|
||||
+ uart_port_lock_irqsave(port, &flags);
|
||||
|
||||
/* Wait until FIFO is empty or timeout */
|
||||
ret = readl_poll_timeout_atomic(port->membase + AML_UART_STATUS, reg,
|
||||
@@ -506,7 +506,7 @@ static void meson_uart_poll_put_char(struct uart_port *port, unsigned char c)
|
||||
dev_err(port->dev, "Timeout waiting for UART TX EMPTY\n");
|
||||
|
||||
out:
|
||||
- spin_unlock_irqrestore(&port->lock, flags);
|
||||
+ uart_port_unlock_irqrestore(port, flags);
|
||||
}
|
||||
|
||||
#endif /* CONFIG_CONSOLE_POLL */
|
||||
@@ -563,9 +563,9 @@ static void meson_serial_port_write(struct uart_port *port, const char *s,
|
||||
if (port->sysrq) {
|
||||
locked = 0;
|
||||
} else if (oops_in_progress) {
|
||||
- locked = spin_trylock(&port->lock);
|
||||
+ locked = uart_port_trylock(port);
|
||||
} else {
|
||||
- spin_lock(&port->lock);
|
||||
+ uart_port_lock(port);
|
||||
locked = 1;
|
||||
}
|
||||
|
||||
@@ -577,7 +577,7 @@ static void meson_serial_port_write(struct uart_port *port, const char *s,
|
||||
writel(val, port->membase + AML_UART_CONTROL);
|
||||
|
||||
if (locked)
|
||||
- spin_unlock(&port->lock);
|
||||
+ uart_port_unlock(port);
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
--
|
||||
2.43.0
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue