mirror of
https://github.com/OpenVoiceOS/OpenVoiceOS
synced 2025-02-21 22:27:49 +01:00
129 lines
3.8 KiB
Diff
129 lines
3.8 KiB
Diff
From 3080305e309931d30a95a7f597fd336d8c18abfc Mon Sep 17 00:00:00 2001
|
|
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
|
Date: Mon, 15 Aug 2022 17:29:50 +0200
|
|
Subject: [PATCH 015/196] net: Avoid the IPI to free the skb
|
|
|
|
skb_attempt_defer_free() collects skbs, which were allocated on a
|
|
remote CPU, on a per-CPU list. These skbs are either freed on that
|
|
remote CPU once the CPU enters NET_RX or a remote IPI function is
|
|
invoked to raise the NET_RX softirq if a threshold of pending skbs has
|
|
been exceeded.
|
|
This remote IPI can cause the wakeup of ksoftirqd on PREEMPT_RT if the
|
|
remote CPU was idle. This is undesired because once the ksoftirqd
|
|
is running it will acquire all pending softirqs and they will not be
|
|
executed as part of the threaded interrupt until ksoftirqd goes idle
|
|
again.
|
|
|
|
To avoid all this, schedule the deferred cleanup from a worker.
|
|
|
|
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
|
|
---
|
|
include/linux/netdevice.h | 4 ++++
|
|
net/core/dev.c | 39 ++++++++++++++++++++++++++++++---------
|
|
net/core/skbuff.c | 7 ++++++-
|
|
3 files changed, 40 insertions(+), 10 deletions(-)
|
|
|
|
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
|
|
index b8e60a20416b..ffa5248a90e2 100644
|
|
--- a/include/linux/netdevice.h
|
|
+++ b/include/linux/netdevice.h
|
|
@@ -3258,7 +3258,11 @@ struct softnet_data {
|
|
int defer_count;
|
|
int defer_ipi_scheduled;
|
|
struct sk_buff *defer_list;
|
|
+#ifndef CONFIG_PREEMPT_RT
|
|
call_single_data_t defer_csd;
|
|
+#else
|
|
+ struct work_struct defer_work;
|
|
+#endif
|
|
};
|
|
|
|
static inline void input_queue_head_incr(struct softnet_data *sd)
|
|
diff --git a/net/core/dev.c b/net/core/dev.c
|
|
index 1f6c8945f2ec..1e8928cd3c75 100644
|
|
--- a/net/core/dev.c
|
|
+++ b/net/core/dev.c
|
|
@@ -4705,15 +4705,6 @@ static void rps_trigger_softirq(void *data)
|
|
|
|
#endif /* CONFIG_RPS */
|
|
|
|
-/* Called from hardirq (IPI) context */
|
|
-static void trigger_rx_softirq(void *data)
|
|
-{
|
|
- struct softnet_data *sd = data;
|
|
-
|
|
- __raise_softirq_irqoff(NET_RX_SOFTIRQ);
|
|
- smp_store_release(&sd->defer_ipi_scheduled, 0);
|
|
-}
|
|
-
|
|
/*
|
|
* After we queued a packet into sd->input_pkt_queue,
|
|
* we need to make sure this queue is serviced soon.
|
|
@@ -6682,6 +6673,32 @@ static void skb_defer_free_flush(struct softnet_data *sd)
|
|
}
|
|
}
|
|
|
|
+#ifndef CONFIG_PREEMPT_RT
|
|
+
|
|
+/* Called from hardirq (IPI) context */
|
|
+static void trigger_rx_softirq(void *data)
|
|
+{
|
|
+ struct softnet_data *sd = data;
|
|
+
|
|
+ __raise_softirq_irqoff(NET_RX_SOFTIRQ);
|
|
+ smp_store_release(&sd->defer_ipi_scheduled, 0);
|
|
+}
|
|
+
|
|
+#else
|
|
+
|
|
+static void trigger_rx_softirq(struct work_struct *defer_work)
|
|
+{
|
|
+ struct softnet_data *sd;
|
|
+
|
|
+ sd = container_of(defer_work, struct softnet_data, defer_work);
|
|
+ smp_store_release(&sd->defer_ipi_scheduled, 0);
|
|
+ local_bh_disable();
|
|
+ skb_defer_free_flush(sd);
|
|
+ local_bh_enable();
|
|
+}
|
|
+
|
|
+#endif
|
|
+
|
|
static int napi_threaded_poll(void *data)
|
|
{
|
|
struct napi_struct *napi = data;
|
|
@@ -11618,7 +11635,11 @@ static int __init net_dev_init(void)
|
|
INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
|
|
sd->cpu = i;
|
|
#endif
|
|
+#ifndef CONFIG_PREEMPT_RT
|
|
INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd);
|
|
+#else
|
|
+ INIT_WORK(&sd->defer_work, trigger_rx_softirq);
|
|
+#endif
|
|
spin_lock_init(&sd->defer_lock);
|
|
|
|
init_gro_hash(&sd->backlog);
|
|
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
|
|
index f0a9ef1aeaa2..682175af439d 100644
|
|
--- a/net/core/skbuff.c
|
|
+++ b/net/core/skbuff.c
|
|
@@ -6863,8 +6863,13 @@ nodefer: __kfree_skb(skb);
|
|
/* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
|
|
* if we are unlucky enough (this seems very unlikely).
|
|
*/
|
|
- if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1))
|
|
+ if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) {
|
|
+#ifndef CONFIG_PREEMPT_RT
|
|
smp_call_function_single_async(cpu, &sd->defer_csd);
|
|
+#else
|
|
+ schedule_work_on(cpu, &sd->defer_work);
|
|
+#endif
|
|
+ }
|
|
}
|
|
|
|
static void skb_splice_csum_page(struct sk_buff *skb, struct page *page,
|
|
--
|
|
2.45.1
|
|
|