From 765b11f8f4e20b7433e4ba4a3e9106a0d59501ed Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <[email protected]>
Date: Mon, 25 Mar 2024 08:40:31 +0100
Subject: [PATCH 4/4] net: Rename rps_lock to backlog_lock.

The rps_lock*() functions use the inner lock of a sk_buff_head for
locking. This lock is taken if RPS is enabled; otherwise the list is
accessed locklessly and disabling interrupts is enough for the
synchronisation because the list is only accessed on the local CPU. Not
only the list but also the NAPI state is protected by this lock.

With the addition of backlog threads, the lock is needed even without
RPS because the backlog can now be accessed from another CPU. The
clean-up of the defer_list is also done via the backlog threads (if
enabled).

It has been suggested to rename the locking functions since they are no
longer RPS-specific.

Rename the rps_lock*() functions to backlog_lock*().

Suggested-by: Jakub Kicinski <[email protected]>
Acked-by: Jakub Kicinski <[email protected]>
Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
Signed-off-by: Paolo Abeni <[email protected]>
---
 net/core/dev.c | 34 +++++++++++++++++-----------------
 1 file changed, 17 insertions(+), 17 deletions(-)
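
The sketch below is not part of the patch; it is a minimal, hypothetical
caller written against the renamed helpers to illustrate the pattern the
commit message describes. enqueue_sketch() is made up for illustration;
the real user is enqueue_to_backlog() in the hunks below.

/* Illustrative sketch only -- not from net/core/dev.c. */
static int enqueue_sketch(struct softnet_data *sd, struct sk_buff *skb)
{
	unsigned long flags;

	/* Takes sd->input_pkt_queue.lock when RPS or the backlog threads
	 * may touch the queue from another CPU; otherwise it only
	 * disables interrupts for the CPU-local case.
	 */
	backlog_lock_irq_save(sd, &flags);
	__skb_queue_tail(&sd->input_pkt_queue, skb);
	backlog_unlock_irq_restore(sd, &flags);

	return NET_RX_SUCCESS;
}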

--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -243,8 +243,8 @@ static bool use_backlog_threads(void)
 
 #endif
 
-static inline void rps_lock_irqsave(struct softnet_data *sd,
-				    unsigned long *flags)
+static inline void backlog_lock_irq_save(struct softnet_data *sd,
+					 unsigned long *flags)
 {
 	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
 		spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
@@ -252,7 +252,7 @@ static inline void rps_lock_irqsave(stru
 		local_irq_save(*flags);
 }
 
-static inline void rps_lock_irq_disable(struct softnet_data *sd)
+static inline void backlog_lock_irq_disable(struct softnet_data *sd)
 {
 	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
 		spin_lock_irq(&sd->input_pkt_queue.lock);
@@ -260,8 +260,8 @@ static inline void rps_lock_irq_disable(
 		local_irq_disable();
 }
 
-static inline void rps_unlock_irq_restore(struct softnet_data *sd,
-					  unsigned long *flags)
+static inline void backlog_unlock_irq_restore(struct softnet_data *sd,
+					      unsigned long *flags)
 {
 	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
 		spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
@@ -269,7 +269,7 @@ static inline void rps_unlock_irq_restor
 		local_irq_restore(*flags);
 }
 
-static inline void rps_unlock_irq_enable(struct softnet_data *sd)
+static inline void backlog_unlock_irq_enable(struct softnet_data *sd)
 {
 	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
 		spin_unlock_irq(&sd->input_pkt_queue.lock);
@@ -4789,12 +4789,12 @@ void kick_defer_list_purge(struct softne
 	unsigned long flags;
 
 	if (use_backlog_threads()) {
-		rps_lock_irqsave(sd, &flags);
+		backlog_lock_irq_save(sd, &flags);
 
 		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
 			__napi_schedule_irqoff(&sd->backlog);
 
-		rps_unlock_irq_restore(sd, &flags);
+		backlog_unlock_irq_restore(sd, &flags);
 
 	} else if (!cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) {
 		smp_call_function_single_async(cpu, &sd->defer_csd);
@@ -4856,7 +4856,7 @@ static int enqueue_to_backlog(struct sk_
 	reason = SKB_DROP_REASON_NOT_SPECIFIED;
 	sd = &per_cpu(softnet_data, cpu);
 
-	rps_lock_irqsave(sd, &flags);
+	backlog_lock_irq_save(sd, &flags);
 	if (!netif_running(skb->dev))
 		goto drop;
 	qlen = skb_queue_len(&sd->input_pkt_queue);
@@ -4865,7 +4865,7 @@ static int enqueue_to_backlog(struct sk_
 enqueue:
 			__skb_queue_tail(&sd->input_pkt_queue, skb);
 			input_queue_tail_incr_save(sd, qtail);
-			rps_unlock_irq_restore(sd, &flags);
+			backlog_unlock_irq_restore(sd, &flags);
 			return NET_RX_SUCCESS;
 		}
 
@@ -4880,7 +4880,7 @@ enqueue:
 
 drop:
 	sd->dropped++;
-	rps_unlock_irq_restore(sd, &flags);
+	backlog_unlock_irq_restore(sd, &flags);
 
 	dev_core_stats_rx_dropped_inc(skb->dev);
 	kfree_skb_reason(skb, reason);
@@ -5911,7 +5911,7 @@ static void flush_backlog(struct work_st
 	local_bh_disable();
 	sd = this_cpu_ptr(&softnet_data);
 
-	rps_lock_irq_disable(sd);
+	backlog_lock_irq_disable(sd);
 	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
 		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
 			__skb_unlink(skb, &sd->input_pkt_queue);
@@ -5919,7 +5919,7 @@ static void flush_backlog(struct work_st
 			input_queue_head_incr(sd);
 		}
 	}
-	rps_unlock_irq_enable(sd);
+	backlog_unlock_irq_enable(sd);
 
 	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
 		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
@@ -5937,14 +5937,14 @@ static bool flush_required(int cpu)
 	struct softnet_data *sd = &per_cpu(softnet_data, cpu);
 	bool do_flush;
 
-	rps_lock_irq_disable(sd);
+	backlog_lock_irq_disable(sd);
 
 	/* as insertion into process_queue happens with the rps lock held,
 	 * process_queue access may race only with dequeue
 	 */
 	do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
		   !skb_queue_empty_lockless(&sd->process_queue);
-	rps_unlock_irq_enable(sd);
+	backlog_unlock_irq_enable(sd);
 
 	return do_flush;
 #endif
@@ -6059,7 +6059,7 @@ static int process_backlog(struct napi_s
 
 		}
 
-		rps_lock_irq_disable(sd);
+		backlog_lock_irq_disable(sd);
 		if (skb_queue_empty(&sd->input_pkt_queue)) {
 			/*
 			 * Inline a custom version of __napi_complete().
@@ -6075,7 +6075,7 @@ static int process_backlog(struct napi_s
 			skb_queue_splice_tail_init(&sd->input_pkt_queue,
						   &sd->process_queue);
 		}
-		rps_unlock_irq_enable(sd);
+		backlog_unlock_irq_enable(sd);
 	}
 
 	return work;