603-v6.10-net-Rename-rps_lock-to-backlog_lock.patch (5.5 KB)

From 765b11f8f4e20b7433e4ba4a3e9106a0d59501ed Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <[email protected]>
Date: Mon, 25 Mar 2024 08:40:31 +0100
Subject: [PATCH 4/4] net: Rename rps_lock to backlog_lock.

The rps_lock.*() functions use the inner lock of a sk_buff_head for
locking. This lock is used if RPS is enabled, otherwise the list is
accessed lockless and disabling interrupts is enough for the
synchronisation because it is only accessed CPU local. Not only the list
is protected but also the NAPI state protected.

With the addition of backlog threads, the lock is also needed because of
the cross CPU access even without RPS. The clean up of the defer_list
list is also done via backlog threads (if enabled).

It has been suggested to rename the locking function since it is no
longer just RPS.

Rename the rps_lock*() functions to backlog_lock*().

Suggested-by: Jakub Kicinski <[email protected]>
Acked-by: Jakub Kicinski <[email protected]>
Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
Signed-off-by: Paolo Abeni <[email protected]>
---
 net/core/dev.c | 34 +++++++++++++++++-----------------
 1 file changed, 17 insertions(+), 17 deletions(-)
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -243,8 +243,8 @@ static bool use_backlog_threads(void)
 #endif
 
-static inline void rps_lock_irqsave(struct softnet_data *sd,
-				    unsigned long *flags)
+static inline void backlog_lock_irq_save(struct softnet_data *sd,
+					 unsigned long *flags)
 {
 	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
 		spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
@@ -252,7 +252,7 @@ static inline void rps_lock_irqsave(stru
 		local_irq_save(*flags);
 }
 
-static inline void rps_lock_irq_disable(struct softnet_data *sd)
+static inline void backlog_lock_irq_disable(struct softnet_data *sd)
 {
 	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
 		spin_lock_irq(&sd->input_pkt_queue.lock);
@@ -260,8 +260,8 @@ static inline void rps_lock_irq_disable(
 		local_irq_disable();
 }
 
-static inline void rps_unlock_irq_restore(struct softnet_data *sd,
-					  unsigned long *flags)
+static inline void backlog_unlock_irq_restore(struct softnet_data *sd,
+					      unsigned long *flags)
 {
 	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
 		spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
@@ -269,7 +269,7 @@ static inline void rps_unlock_irq_restor
 		local_irq_restore(*flags);
 }
 
-static inline void rps_unlock_irq_enable(struct softnet_data *sd)
+static inline void backlog_unlock_irq_enable(struct softnet_data *sd)
 {
 	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
 		spin_unlock_irq(&sd->input_pkt_queue.lock);
@@ -4789,12 +4789,12 @@ void kick_defer_list_purge(struct softne
 	unsigned long flags;
 
 	if (use_backlog_threads()) {
-		rps_lock_irqsave(sd, &flags);
+		backlog_lock_irq_save(sd, &flags);
 
 		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
 			__napi_schedule_irqoff(&sd->backlog);
 
-		rps_unlock_irq_restore(sd, &flags);
+		backlog_unlock_irq_restore(sd, &flags);
 
 	} else if (!cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) {
 		smp_call_function_single_async(cpu, &sd->defer_csd);
@@ -4856,7 +4856,7 @@ static int enqueue_to_backlog(struct sk_
 	reason = SKB_DROP_REASON_NOT_SPECIFIED;
 	sd = &per_cpu(softnet_data, cpu);
 
-	rps_lock_irqsave(sd, &flags);
+	backlog_lock_irq_save(sd, &flags);
 	if (!netif_running(skb->dev))
 		goto drop;
 	qlen = skb_queue_len(&sd->input_pkt_queue);
@@ -4865,7 +4865,7 @@ static int enqueue_to_backlog(struct sk_
 enqueue:
 			__skb_queue_tail(&sd->input_pkt_queue, skb);
 			input_queue_tail_incr_save(sd, qtail);
-			rps_unlock_irq_restore(sd, &flags);
+			backlog_unlock_irq_restore(sd, &flags);
 			return NET_RX_SUCCESS;
 		}
@@ -4880,7 +4880,7 @@ enqueue:
 
 drop:
 	sd->dropped++;
-	rps_unlock_irq_restore(sd, &flags);
+	backlog_unlock_irq_restore(sd, &flags);
 
 	dev_core_stats_rx_dropped_inc(skb->dev);
 	kfree_skb_reason(skb, reason);
@@ -5911,7 +5911,7 @@ static void flush_backlog(struct work_st
 	local_bh_disable();
 	sd = this_cpu_ptr(&softnet_data);
 
-	rps_lock_irq_disable(sd);
+	backlog_lock_irq_disable(sd);
 	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
 		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
 			__skb_unlink(skb, &sd->input_pkt_queue);
@@ -5919,7 +5919,7 @@ static void flush_backlog(struct work_st
 			input_queue_head_incr(sd);
 		}
 	}
-	rps_unlock_irq_enable(sd);
+	backlog_unlock_irq_enable(sd);
 
 	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
 		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
@@ -5937,14 +5937,14 @@ static bool flush_required(int cpu)
 	struct softnet_data *sd = &per_cpu(softnet_data, cpu);
 	bool do_flush;
 
-	rps_lock_irq_disable(sd);
+	backlog_lock_irq_disable(sd);
 
 	/* as insertion into process_queue happens with the rps lock held,
 	 * process_queue access may race only with dequeue
 	 */
 	do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
 		   !skb_queue_empty_lockless(&sd->process_queue);
-	rps_unlock_irq_enable(sd);
+	backlog_unlock_irq_enable(sd);
 
 	return do_flush;
 #endif
@@ -6059,7 +6059,7 @@ static int process_backlog(struct napi_s
 		}
 
-		rps_lock_irq_disable(sd);
+		backlog_lock_irq_disable(sd);
 		if (skb_queue_empty(&sd->input_pkt_queue)) {
 			/*
 			 * Inline a custom version of __napi_complete().
@@ -6075,7 +6075,7 @@ static int process_backlog(struct napi_s
 			skb_queue_splice_tail_init(&sd->input_pkt_queue,
 						   &sd->process_queue);
 		}
-		rps_unlock_irq_enable(sd);
+		backlog_unlock_irq_enable(sd);
 	}
 
 	return work;