700-02-net-stmmac-move-TX-timer-arm-after-DMA-enable.patch
From fb04db35447d1e8ff557c8e57139164cecab7de5 Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Wed, 27 Sep 2023 15:38:31 +0200
Subject: [PATCH 2/4] net: stmmac: move TX timer arm after DMA enable

Move TX timer arm call after DMA interrupt is enabled again.

The TX timer arm function changed logic and now is skipped if a napi is
already scheduled. By moving the TX timer arm call after DMA is enabled,
we permit to correctly skip if a DMA interrupt has been fired and a napi
has been scheduled again.

Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
---
 .../net/ethernet/stmicro/stmmac/stmmac_main.c | 19 +++++++++++++++----
 1 file changed, 15 insertions(+), 4 deletions(-)

--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2530,7 +2530,8 @@ static void stmmac_bump_dma_threshold(st
  * @queue: TX queue index
  * Description: it reclaims the transmit resources after transmission completes.
  */
-static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
+static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
+			   bool *pending_packets)
 {
 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
 	unsigned int bytes_compl = 0, pkts_compl = 0;
@@ -2693,7 +2694,7 @@ static int stmmac_tx_clean(struct stmmac
 
 	/* We still have pending packets, let's call for a new scheduling */
 	if (tx_q->dirty_tx != tx_q->cur_tx)
-		stmmac_tx_timer_arm(priv, queue);
+		*pending_packets = true;
 
 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
@@ -5476,12 +5477,13 @@ static int stmmac_napi_poll_tx(struct na
 	struct stmmac_channel *ch =
 		container_of(napi, struct stmmac_channel, tx_napi);
 	struct stmmac_priv *priv = ch->priv_data;
+	bool pending_packets = false;
 	u32 chan = ch->index;
 	int work_done;
 
 	priv->xstats.napi_poll++;
 
-	work_done = stmmac_tx_clean(priv, budget, chan);
+	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
 	work_done = min(work_done, budget);
 
 	if (work_done < budget && napi_complete_done(napi, work_done)) {
@@ -5492,6 +5494,10 @@ static int stmmac_napi_poll_tx(struct na
 		spin_unlock_irqrestore(&ch->lock, flags);
 	}
 
+	/* TX still have packet to handle, check if we need to arm tx timer */
+	if (pending_packets)
+		stmmac_tx_timer_arm(priv, chan);
+
 	return work_done;
 }
@@ -5501,11 +5507,12 @@ static int stmmac_napi_poll_rxtx(struct
 		container_of(napi, struct stmmac_channel, rxtx_napi);
 	struct stmmac_priv *priv = ch->priv_data;
 	int rx_done, tx_done, rxtx_done;
+	bool tx_pending_packets = false;
 	u32 chan = ch->index;
 
 	priv->xstats.napi_poll++;
 
-	tx_done = stmmac_tx_clean(priv, budget, chan);
+	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
 	tx_done = min(tx_done, budget);
 
 	rx_done = stmmac_rx_zc(priv, budget, chan);
@@ -5530,6 +5537,10 @@ static int stmmac_napi_poll_rxtx(struct
 		spin_unlock_irqrestore(&ch->lock, flags);
 	}
 
+	/* TX still have packet to handle, check if we need to arm tx timer */
+	if (tx_pending_packets)
+		stmmac_tx_timer_arm(priv, chan);
+
 	return min(rxtx_done, budget - 1);
 }