730-02-v6.3-net-ethernet-mtk_eth_soc-increase-tx-ring-side-for-Q.patch

  1. From: Felix Fietkau <[email protected]>
  2. Date: Thu, 27 Oct 2022 19:53:57 +0200
  3. Subject: [PATCH] net: ethernet: mtk_eth_soc: increase tx ring side for
  4. QDMA devices
  5. In order to use the hardware traffic shaper feature, a larger tx ring is
  6. needed, especially for the scratch ring, which the hardware shaper uses to
  7. reorder packets.
  8. Signed-off-by: Felix Fietkau <[email protected]>
  9. ---
  10. --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
  11. +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
  12. @@ -900,7 +900,7 @@ static int mtk_init_fq_dma(struct mtk_et
  13. {
  14. const struct mtk_soc_data *soc = eth->soc;
  15. dma_addr_t phy_ring_tail;
  16. - int cnt = MTK_DMA_SIZE;
  17. + int cnt = MTK_QDMA_RING_SIZE;
  18. dma_addr_t dma_addr;
  19. int i;
  20. @@ -2154,19 +2154,25 @@ static int mtk_tx_alloc(struct mtk_eth *
  21. struct mtk_tx_ring *ring = &eth->tx_ring;
  22. int i, sz = soc->txrx.txd_size;
  23. struct mtk_tx_dma_v2 *txd;
  24. + int ring_size;
  25. - ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
  26. + if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
  27. + ring_size = MTK_QDMA_RING_SIZE;
  28. + else
  29. + ring_size = MTK_DMA_SIZE;
  30. +
  31. + ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
  32. GFP_KERNEL);
  33. if (!ring->buf)
  34. goto no_tx_mem;
  35. - ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
  36. + ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
  37. &ring->phys, GFP_KERNEL);
  38. if (!ring->dma)
  39. goto no_tx_mem;
  40. - for (i = 0; i < MTK_DMA_SIZE; i++) {
  41. - int next = (i + 1) % MTK_DMA_SIZE;
  42. + for (i = 0; i < ring_size; i++) {
  43. + int next = (i + 1) % ring_size;
  44. u32 next_ptr = ring->phys + next * sz;
  45. txd = ring->dma + i * sz;
  46. @@ -2186,22 +2192,22 @@ static int mtk_tx_alloc(struct mtk_eth *
  47. * descriptors in ring->dma_pdma.
  48. */
  49. if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
  50. - ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
  51. + ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
  52. &ring->phys_pdma, GFP_KERNEL);
  53. if (!ring->dma_pdma)
  54. goto no_tx_mem;
  55. - for (i = 0; i < MTK_DMA_SIZE; i++) {
  56. + for (i = 0; i < ring_size; i++) {
  57. ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
  58. ring->dma_pdma[i].txd4 = 0;
  59. }
  60. }
  61. - ring->dma_size = MTK_DMA_SIZE;
  62. - atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
  63. + ring->dma_size = ring_size;
  64. + atomic_set(&ring->free_count, ring_size - 2);
  65. ring->next_free = ring->dma;
  66. ring->last_free = (void *)txd;
  67. - ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
  68. + ring->last_free_ptr = (u32)(ring->phys + ((ring_size - 1) * sz));
  69. ring->thresh = MAX_SKB_FRAGS;
  70. /* make sure that all changes to the dma ring are flushed before we
  71. @@ -2213,14 +2219,14 @@ static int mtk_tx_alloc(struct mtk_eth *
  72. mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
  73. mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
  74. mtk_w32(eth,
  75. - ring->phys + ((MTK_DMA_SIZE - 1) * sz),
  76. + ring->phys + ((ring_size - 1) * sz),
  77. soc->reg_map->qdma.crx_ptr);
  78. mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
  79. mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
  80. soc->reg_map->qdma.qtx_cfg);
  81. } else {
  82. mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
  83. - mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
  84. + mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0);
  85. mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
  86. mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
  87. }
  88. @@ -2238,7 +2244,7 @@ static void mtk_tx_clean(struct mtk_eth
  89. int i;
  90. if (ring->buf) {
  91. - for (i = 0; i < MTK_DMA_SIZE; i++)
  92. + for (i = 0; i < ring->dma_size; i++)
  93. mtk_tx_unmap(eth, &ring->buf[i], false);
  94. kfree(ring->buf);
  95. ring->buf = NULL;
  96. @@ -2246,14 +2252,14 @@ static void mtk_tx_clean(struct mtk_eth
  97. if (ring->dma) {
  98. dma_free_coherent(eth->dma_dev,
  99. - MTK_DMA_SIZE * soc->txrx.txd_size,
  100. + ring->dma_size * soc->txrx.txd_size,
  101. ring->dma, ring->phys);
  102. ring->dma = NULL;
  103. }
  104. if (ring->dma_pdma) {
  105. dma_free_coherent(eth->dma_dev,
  106. - MTK_DMA_SIZE * soc->txrx.txd_size,
  107. + ring->dma_size * soc->txrx.txd_size,
  108. ring->dma_pdma, ring->phys_pdma);
  109. ring->dma_pdma = NULL;
  110. }
  111. @@ -2776,7 +2782,7 @@ static void mtk_dma_free(struct mtk_eth
  112. netdev_reset_queue(eth->netdev[i]);
  113. if (eth->scratch_ring) {
  114. dma_free_coherent(eth->dma_dev,
  115. - MTK_DMA_SIZE * soc->txrx.txd_size,
  116. + MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
  117. eth->scratch_ring, eth->phy_scratch_ring);
  118. eth->scratch_ring = NULL;
  119. eth->phy_scratch_ring = 0;
  120. --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
  121. +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
  122. @@ -27,6 +27,7 @@
  123. #define MTK_MAX_RX_LENGTH_2K 2048
  124. #define MTK_TX_DMA_BUF_LEN 0x3fff
  125. #define MTK_TX_DMA_BUF_LEN_V2 0xffff
  126. +#define MTK_QDMA_RING_SIZE 2048
  127. #define MTK_DMA_SIZE 512
  128. #define MTK_MAC_COUNT 2
  129. #define MTK_RX_ETH_HLEN (VLAN_ETH_HLEN + ETH_FCS_LEN)