706-06-v6.0-net-ethernet-mtk_eth_soc-introduce-xdp-multi-frag-support.patch 6.2 KB

  1. From 155738a4f319538a09f734ce1f5a2eac3ada1de2 Mon Sep 17 00:00:00 2001
  2. From: Lorenzo Bianconi <[email protected]>
  3. Date: Wed, 27 Jul 2022 23:20:51 +0200
  4. Subject: [PATCH] net: ethernet: mtk_eth_soc: introduce xdp multi-frag support
  5. Add the capability to map non-linear xdp frames in XDP_TX and
  6. ndo_xdp_xmit callback.
  7. Signed-off-by: Lorenzo Bianconi <[email protected]>
  8. Signed-off-by: David S. Miller <[email protected]>
  9. ---
  10. drivers/net/ethernet/mediatek/mtk_eth_soc.c | 125 +++++++++++++-------
  11. 1 file changed, 82 insertions(+), 43 deletions(-)
  12. --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
  13. +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
  14. @@ -988,23 +988,22 @@ static void mtk_tx_unmap(struct mtk_eth
  15. }
  16. }
  17. - if (tx_buf->type == MTK_TYPE_SKB) {
  18. - if (tx_buf->data &&
  19. - tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
  20. + if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
  21. + if (tx_buf->type == MTK_TYPE_SKB) {
  22. struct sk_buff *skb = tx_buf->data;
  23. if (napi)
  24. napi_consume_skb(skb, napi);
  25. else
  26. dev_kfree_skb_any(skb);
  27. - }
  28. - } else if (tx_buf->data) {
  29. - struct xdp_frame *xdpf = tx_buf->data;
  30. + } else {
  31. + struct xdp_frame *xdpf = tx_buf->data;
  32. - if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
  33. - xdp_return_frame_rx_napi(xdpf);
  34. - else
  35. - xdp_return_frame(xdpf);
  36. + if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
  37. + xdp_return_frame_rx_napi(xdpf);
  38. + else
  39. + xdp_return_frame(xdpf);
  40. + }
  41. }
  42. tx_buf->flags = 0;
  43. tx_buf->data = NULL;
  44. @@ -1507,6 +1506,8 @@ static int mtk_xdp_frame_map(struct mtk_
  45. mtk_tx_set_dma_desc(dev, txd, txd_info);
  46. tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
  47. + tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
  48. + tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
  49. txd_pdma = qdma_to_pdma(ring, txd);
  50. setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
  51. @@ -1518,43 +1519,69 @@ static int mtk_xdp_frame_map(struct mtk_
  52. static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
  53. struct net_device *dev, bool dma_map)
  54. {
  55. + struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
  56. const struct mtk_soc_data *soc = eth->soc;
  57. struct mtk_tx_ring *ring = &eth->tx_ring;
  58. struct mtk_tx_dma_desc_info txd_info = {
  59. .size = xdpf->len,
  60. .first = true,
  61. - .last = true,
  62. + .last = !xdp_frame_has_frags(xdpf),
  63. };
  64. - int err = 0, index = 0, n_desc = 1;
  65. - struct mtk_tx_dma *txd, *txd_pdma;
  66. - struct mtk_tx_buf *tx_buf;
  67. + int err, index = 0, n_desc = 1, nr_frags;
  68. + struct mtk_tx_dma *htxd, *txd, *txd_pdma;
  69. + struct mtk_tx_buf *htx_buf, *tx_buf;
  70. + void *data = xdpf->data;
  71. if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
  72. return -EBUSY;
  73. - if (unlikely(atomic_read(&ring->free_count) <= 1))
  74. + nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
  75. + if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
  76. return -EBUSY;
  77. spin_lock(&eth->page_lock);
  78. txd = ring->next_free;
  79. if (txd == ring->last_free) {
  80. - err = -ENOMEM;
  81. - goto out;
  82. + spin_unlock(&eth->page_lock);
  83. + return -ENOMEM;
  84. }
  85. + htxd = txd;
  86. tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
  87. memset(tx_buf, 0, sizeof(*tx_buf));
  88. + htx_buf = tx_buf;
  89. - err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
  90. - xdpf->data, xdpf->headroom, index,
  91. - dma_map);
  92. - if (err < 0)
  93. - goto out;
  94. + for (;;) {
  95. + err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
  96. + data, xdpf->headroom, index, dma_map);
  97. + if (err < 0)
  98. + goto unmap;
  99. +
  100. + if (txd_info.last)
  101. + break;
  102. + if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
  103. + txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
  104. + txd_pdma = qdma_to_pdma(ring, txd);
  105. + if (txd == ring->last_free)
  106. + goto unmap;
  107. +
  108. + tx_buf = mtk_desc_to_tx_buf(ring, txd,
  109. + soc->txrx.txd_size);
  110. + memset(tx_buf, 0, sizeof(*tx_buf));
  111. + n_desc++;
  112. + }
  113. +
  114. + memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
  115. + txd_info.size = skb_frag_size(&sinfo->frags[index]);
  116. + txd_info.last = index + 1 == nr_frags;
  117. + data = skb_frag_address(&sinfo->frags[index]);
  118. +
  119. + index++;
  120. + }
  121. /* store xdpf for cleanup */
  122. - tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
  123. - tx_buf->data = xdpf;
  124. + htx_buf->data = xdpf;
  125. if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
  126. txd_pdma = qdma_to_pdma(ring, txd);
  127. @@ -1581,7 +1608,24 @@ static int mtk_xdp_submit_frame(struct m
  128. mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
  129. MT7628_TX_CTX_IDX0);
  130. }
  131. -out:
  132. +
  133. + spin_unlock(&eth->page_lock);
  134. +
  135. + return 0;
  136. +
  137. +unmap:
  138. + while (htxd != txd) {
  139. + txd_pdma = qdma_to_pdma(ring, htxd);
  140. + tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
  141. + mtk_tx_unmap(eth, tx_buf, false);
  142. +
  143. + htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
  144. + if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
  145. + txd_pdma->txd2 = TX_DMA_DESP2_DEF;
  146. +
  147. + htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
  148. + }
  149. +
  150. spin_unlock(&eth->page_lock);
  151. return err;
  152. @@ -1910,18 +1954,15 @@ static int mtk_poll_tx_qdma(struct mtk_e
  153. if (!tx_buf->data)
  154. break;
  155. - if (tx_buf->type == MTK_TYPE_SKB &&
  156. - tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
  157. - struct sk_buff *skb = tx_buf->data;
  158. + if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
  159. + if (tx_buf->type == MTK_TYPE_SKB) {
  160. + struct sk_buff *skb = tx_buf->data;
  161. - bytes[mac] += skb->len;
  162. - done[mac]++;
  163. - budget--;
  164. - } else if (tx_buf->type == MTK_TYPE_XDP_TX ||
  165. - tx_buf->type == MTK_TYPE_XDP_NDO) {
  166. + bytes[mac] += skb->len;
  167. + done[mac]++;
  168. + }
  169. budget--;
  170. }
  171. -
  172. mtk_tx_unmap(eth, tx_buf, true);
  173. ring->last_free = desc;
  174. @@ -1952,17 +1993,15 @@ static int mtk_poll_tx_pdma(struct mtk_e
  175. if (!tx_buf->data)
  176. break;
  177. - if (tx_buf->type == MTK_TYPE_SKB &&
  178. - tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
  179. - struct sk_buff *skb = tx_buf->data;
  180. - bytes[0] += skb->len;
  181. - done[0]++;
  182. - budget--;
  183. - } else if (tx_buf->type == MTK_TYPE_XDP_TX ||
  184. - tx_buf->type == MTK_TYPE_XDP_NDO) {
  185. + if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
  186. + if (tx_buf->type == MTK_TYPE_SKB) {
  187. + struct sk_buff *skb = tx_buf->data;
  188. +
  189. + bytes[0] += skb->len;
  190. + done[0]++;
  191. + }
  192. budget--;
  193. }
  194. -
  195. mtk_tx_unmap(eth, tx_buf, true);
  196. desc = ring->dma + cpu * eth->soc->txrx.txd_size;