706-03-v6.0-net-ethernet-mtk_eth_soc-add-xmit-XDP-support.patch 9.3 KB

From 5886d26fd25bbe26130e3e5f7474b9b3e98a3469 Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi <[email protected]>
Date: Fri, 22 Jul 2022 09:19:39 +0200
Subject: [PATCH] net: ethernet: mtk_eth_soc: add xmit XDP support

Introduce XDP support for XDP_TX verdict and ndo_xdp_xmit function
pointer.

Signed-off-by: Lorenzo Bianconi <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
---
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 192 +++++++++++++++++---
 drivers/net/ethernet/mediatek/mtk_eth_soc.h |  10 +-
 2 files changed, 180 insertions(+), 22 deletions(-)
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -987,15 +987,26 @@ static void mtk_tx_unmap(struct mtk_eth
                 }
         }

-        tx_buf->flags = 0;
-        if (tx_buf->skb &&
-            (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) {
-                if (napi)
-                        napi_consume_skb(tx_buf->skb, napi);
+        if (tx_buf->type == MTK_TYPE_SKB) {
+                if (tx_buf->data &&
+                    tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+                        struct sk_buff *skb = tx_buf->data;
+
+                        if (napi)
+                                napi_consume_skb(skb, napi);
+                        else
+                                dev_kfree_skb_any(skb);
+                }
+        } else if (tx_buf->data) {
+                struct xdp_frame *xdpf = tx_buf->data;
+
+                if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
+                        xdp_return_frame_rx_napi(xdpf);
                 else
-                        dev_kfree_skb_any(tx_buf->skb);
+                        xdp_return_frame(xdpf);
         }
-        tx_buf->skb = NULL;
+        tx_buf->flags = 0;
+        tx_buf->data = NULL;
 }

 static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
@@ -1012,7 +1023,7 @@ static void setup_tx_buf(struct mtk_eth
                         dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
                         dma_unmap_len_set(tx_buf, dma_len1, size);
                 } else {
-                        tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
+                        tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
                         txd->txd1 = mapped_addr;
                         txd->txd2 = TX_DMA_PLEN0(size);
                         dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
@@ -1188,7 +1199,7 @@ static int mtk_tx_map(struct sk_buff *sk
                                                     soc->txrx.txd_size);
                         if (new_desc)
                                 memset(tx_buf, 0, sizeof(*tx_buf));
-                        tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
+                        tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
                         tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
                         tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
                                          MTK_TX_FLAGS_FPORT1;
@@ -1202,7 +1213,8 @@
         }

         /* store skb to cleanup */
-        itx_buf->skb = skb;
+        itx_buf->type = MTK_TYPE_SKB;
+        itx_buf->data = skb;

         if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
                 if (k & 0x1)
@@ -1414,13 +1426,14 @@ static struct page_pool *mtk_create_page
                 .pool_size = size,
                 .nid = NUMA_NO_NODE,
                 .dev = eth->dma_dev,
-                .dma_dir = DMA_FROM_DEVICE,
                 .offset = MTK_PP_HEADROOM,
                 .max_len = MTK_PP_MAX_BUF_SIZE,
         };
         struct page_pool *pp;
         int err;

+        pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
+                                                          : DMA_FROM_DEVICE;
         pp = page_pool_create(&pp_params);
         if (IS_ERR(pp))
                 return pp;
@@ -1466,6 +1479,122 @@ static void mtk_rx_put_buff(struct mtk_r
                 skb_free_frag(data);
 }

+static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
+                                struct net_device *dev, bool dma_map)
+{
+        const struct mtk_soc_data *soc = eth->soc;
+        struct mtk_tx_ring *ring = &eth->tx_ring;
+        struct mtk_tx_dma_desc_info txd_info = {
+                .size = xdpf->len,
+                .first = true,
+                .last = true,
+        };
+        struct mtk_mac *mac = netdev_priv(dev);
+        struct mtk_tx_dma *txd, *txd_pdma;
+        int err = 0, index = 0, n_desc = 1;
+        struct mtk_tx_buf *tx_buf;
+
+        if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
+                return -EBUSY;
+
+        if (unlikely(atomic_read(&ring->free_count) <= 1))
+                return -EBUSY;
+
+        spin_lock(&eth->page_lock);
+
+        txd = ring->next_free;
+        if (txd == ring->last_free) {
+                err = -ENOMEM;
+                goto out;
+        }
+
+        tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
+        memset(tx_buf, 0, sizeof(*tx_buf));
+
+        if (dma_map) { /* ndo_xdp_xmit */
+                txd_info.addr = dma_map_single(eth->dma_dev, xdpf->data,
+                                               txd_info.size, DMA_TO_DEVICE);
+                if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) {
+                        err = -ENOMEM;
+                        goto out;
+                }
+                tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
+        } else {
+                struct page *page = virt_to_head_page(xdpf->data);
+
+                txd_info.addr = page_pool_get_dma_addr(page) +
+                                sizeof(*xdpf) + xdpf->headroom;
+                dma_sync_single_for_device(eth->dma_dev, txd_info.addr,
+                                           txd_info.size,
+                                           DMA_BIDIRECTIONAL);
+        }
+        mtk_tx_set_dma_desc(dev, txd, &txd_info);
+
+        tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
+
+        txd_pdma = qdma_to_pdma(ring, txd);
+        setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr, txd_info.size,
+                     index++);
+
+        /* store xdpf for cleanup */
+        tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
+        tx_buf->data = xdpf;
+
+        if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+                if (index & 1)
+                        txd_pdma->txd2 |= TX_DMA_LS0;
+                else
+                        txd_pdma->txd2 |= TX_DMA_LS1;
+        }
+
+        ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
+        atomic_sub(n_desc, &ring->free_count);
+
+        /* make sure that all changes to the dma ring are flushed before we
+         * continue
+         */
+        wmb();
+
+        if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+                mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
+        } else {
+                int idx;
+
+                idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
+                mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
+                        MT7628_TX_CTX_IDX0);
+        }
+out:
+        spin_unlock(&eth->page_lock);
+
+        return err;
+}
+
+static int mtk_xdp_xmit(struct net_device *dev, int num_frame,
+                        struct xdp_frame **frames, u32 flags)
+{
+        struct mtk_mac *mac = netdev_priv(dev);
+        struct mtk_hw_stats *hw_stats = mac->hw_stats;
+        struct mtk_eth *eth = mac->hw;
+        int i, nxmit = 0;
+
+        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+                return -EINVAL;
+
+        for (i = 0; i < num_frame; i++) {
+                if (mtk_xdp_submit_frame(eth, frames[i], dev, true))
+                        break;
+                nxmit++;
+        }
+
+        u64_stats_update_begin(&hw_stats->syncp);
+        hw_stats->xdp_stats.tx_xdp_xmit += nxmit;
+        hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit;
+        u64_stats_update_end(&hw_stats->syncp);
+
+        return nxmit;
+}
+
 static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
                        struct xdp_buff *xdp, struct net_device *dev)
 {
@@ -1494,6 +1623,18 @@ static u32 mtk_xdp_run(struct mtk_eth *e
                 count = &hw_stats->xdp_stats.rx_xdp_redirect;
                 goto update_stats;
+        case XDP_TX: {
+                struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
+
+                if (mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
+                        count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
+                        act = XDP_DROP;
+                        break;
+                }
+
+                count = &hw_stats->xdp_stats.rx_xdp_tx;
+                goto update_stats;
+        }
         default:
                 bpf_warn_invalid_xdp_action(act);
                 fallthrough;
@@ -1727,9 +1868,8 @@ static int mtk_poll_tx_qdma(struct mtk_e
 {
         const struct mtk_reg_map *reg_map = eth->soc->reg_map;
         struct mtk_tx_ring *ring = &eth->tx_ring;
-        struct mtk_tx_dma *desc;
-        struct sk_buff *skb;
         struct mtk_tx_buf *tx_buf;
+        struct mtk_tx_dma *desc;
         u32 cpu, dma;

         cpu = ring->last_free_ptr;
@@ -1750,15 +1890,21 @@ static int mtk_poll_tx_qdma(struct mtk_e
                 if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
                         mac = 1;

-                skb = tx_buf->skb;
-                if (!skb)
+                if (!tx_buf->data)
                         break;

-                if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
+                if (tx_buf->type == MTK_TYPE_SKB &&
+                    tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+                        struct sk_buff *skb = tx_buf->data;
+
                         bytes[mac] += skb->len;
                         done[mac]++;
                         budget--;
+                } else if (tx_buf->type == MTK_TYPE_XDP_TX ||
+                           tx_buf->type == MTK_TYPE_XDP_NDO) {
+                        budget--;
                 }
+
                 mtk_tx_unmap(eth, tx_buf, true);

                 ring->last_free = desc;
@@ -1777,9 +1923,8 @@ static int mtk_poll_tx_pdma(struct mtk_e
                             unsigned int *done, unsigned int *bytes)
 {
         struct mtk_tx_ring *ring = &eth->tx_ring;
-        struct mtk_tx_dma *desc;
-        struct sk_buff *skb;
         struct mtk_tx_buf *tx_buf;
+        struct mtk_tx_dma *desc;
         u32 cpu, dma;

         cpu = ring->cpu_idx;
@@ -1787,14 +1932,18 @@
         while ((cpu != dma) && budget) {
                 tx_buf = &ring->buf[cpu];
-                skb = tx_buf->skb;
-                if (!skb)
+                if (!tx_buf->data)
                         break;

-                if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
+                if (tx_buf->type == MTK_TYPE_SKB &&
+                    tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+                        struct sk_buff *skb = tx_buf->data;
                         bytes[0] += skb->len;
                         done[0]++;
                         budget--;
+                } else if (tx_buf->type == MTK_TYPE_XDP_TX ||
+                           tx_buf->type == MTK_TYPE_XDP_NDO) {
+                        budget--;
                 }
                 mtk_tx_unmap(eth, tx_buf, true);
@@ -3462,6 +3611,7 @@ static const struct net_device_ops mtk_n
 #endif
         .ndo_setup_tc           = mtk_eth_setup_tc,
         .ndo_bpf                = mtk_xdp,
+        .ndo_xdp_xmit           = mtk_xdp_xmit,
 };

 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -694,6 +694,12 @@ enum mtk_dev_state {
         MTK_RESETTING
 };

+enum mtk_tx_buf_type {
+        MTK_TYPE_SKB,
+        MTK_TYPE_XDP_TX,
+        MTK_TYPE_XDP_NDO,
+};
+
 /* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
  * by the TX descriptors
  * @skb: The SKB pointer of the packet being sent
@@ -703,7 +709,9 @@ enum mtk_dev_state
  * @dma_len1: The length of the second segment
  */
 struct mtk_tx_buf {
-        struct sk_buff *skb;
+        enum mtk_tx_buf_type type;
+        void *data;
+
         u32 flags;
         DEFINE_DMA_UNMAP_ADDR(dma_addr0);
         DEFINE_DMA_UNMAP_LEN(dma_len0);
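
Usage sketch (not part of the patch above): a minimal XDP program that simply returns XDP_TX. Once attached to a mtk_eth_soc port it exercises the new mtk_xdp_submit_frame() path with dma_map == false (page_pool buffer), while frames redirected to the port from another device arrive instead through mtk_xdp_xmit()/ndo_xdp_xmit with dma_map == true. The program and section names here are illustrative, not taken from the patch.

/* SPDX-License-Identifier: GPL-2.0 */
/* Illustrative only: bounce every received frame back out of the
 * ingress port. On a mtk_eth_soc interface this hits the XDP_TX case
 * added to mtk_xdp_run() above.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_tx_bounce(struct xdp_md *ctx)
{
        return XDP_TX;
}

char _license[] SEC("license") = "GPL";

Built with clang -O2 -target bpf, such an object can be attached with, for example, "ip link set dev <port> xdp obj xdp_tx_bounce.o sec xdp" (interface and object names are placeholders).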