
706-00-v6.0-net-ethernet-mtk_eth_soc-rely-on-page_pool-for-singl.patch 8.8 KB

From 23233e577ef973c2c5d0dd757a0a4605e34ecb57 Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi <[email protected]>
Date: Fri, 22 Jul 2022 09:19:36 +0200
Subject: [PATCH] net: ethernet: mtk_eth_soc: rely on page_pool for single page
 buffers

Rely on page_pool allocator for single page buffers in order to keep
them dma mapped and add skb recycling support.

Signed-off-by: Lorenzo Bianconi <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
---
 drivers/net/ethernet/mediatek/Kconfig       |   1 +
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 185 +++++++++++++++-----
 drivers/net/ethernet/mediatek/mtk_eth_soc.h |  10 ++
 3 files changed, 156 insertions(+), 40 deletions(-)

--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -16,6 +16,7 @@ config NET_MEDIATEK_SOC
 	depends on NET_DSA || !NET_DSA
 	select PHYLINK
 	select DIMLIB
+	select PAGE_POOL
 	help
 	  This driver supports the gigabit ethernet MACs in the
 	  MediaTek SoC family.
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1388,6 +1388,68 @@ static void mtk_update_rx_cpu_idx(struct
 	}
 }
 
+static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
+					      struct xdp_rxq_info *xdp_q,
+					      int id, int size)
+{
+	struct page_pool_params pp_params = {
+		.order = 0,
+		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+		.pool_size = size,
+		.nid = NUMA_NO_NODE,
+		.dev = eth->dma_dev,
+		.dma_dir = DMA_FROM_DEVICE,
+		.offset = MTK_PP_HEADROOM,
+		.max_len = MTK_PP_MAX_BUF_SIZE,
+	};
+	struct page_pool *pp;
+	int err;
+
+	pp = page_pool_create(&pp_params);
+	if (IS_ERR(pp))
+		return pp;
+
+	err = xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, id,
+			       eth->rx_napi.napi_id);
+	if (err < 0)
+		goto err_free_pp;
+
+	err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
+	if (err)
+		goto err_unregister_rxq;
+
+	return pp;
+
+err_unregister_rxq:
+	xdp_rxq_info_unreg(xdp_q);
+err_free_pp:
+	page_pool_destroy(pp);
+
+	return ERR_PTR(err);
+}
+
+static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
+				    gfp_t gfp_mask)
+{
+	struct page *page;
+
+	page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
+	if (!page)
+		return NULL;
+
+	*dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;
+	return page_address(page);
+}
+
+static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
+{
+	if (ring->page_pool)
+		page_pool_put_full_page(ring->page_pool,
+					virt_to_head_page(data), napi);
+	else
+		skb_free_frag(data);
+}
+
 static int mtk_poll_rx(struct napi_struct *napi, int budget,
 		       struct mtk_eth *eth)
 {
@@ -1401,9 +1463,9 @@ static int mtk_poll_rx(struct napi_struc
 
 	while (done < budget) {
 		unsigned int pktlen, *rxdcsum;
+		u32 hash, reason, reserve_len;
 		struct net_device *netdev;
 		dma_addr_t dma_addr;
-		u32 hash, reason;
 		int mac = 0;
 
 		ring = mtk_get_rx_ring(eth);
@@ -1434,36 +1496,54 @@ static int mtk_poll_rx(struct napi_struc
 			goto release_desc;
 
 		/* alloc new buffer */
-		if (ring->frag_size <= PAGE_SIZE)
-			new_data = napi_alloc_frag(ring->frag_size);
-		else
-			new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
-		if (unlikely(!new_data)) {
-			netdev->stats.rx_dropped++;
-			goto release_desc;
-		}
-		dma_addr = dma_map_single(eth->dma_dev,
-					  new_data + NET_SKB_PAD +
-					  eth->ip_align,
-					  ring->buf_size,
-					  DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) {
-			skb_free_frag(new_data);
-			netdev->stats.rx_dropped++;
-			goto release_desc;
-		}
+		if (ring->page_pool) {
+			new_data = mtk_page_pool_get_buff(ring->page_pool,
+							  &dma_addr,
+							  GFP_ATOMIC);
+			if (unlikely(!new_data)) {
+				netdev->stats.rx_dropped++;
+				goto release_desc;
+			}
+		} else {
+			if (ring->frag_size <= PAGE_SIZE)
+				new_data = napi_alloc_frag(ring->frag_size);
+			else
+				new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
+
+			if (unlikely(!new_data)) {
+				netdev->stats.rx_dropped++;
+				goto release_desc;
+			}
 
-		dma_unmap_single(eth->dma_dev, trxd.rxd1,
-				 ring->buf_size, DMA_FROM_DEVICE);
+			dma_addr = dma_map_single(eth->dma_dev,
+				new_data + NET_SKB_PAD + eth->ip_align,
+				ring->buf_size, DMA_FROM_DEVICE);
+			if (unlikely(dma_mapping_error(eth->dma_dev,
+						       dma_addr))) {
+				skb_free_frag(new_data);
+				netdev->stats.rx_dropped++;
+				goto release_desc;
+			}
+
+			dma_unmap_single(eth->dma_dev, trxd.rxd1,
+					 ring->buf_size, DMA_FROM_DEVICE);
+		}
 
 		/* receive data */
 		skb = build_skb(data, ring->frag_size);
 		if (unlikely(!skb)) {
-			skb_free_frag(data);
+			mtk_rx_put_buff(ring, data, true);
 			netdev->stats.rx_dropped++;
 			goto skip_rx;
 		}
-		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+
+		if (ring->page_pool) {
+			reserve_len = MTK_PP_HEADROOM;
+			skb_mark_for_recycle(skb);
+		} else {
+			reserve_len = NET_SKB_PAD + NET_IP_ALIGN;
+		}
+		skb_reserve(skb, reserve_len);
 
 		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
 		skb->dev = netdev;
@@ -1517,7 +1597,6 @@ static int mtk_poll_rx(struct napi_struc
 skip_rx:
 		ring->data[idx] = new_data;
 		rxd->rxd1 = (unsigned int)dma_addr;
-
 release_desc:
 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
 			rxd->rxd2 = RX_DMA_LSO;
@@ -1525,7 +1604,6 @@ release_desc:
 			rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
 
 		ring->calc_idx = idx;
-
 		done++;
 	}
 
@@ -1889,13 +1967,15 @@ static int mtk_rx_alloc(struct mtk_eth *
 	if (!ring->data)
 		return -ENOMEM;
 
-	for (i = 0; i < rx_dma_size; i++) {
-		if (ring->frag_size <= PAGE_SIZE)
-			ring->data[i] = netdev_alloc_frag(ring->frag_size);
-		else
-			ring->data[i] = mtk_max_lro_buf_alloc(GFP_KERNEL);
-		if (!ring->data[i])
-			return -ENOMEM;
+	if (!eth->hwlro) {
+		struct page_pool *pp;
+
+		pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
+					  rx_dma_size);
+		if (IS_ERR(pp))
+			return PTR_ERR(pp);
+
+		ring->page_pool = pp;
 	}
 
 	ring->dma = dma_alloc_coherent(eth->dma_dev,
@@ -1906,16 +1986,33 @@ static int mtk_rx_alloc(struct mtk_eth *
 
 	for (i = 0; i < rx_dma_size; i++) {
 		struct mtk_rx_dma_v2 *rxd;
-
-		dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
-				ring->data[i] + NET_SKB_PAD + eth->ip_align,
-				ring->buf_size,
-				DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
-			return -ENOMEM;
+		dma_addr_t dma_addr;
+		void *data;
 
 		rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+		if (ring->page_pool) {
+			data = mtk_page_pool_get_buff(ring->page_pool,
+						      &dma_addr, GFP_KERNEL);
+			if (!data)
+				return -ENOMEM;
+		} else {
+			if (ring->frag_size <= PAGE_SIZE)
+				data = netdev_alloc_frag(ring->frag_size);
+			else
+				data = mtk_max_lro_buf_alloc(GFP_KERNEL);
+
+			if (!data)
+				return -ENOMEM;
+
+			dma_addr = dma_map_single(eth->dma_dev,
+				data + NET_SKB_PAD + eth->ip_align,
+				ring->buf_size, DMA_FROM_DEVICE);
+			if (unlikely(dma_mapping_error(eth->dma_dev,
+						       dma_addr)))
+				return -ENOMEM;
+		}
 		rxd->rxd1 = (unsigned int)dma_addr;
+		ring->data[i] = data;
 
 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
 			rxd->rxd2 = RX_DMA_LSO;
@@ -1931,6 +2028,7 @@ static int mtk_rx_alloc(struct mtk_eth *
 			rxd->rxd8 = 0;
 		}
 	}
+
 	ring->dma_size = rx_dma_size;
 	ring->calc_idx_update = false;
 	ring->calc_idx = rx_dma_size - 1;
@@ -1982,7 +2080,7 @@ static void mtk_rx_clean(struct mtk_eth
 
 			dma_unmap_single(eth->dma_dev, rxd->rxd1,
 					 ring->buf_size, DMA_FROM_DEVICE);
-			skb_free_frag(ring->data[i]);
+			mtk_rx_put_buff(ring, ring->data[i], false);
 		}
 		kfree(ring->data);
 		ring->data = NULL;
@@ -1994,6 +2092,13 @@ static void mtk_rx_clean(struct mtk_eth
 				  ring->dma, ring->phys);
 		ring->dma = NULL;
 	}
+
+	if (ring->page_pool) {
+		if (xdp_rxq_info_is_reg(&ring->xdp_q))
+			xdp_rxq_info_unreg(&ring->xdp_q);
+		page_pool_destroy(ring->page_pool);
+		ring->page_pool = NULL;
+	}
 }
 
 static int mtk_hwlro_rx_init(struct mtk_eth *eth)
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -18,6 +18,8 @@
 #include <linux/rhashtable.h>
 #include <linux/dim.h>
 #include <linux/bitfield.h>
+#include <net/page_pool.h>
+#include <linux/bpf_trace.h>
 #include "mtk_ppe.h"
 
 #define MTK_QDMA_PAGE_SIZE	2048
@@ -49,6 +51,11 @@
 #define MTK_HW_FEATURES_MT7628	(NETIF_F_SG | NETIF_F_RXCSUM)
 #define NEXT_DESP_IDX(X, Y)	(((X) + 1) & ((Y) - 1))
 
+#define MTK_PP_HEADROOM		XDP_PACKET_HEADROOM
+#define MTK_PP_PAD		(MTK_PP_HEADROOM + \
+				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define MTK_PP_MAX_BUF_SIZE	(PAGE_SIZE - MTK_PP_PAD)
+
 #define MTK_QRX_OFFSET		0x10
 
 #define MTK_MAX_RX_RING_NUM	4
@@ -743,6 +750,9 @@ struct mtk_rx_ring {
 	bool calc_idx_update;
 	u16 calc_idx;
 	u32 crx_idx_reg;
+	/* page_pool */
+	struct page_pool *page_pool;
+	struct xdp_rxq_info xdp_q;
 };
 
 enum mkt_eth_capabilities {
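
For context, the sketch below illustrates the page_pool lifecycle this patch moves the RX path onto: a pool created with PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV hands out pages that are already DMA-mapped, page_pool_get_dma_addr() retrieves that mapping, and buffers are returned with page_pool_put_full_page(). This is an illustrative, self-contained example rather than driver code; the identifiers demo_pool_init, demo_rx_refill, demo_rx_recycle and DEMO_HEADROOM are hypothetical.

#include <linux/mm.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <net/page_pool.h>

#define DEMO_HEADROOM	256	/* e.g. XDP_PACKET_HEADROOM */

/* Create a pool whose pages stay DMA-mapped for device-to-CPU RX. */
static struct page_pool *demo_pool_init(struct device *dev, int ring_size)
{
	struct page_pool_params pp_params = {
		.order		= 0,	/* single pages, as in this patch */
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size	= ring_size,
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,
		.offset		= DEMO_HEADROOM,
		/* leave room for the headroom and the skb_shared_info tail */
		.max_len	= PAGE_SIZE - DEMO_HEADROOM -
				  SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
	};

	return page_pool_create(&pp_params);	/* ERR_PTR() on failure */
}

/* Refill one RX slot: no per-buffer dma_map_single() is needed. */
static void *demo_rx_refill(struct page_pool *pp, dma_addr_t *dma_addr)
{
	struct page *page = page_pool_alloc_pages(pp, GFP_ATOMIC | __GFP_NOWARN);

	if (!page)
		return NULL;

	*dma_addr = page_pool_get_dma_addr(page) + DEMO_HEADROOM;
	return page_address(page);
}

/* Return a buffer to the pool (napi == true when called from softirq). */
static void demo_rx_recycle(struct page_pool *pp, void *data, bool napi)
{
	page_pool_put_full_page(pp, virt_to_head_page(data), napi);
}

Because the mapping lives as long as the page does, the per-packet dma_map_single()/dma_unmap_single() pair of the old RX path disappears, and skb_mark_for_recycle() lets a freed skb hand its page straight back to the pool instead of the page allocator.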