706-01-v6.0-net-ethernet-mtk_eth_soc-add-basic-XDP-support.patch 7.5 KB

  1. From 7c26c20da5d420cde55618263be4aa2f6de53056 Mon Sep 17 00:00:00 2001
  2. From: Lorenzo Bianconi <[email protected]>
  3. Date: Fri, 22 Jul 2022 09:19:37 +0200
  4. Subject: [PATCH] net: ethernet: mtk_eth_soc: add basic XDP support
  5. Introduce basic XDP support to mtk_eth_soc driver.
  6. Supported XDP verdicts:
  7. - XDP_PASS
  8. - XDP_DROP
  9. - XDP_REDIRECT
  10. Signed-off-by: Lorenzo Bianconi <[email protected]>
  11. Signed-off-by: David S. Miller <[email protected]>
  12. ---
  13. drivers/net/ethernet/mediatek/mtk_eth_soc.c | 162 +++++++++++++++++---
  14. drivers/net/ethernet/mediatek/mtk_eth_soc.h | 2 +
  15. 2 files changed, 145 insertions(+), 19 deletions(-)
  16. --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
  17. +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
  18. @@ -1388,6 +1388,11 @@ static void mtk_update_rx_cpu_idx(struct
  19. }
  20. }
  21. +static bool mtk_page_pool_enabled(struct mtk_eth *eth)
  22. +{
  23. + return !eth->hwlro;
  24. +}
  25. +
  26. static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
  27. struct xdp_rxq_info *xdp_q,
  28. int id, int size)
  29. @@ -1450,11 +1455,52 @@ static void mtk_rx_put_buff(struct mtk_r
  30. skb_free_frag(data);
  31. }
  32. +static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
  33. + struct xdp_buff *xdp, struct net_device *dev)
  34. +{
  35. + struct bpf_prog *prog;
  36. + u32 act = XDP_PASS;
  37. +
  38. + rcu_read_lock();
  39. +
  40. + prog = rcu_dereference(eth->prog);
  41. + if (!prog)
  42. + goto out;
  43. +
  44. + act = bpf_prog_run_xdp(prog, xdp);
  45. + switch (act) {
  46. + case XDP_PASS:
  47. + goto out;
  48. + case XDP_REDIRECT:
  49. + if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
  50. + act = XDP_DROP;
  51. + break;
  52. + }
  53. + goto out;
  54. + default:
  55. + bpf_warn_invalid_xdp_action(act);
  56. + fallthrough;
  57. + case XDP_ABORTED:
  58. + trace_xdp_exception(dev, prog, act);
  59. + fallthrough;
  60. + case XDP_DROP:
  61. + break;
  62. + }
  63. +
  64. + page_pool_put_full_page(ring->page_pool,
  65. + virt_to_head_page(xdp->data), true);
  66. +out:
  67. + rcu_read_unlock();
  68. +
  69. + return act;
  70. +}
  71. +
  72. static int mtk_poll_rx(struct napi_struct *napi, int budget,
  73. struct mtk_eth *eth)
  74. {
  75. struct dim_sample dim_sample = {};
  76. struct mtk_rx_ring *ring;
  77. + bool xdp_flush = false;
  78. int idx;
  79. struct sk_buff *skb;
  80. u8 *data, *new_data;
  81. @@ -1463,9 +1509,9 @@ static int mtk_poll_rx(struct napi_struc
  82. while (done < budget) {
  83. unsigned int pktlen, *rxdcsum;
  84. - u32 hash, reason, reserve_len;
  85. struct net_device *netdev;
  86. dma_addr_t dma_addr;
  87. + u32 hash, reason;
  88. int mac = 0;
  89. ring = mtk_get_rx_ring(eth);
  90. @@ -1495,8 +1541,14 @@ static int mtk_poll_rx(struct napi_struc
  91. if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
  92. goto release_desc;
  93. + pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
  94. +
  95. /* alloc new buffer */
  96. if (ring->page_pool) {
  97. + struct page *page = virt_to_head_page(data);
  98. + struct xdp_buff xdp;
  99. + u32 ret;
  100. +
  101. new_data = mtk_page_pool_get_buff(ring->page_pool,
  102. &dma_addr,
  103. GFP_ATOMIC);
  104. @@ -1504,6 +1556,34 @@ static int mtk_poll_rx(struct napi_struc
  105. netdev->stats.rx_dropped++;
  106. goto release_desc;
  107. }
  108. +
  109. + dma_sync_single_for_cpu(eth->dma_dev,
  110. + page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
  111. + pktlen, page_pool_get_dma_dir(ring->page_pool));
  112. +
  113. + xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
  114. + xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
  115. + false);
  116. + xdp_buff_clear_frags_flag(&xdp);
  117. +
  118. + ret = mtk_xdp_run(eth, ring, &xdp, netdev);
  119. + if (ret == XDP_REDIRECT)
  120. + xdp_flush = true;
  121. +
  122. + if (ret != XDP_PASS)
  123. + goto skip_rx;
  124. +
  125. + skb = build_skb(data, PAGE_SIZE);
  126. + if (unlikely(!skb)) {
  127. + page_pool_put_full_page(ring->page_pool,
  128. + page, true);
  129. + netdev->stats.rx_dropped++;
  130. + goto skip_rx;
  131. + }
  132. +
  133. + skb_reserve(skb, xdp.data - xdp.data_hard_start);
  134. + skb_put(skb, xdp.data_end - xdp.data);
  135. + skb_mark_for_recycle(skb);
  136. } else {
  137. if (ring->frag_size <= PAGE_SIZE)
  138. new_data = napi_alloc_frag(ring->frag_size);
  139. @@ -1527,27 +1607,20 @@ static int mtk_poll_rx(struct napi_struc
  140. dma_unmap_single(eth->dma_dev, trxd.rxd1,
  141. ring->buf_size, DMA_FROM_DEVICE);
  142. - }
  143. - /* receive data */
  144. - skb = build_skb(data, ring->frag_size);
  145. - if (unlikely(!skb)) {
  146. - mtk_rx_put_buff(ring, data, true);
  147. - netdev->stats.rx_dropped++;
  148. - goto skip_rx;
  149. - }
  150. + skb = build_skb(data, ring->frag_size);
  151. + if (unlikely(!skb)) {
  152. + netdev->stats.rx_dropped++;
  153. + skb_free_frag(data);
  154. + goto skip_rx;
  155. + }
  156. - if (ring->page_pool) {
  157. - reserve_len = MTK_PP_HEADROOM;
  158. - skb_mark_for_recycle(skb);
  159. - } else {
  160. - reserve_len = NET_SKB_PAD + NET_IP_ALIGN;
  161. + skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
  162. + skb_put(skb, pktlen);
  163. }
  164. - skb_reserve(skb, reserve_len);
  165. - pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
  166. skb->dev = netdev;
  167. - skb_put(skb, pktlen);
  168. + bytes += skb->len;
  169. if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
  170. rxdcsum = &trxd.rxd3;
  171. @@ -1559,7 +1632,6 @@ static int mtk_poll_rx(struct napi_struc
  172. else
  173. skb_checksum_none_assert(skb);
  174. skb->protocol = eth_type_trans(skb, netdev);
  175. - bytes += pktlen;
  176. hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
  177. if (hash != MTK_RXD4_FOE_ENTRY) {
  178. @@ -1622,6 +1694,9 @@ rx_done:
  179. &dim_sample);
  180. net_dim(&eth->rx_dim, dim_sample);
  181. + if (xdp_flush)
  182. + xdp_do_flush_map();
  183. +
  184. return done;
  185. }
  186. @@ -1967,7 +2042,7 @@ static int mtk_rx_alloc(struct mtk_eth *
  187. if (!ring->data)
  188. return -ENOMEM;
  189. - if (!eth->hwlro) {
  190. + if (mtk_page_pool_enabled(eth)) {
  191. struct page_pool *pp;
  192. pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
  193. @@ -2712,6 +2787,48 @@ static int mtk_stop(struct net_device *d
  194. return 0;
  195. }
  196. +static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
  197. + struct netlink_ext_ack *extack)
  198. +{
  199. + struct mtk_mac *mac = netdev_priv(dev);
  200. + struct mtk_eth *eth = mac->hw;
  201. + struct bpf_prog *old_prog;
  202. + bool need_update;
  203. +
  204. + if (eth->hwlro) {
  205. + NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
  206. + return -EOPNOTSUPP;
  207. + }
  208. +
  209. + if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
  210. + NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
  211. + return -EOPNOTSUPP;
  212. + }
  213. +
  214. + need_update = !!eth->prog != !!prog;
  215. + if (netif_running(dev) && need_update)
  216. + mtk_stop(dev);
  217. +
  218. + old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
  219. + if (old_prog)
  220. + bpf_prog_put(old_prog);
  221. +
  222. + if (netif_running(dev) && need_update)
  223. + return mtk_open(dev);
  224. +
  225. + return 0;
  226. +}
  227. +
  228. +static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
  229. +{
  230. + switch (xdp->command) {
  231. + case XDP_SETUP_PROG:
  232. + return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
  233. + default:
  234. + return -EINVAL;
  235. + }
  236. +}
  237. +
  238. static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
  239. {
  240. regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
  241. @@ -2990,6 +3107,12 @@ static int mtk_change_mtu(struct net_dev
  242. struct mtk_eth *eth = mac->hw;
  243. u32 mcr_cur, mcr_new;
  244. + if (rcu_access_pointer(eth->prog) &&
  245. + length > MTK_PP_MAX_BUF_SIZE) {
  246. + netdev_err(dev, "Invalid MTU for XDP mode\n");
  247. + return -EINVAL;
  248. + }
  249. +
  250. if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
  251. mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
  252. mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
  253. @@ -3316,6 +3439,7 @@ static const struct net_device_ops mtk_n
  254. .ndo_poll_controller = mtk_poll_controller,
  255. #endif
  256. .ndo_setup_tc = mtk_eth_setup_tc,
  257. + .ndo_bpf = mtk_xdp,
  258. };
  259. static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
  260. --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
  261. +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
  262. @@ -1086,6 +1086,8 @@ struct mtk_eth {
  263. struct mtk_ppe *ppe;
  264. struct rhashtable flow_table;
  265. +
  266. + struct bpf_prog __rcu *prog;
  267. };
  268. /* struct mtk_mac - the structure that holds the info about the MACs of the