From 23233e577ef973c2c5d0dd757a0a4605e34ecb57 Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi <[email protected]>
Date: Fri, 22 Jul 2022 09:19:36 +0200
Subject: [PATCH] net: ethernet: mtk_eth_soc: rely on page_pool for single
 page buffers

Rely on the page_pool allocator for single-page buffers in order to keep
them DMA-mapped and to add skb recycling support.

Signed-off-by: Lorenzo Bianconi <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
---
 drivers/net/ethernet/mediatek/Kconfig       |   1 +
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 185 +++++++++++++++-----
 drivers/net/ethernet/mediatek/mtk_eth_soc.h |  10 ++
 3 files changed, 156 insertions(+), 40 deletions(-)
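
Editor's note, not part of the commit: the hunks below replace the
driver's napi_alloc_frag()/dma_map_single() RX path with pool-backed
pages. As a condensed sketch of the buffer lifecycle the new helpers
implement (the rx_buf_get/rx_buf_put names here are illustrative only;
the patch's real helpers are mtk_page_pool_get_buff/mtk_rx_put_buff):

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <net/page_pool.h>

/* Sketch only. The pool is created with PP_FLAG_DMA_MAP |
 * PP_FLAG_DMA_SYNC_DEV, so pages stay DMA-mapped for their whole
 * lifetime and are re-synced for the device when recycled.
 */
static void *rx_buf_get(struct page_pool *pp, dma_addr_t *dma_addr)
{
	struct page *page;

	page = page_pool_alloc_pages(pp, GFP_ATOMIC | __GFP_NOWARN);
	if (!page)
		return NULL;

	/* No per-buffer dma_map_single(): the mapping was done once by
	 * the pool; only the fixed headroom offset is added per buffer.
	 */
	*dma_addr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;
	return page_address(page);
}

static void rx_buf_put(struct page_pool *pp, void *data, bool napi)
{
	/* Return the page to the pool instead of unmapping and freeing;
	 * @napi is true when called from the NAPI softirq fast path.
	 */
	page_pool_put_full_page(pp, virt_to_head_page(data), napi);
}

skbs built on such buffers are tagged with skb_mark_for_recycle(), so
the page flows back into the pool when the skb is consumed.
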
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -16,6 +16,7 @@ config NET_MEDIATEK_SOC
 	depends on NET_DSA || !NET_DSA
 	select PHYLINK
 	select DIMLIB
+	select PAGE_POOL
 	help
 	  This driver supports the gigabit ethernet MACs in the
 	  MediaTek SoC family.
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1388,6 +1388,68 @@ static void mtk_update_rx_cpu_idx(struct
 	}
 }
 
+static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
+					      struct xdp_rxq_info *xdp_q,
+					      int id, int size)
+{
+	struct page_pool_params pp_params = {
+		.order = 0,
+		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+		.pool_size = size,
+		.nid = NUMA_NO_NODE,
+		.dev = eth->dma_dev,
+		.dma_dir = DMA_FROM_DEVICE,
+		.offset = MTK_PP_HEADROOM,
+		.max_len = MTK_PP_MAX_BUF_SIZE,
+	};
+	struct page_pool *pp;
+	int err;
+
+	pp = page_pool_create(&pp_params);
+	if (IS_ERR(pp))
+		return pp;
+
+	err = xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, id,
+			       eth->rx_napi.napi_id);
+	if (err < 0)
+		goto err_free_pp;
+
+	err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
+	if (err)
+		goto err_unregister_rxq;
+
+	return pp;
+
+err_unregister_rxq:
+	xdp_rxq_info_unreg(xdp_q);
+err_free_pp:
+	page_pool_destroy(pp);
+
+	return ERR_PTR(err);
+}
+
+static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
+				    gfp_t gfp_mask)
+{
+	struct page *page;
+
+	page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
+	if (!page)
+		return NULL;
+
+	*dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;
+	return page_address(page);
+}
+
+static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
+{
+	if (ring->page_pool)
+		page_pool_put_full_page(ring->page_pool,
+					virt_to_head_page(data), napi);
+	else
+		skb_free_frag(data);
+}
+
 static int mtk_poll_rx(struct napi_struct *napi, int budget,
 		       struct mtk_eth *eth)
 {
@@ -1401,9 +1463,9 @@ static int mtk_poll_rx(struct napi_struc
 
 	while (done < budget) {
 		unsigned int pktlen, *rxdcsum;
+		u32 hash, reason, reserve_len;
 		struct net_device *netdev;
 		dma_addr_t dma_addr;
-		u32 hash, reason;
 		int mac = 0;
 
 		ring = mtk_get_rx_ring(eth);
@@ -1434,36 +1496,54 @@ static int mtk_poll_rx(struct napi_struc
 			goto release_desc;
 
 		/* alloc new buffer */
-		if (ring->frag_size <= PAGE_SIZE)
-			new_data = napi_alloc_frag(ring->frag_size);
-		else
-			new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
-		if (unlikely(!new_data)) {
-			netdev->stats.rx_dropped++;
-			goto release_desc;
-		}
-		dma_addr = dma_map_single(eth->dma_dev,
-					  new_data + NET_SKB_PAD +
-					  eth->ip_align,
-					  ring->buf_size,
-					  DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) {
-			skb_free_frag(new_data);
-			netdev->stats.rx_dropped++;
-			goto release_desc;
-		}
+		if (ring->page_pool) {
+			new_data = mtk_page_pool_get_buff(ring->page_pool,
+							  &dma_addr,
+							  GFP_ATOMIC);
+			if (unlikely(!new_data)) {
+				netdev->stats.rx_dropped++;
+				goto release_desc;
+			}
+		} else {
+			if (ring->frag_size <= PAGE_SIZE)
+				new_data = napi_alloc_frag(ring->frag_size);
+			else
+				new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
+
+			if (unlikely(!new_data)) {
+				netdev->stats.rx_dropped++;
+				goto release_desc;
+			}
 
-		dma_unmap_single(eth->dma_dev, trxd.rxd1,
-				 ring->buf_size, DMA_FROM_DEVICE);
+			dma_addr = dma_map_single(eth->dma_dev,
+				new_data + NET_SKB_PAD + eth->ip_align,
+				ring->buf_size, DMA_FROM_DEVICE);
+			if (unlikely(dma_mapping_error(eth->dma_dev,
+						       dma_addr))) {
+				skb_free_frag(new_data);
+				netdev->stats.rx_dropped++;
+				goto release_desc;
+			}
+
+			dma_unmap_single(eth->dma_dev, trxd.rxd1,
+					 ring->buf_size, DMA_FROM_DEVICE);
+		}
 
 		/* receive data */
 		skb = build_skb(data, ring->frag_size);
 		if (unlikely(!skb)) {
-			skb_free_frag(data);
+			mtk_rx_put_buff(ring, data, true);
 			netdev->stats.rx_dropped++;
 			goto skip_rx;
 		}
-		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+
+		if (ring->page_pool) {
+			reserve_len = MTK_PP_HEADROOM;
+			skb_mark_for_recycle(skb);
+		} else {
+			reserve_len = NET_SKB_PAD + NET_IP_ALIGN;
+		}
+		skb_reserve(skb, reserve_len);
 
 		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
 		skb->dev = netdev;
@@ -1517,7 +1597,6 @@ static int mtk_poll_rx(struct napi_struc
 skip_rx:
 		ring->data[idx] = new_data;
 		rxd->rxd1 = (unsigned int)dma_addr;
-
 release_desc:
 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
 			rxd->rxd2 = RX_DMA_LSO;
@@ -1525,7 +1604,6 @@ release_desc:
 			rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
 
 		ring->calc_idx = idx;
-
 		done++;
 	}
 
@@ -1889,13 +1967,15 @@ static int mtk_rx_alloc(struct mtk_eth *
 	if (!ring->data)
 		return -ENOMEM;
 
-	for (i = 0; i < rx_dma_size; i++) {
-		if (ring->frag_size <= PAGE_SIZE)
-			ring->data[i] = netdev_alloc_frag(ring->frag_size);
-		else
-			ring->data[i] = mtk_max_lro_buf_alloc(GFP_KERNEL);
-		if (!ring->data[i])
-			return -ENOMEM;
+	if (!eth->hwlro) {
+		struct page_pool *pp;
+
+		pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
+					  rx_dma_size);
+		if (IS_ERR(pp))
+			return PTR_ERR(pp);
+
+		ring->page_pool = pp;
 	}
 
 	ring->dma = dma_alloc_coherent(eth->dma_dev,
@@ -1906,16 +1986,33 @@ static int mtk_rx_alloc(struct mtk_eth *
 
 	for (i = 0; i < rx_dma_size; i++) {
 		struct mtk_rx_dma_v2 *rxd;
-
-		dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
-				ring->data[i] + NET_SKB_PAD + eth->ip_align,
-				ring->buf_size,
-				DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
-			return -ENOMEM;
+		dma_addr_t dma_addr;
+		void *data;
 
 		rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+		if (ring->page_pool) {
+			data = mtk_page_pool_get_buff(ring->page_pool,
+						      &dma_addr, GFP_KERNEL);
+			if (!data)
+				return -ENOMEM;
+		} else {
+			if (ring->frag_size <= PAGE_SIZE)
+				data = netdev_alloc_frag(ring->frag_size);
+			else
+				data = mtk_max_lro_buf_alloc(GFP_KERNEL);
+
+			if (!data)
+				return -ENOMEM;
+
+			dma_addr = dma_map_single(eth->dma_dev,
+				data + NET_SKB_PAD + eth->ip_align,
+				ring->buf_size, DMA_FROM_DEVICE);
+			if (unlikely(dma_mapping_error(eth->dma_dev,
+						       dma_addr)))
+				return -ENOMEM;
+		}
 		rxd->rxd1 = (unsigned int)dma_addr;
+		ring->data[i] = data;
 
 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
 			rxd->rxd2 = RX_DMA_LSO;
@@ -1931,6 +2028,7 @@ static int mtk_rx_alloc(struct mtk_eth *
 			rxd->rxd8 = 0;
 		}
 	}
+
 	ring->dma_size = rx_dma_size;
 	ring->calc_idx_update = false;
 	ring->calc_idx = rx_dma_size - 1;
@@ -1982,7 +2080,7 @@ static void mtk_rx_clean(struct mtk_eth
 
 		dma_unmap_single(eth->dma_dev, rxd->rxd1,
 				 ring->buf_size, DMA_FROM_DEVICE);
-		skb_free_frag(ring->data[i]);
+		mtk_rx_put_buff(ring, ring->data[i], false);
 	}
 	kfree(ring->data);
 	ring->data = NULL;
@@ -1994,6 +2092,13 @@ static void mtk_rx_clean(struct mtk_eth
 				  ring->dma, ring->phys);
 		ring->dma = NULL;
 	}
+
+	if (ring->page_pool) {
+		if (xdp_rxq_info_is_reg(&ring->xdp_q))
+			xdp_rxq_info_unreg(&ring->xdp_q);
+		page_pool_destroy(ring->page_pool);
+		ring->page_pool = NULL;
+	}
 }
 
 static int mtk_hwlro_rx_init(struct mtk_eth *eth)
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -18,6 +18,8 @@
 #include <linux/rhashtable.h>
 #include <linux/dim.h>
 #include <linux/bitfield.h>
+#include <net/page_pool.h>
+#include <linux/bpf_trace.h>
 #include "mtk_ppe.h"
 
 #define MTK_QDMA_PAGE_SIZE	2048
@@ -49,6 +51,11 @@
 #define MTK_HW_FEATURES_MT7628	(NETIF_F_SG | NETIF_F_RXCSUM)
 #define NEXT_DESP_IDX(X, Y)	(((X) + 1) & ((Y) - 1))
 
+#define MTK_PP_HEADROOM		XDP_PACKET_HEADROOM
+#define MTK_PP_PAD		(MTK_PP_HEADROOM + \
+				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define MTK_PP_MAX_BUF_SIZE	(PAGE_SIZE - MTK_PP_PAD)
+
 #define MTK_QRX_OFFSET		0x10
 
 #define MTK_MAX_RX_RING_NUM	4
@@ -743,6 +750,9 @@ struct mtk_rx_ring {
 	bool calc_idx_update;
 	u16 calc_idx;
 	u32 crx_idx_reg;
+	/* page_pool */
+	struct page_pool *page_pool;
+	struct xdp_rxq_info xdp_q;
};
 
 enum mkt_eth_capabilities {