@@ -0,0 +1,520 @@
+From: Felix Fietkau <[email protected]>
+Date: Tue, 15 Oct 2024 12:52:56 +0200
+Subject: [PATCH] net: ethernet: mtk_eth_soc: optimize dma ring address/index
+ calculation
+
+Since DMA descriptor sizes are all powers of 2, we can avoid costly integer
+division in favor of simple shifts.
+
+Signed-off-by: Felix Fietkau <[email protected]>
+---
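+
+Not part of the change itself: a minimal userspace sketch of the idea,
+assuming a hypothetical 16-byte descriptor type. The driver derives the
+shift at compile time with const_ilog2(sizeof(...)); __builtin_ctz is
+used below as a stand-in, since the size is a power of 2:
+
+	#include <assert.h>
+	#include <stddef.h>
+
+	struct fake_desc { unsigned int w[4]; };  /* 16 bytes, a power of 2 */
+
+	/* stand-in for the kernel's const_ilog2() on a power-of-2 size */
+	#define DESC_SHIFT(type) (__builtin_ctz(sizeof(type)))
+
+	/* index -> byte offset: replaces idx * sizeof(desc) */
+	static size_t desc_offset(unsigned int idx, unsigned int shift)
+	{
+		return (size_t)idx << shift;
+	}
+
+	/* byte offset -> index: replaces ofs / sizeof(desc) */
+	static unsigned int desc_index(size_t ofs, unsigned int shift)
+	{
+		return (unsigned int)(ofs >> shift);
+	}
+
+	int main(void)
+	{
+		unsigned int shift = DESC_SHIFT(struct fake_desc);
+
+		assert(desc_offset(5, shift) == 5 * sizeof(struct fake_desc));
+		assert(desc_index(5 * sizeof(struct fake_desc), shift) == 5);
+		return 0;
+	}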
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -43,6 +43,11 @@ MODULE_PARM_DESC(msg_level, "Message lev
+ offsetof(struct mtk_hw_stats, xdp_stats.x) / \
+ sizeof(u64) }
+
++#define RX_DESC_OFS(eth, i) \
++ ((i) << (eth)->soc->rx.desc_shift)
++#define TX_DESC_OFS(eth, i) \
++ ((i) << (eth)->soc->tx.desc_shift)
++
+ static const struct mtk_reg_map mtk_reg_map = {
+ .tx_irq_mask = 0x1a1c,
+ .tx_irq_status = 0x1a18,
+@@ -1160,14 +1165,14 @@ static int mtk_init_fq_dma(struct mtk_et
+ eth->scratch_ring = eth->sram_base;
+ else
+ eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
+- cnt * soc->tx.desc_size,
++ TX_DESC_OFS(eth, cnt),
+ &eth->phy_scratch_ring,
+ GFP_KERNEL);
+
+ if (unlikely(!eth->scratch_ring))
+ return -ENOMEM;
+
+- phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
++ phy_ring_tail = eth->phy_scratch_ring + TX_DESC_OFS(eth, cnt - 1);
+
+ for (j = 0; j < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); j++) {
+ len = min_t(int, cnt - j * MTK_FQ_DMA_LENGTH, MTK_FQ_DMA_LENGTH);
+@@ -1186,11 +1191,11 @@ static int mtk_init_fq_dma(struct mtk_et
+ for (i = 0; i < len; i++) {
+ struct mtk_tx_dma_v2 *txd;
+
+- txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size;
++ txd = eth->scratch_ring + TX_DESC_OFS(eth, j * MTK_FQ_DMA_LENGTH + i);
+ txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
+ if (j * MTK_FQ_DMA_LENGTH + i < cnt)
+ txd->txd2 = eth->phy_scratch_ring +
+- (j * MTK_FQ_DMA_LENGTH + i + 1) * soc->tx.desc_size;
++ TX_DESC_OFS(eth, j * MTK_FQ_DMA_LENGTH + i + 1);
+
+ txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
+ if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
+@@ -1220,9 +1225,9 @@ static void *mtk_qdma_phys_to_virt(struc
+ }
+
+ static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
+- void *txd, u32 txd_size)
++ void *txd, u32 txd_shift)
+ {
+- int idx = (txd - ring->dma) / txd_size;
++ int idx = (txd - ring->dma) >> txd_shift;
+
+ return &ring->buf[idx];
+ }
+@@ -1233,9 +1238,9 @@ static struct mtk_tx_dma *qdma_to_pdma(s
+ return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma;
+ }
+
+-static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
++static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_shift)
+ {
+- return (dma - ring->dma) / txd_size;
++ return (dma - ring->dma) >> txd_shift;
+ }
+
+ static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
+@@ -1443,7 +1448,7 @@ static int mtk_tx_map(struct sk_buff *sk
+ if (itxd == ring->last_free)
+ return -ENOMEM;
+
+- itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
++ itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_shift);
+ memset(itx_buf, 0, sizeof(*itx_buf));
+
+ txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
+@@ -1497,7 +1502,7 @@ static int mtk_tx_map(struct sk_buff *sk
+ mtk_tx_set_dma_desc(dev, txd, &txd_info);
+
+ tx_buf = mtk_desc_to_tx_buf(ring, txd,
+- soc->tx.desc_size);
++ soc->tx.desc_shift);
+ if (new_desc)
+ memset(tx_buf, 0, sizeof(*tx_buf));
+ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
+@@ -1540,7 +1545,7 @@ static int mtk_tx_map(struct sk_buff *sk
+ } else {
+ int next_idx;
+
+- next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->tx.desc_size),
++ next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->tx.desc_shift),
+ ring->dma_size);
+ mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
+ }
+@@ -1549,7 +1554,7 @@ static int mtk_tx_map(struct sk_buff *sk
+
+ err_dma:
+ do {
+- tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
++ tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_shift);
+
+ /* unmap dma */
+ mtk_tx_unmap(eth, tx_buf, NULL, false);
+@@ -1715,7 +1720,7 @@ static struct mtk_rx_ring *mtk_get_rx_ri
+
+ ring = &eth->rx_ring[i];
+ idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
+- rxd = ring->dma + idx * eth->soc->rx.desc_size;
++ rxd = ring->dma + RX_DESC_OFS(eth, idx);
+ if (rxd->rxd2 & RX_DMA_DONE) {
+ ring->calc_idx_update = true;
+ return ring;
+@@ -1883,7 +1888,7 @@ static int mtk_xdp_submit_frame(struct m
+ }
+ htxd = txd;
+
+- tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_size);
++ tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_shift);
+ memset(tx_buf, 0, sizeof(*tx_buf));
+ htx_buf = tx_buf;
+
+@@ -1902,7 +1907,7 @@ static int mtk_xdp_submit_frame(struct m
+ goto unmap;
+
+ tx_buf = mtk_desc_to_tx_buf(ring, txd,
+- soc->tx.desc_size);
++ soc->tx.desc_shift);
+ memset(tx_buf, 0, sizeof(*tx_buf));
+ n_desc++;
+ }
+@@ -1940,7 +1945,7 @@ static int mtk_xdp_submit_frame(struct m
+ } else {
+ int idx;
+
+- idx = txd_to_idx(ring, txd, soc->tx.desc_size);
++ idx = txd_to_idx(ring, txd, soc->tx.desc_shift);
+ mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
+ MT7628_TX_CTX_IDX0);
+ }
+@@ -1951,7 +1956,7 @@ static int mtk_xdp_submit_frame(struct m
+
+ unmap:
+ while (htxd != txd) {
+- tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_size);
++ tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_shift);
+ mtk_tx_unmap(eth, tx_buf, NULL, false);
+
+ htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+@@ -2083,7 +2088,7 @@ static int mtk_poll_rx(struct napi_struc
+ goto rx_done;
+
+ idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
+- rxd = ring->dma + idx * eth->soc->rx.desc_size;
++ rxd = ring->dma + RX_DESC_OFS(eth, idx);
+ data = ring->data[idx];
+
+ if (!mtk_rx_get_desc(eth, &trxd, rxd))
+@@ -2347,7 +2352,7 @@ static int mtk_poll_tx_qdma(struct mtk_e
+ break;
+
+ tx_buf = mtk_desc_to_tx_buf(ring, desc,
+- eth->soc->tx.desc_size);
++ eth->soc->tx.desc_shift);
+ if (!tx_buf->data)
+ break;
+
+@@ -2398,7 +2403,7 @@ static int mtk_poll_tx_pdma(struct mtk_e
+ }
+ mtk_tx_unmap(eth, tx_buf, &bq, true);
+
+- desc = ring->dma + cpu * eth->soc->tx.desc_size;
++ desc = ring->dma + TX_DESC_OFS(eth, cpu);
+ ring->last_free = desc;
+ atomic_inc(&ring->free_count);
+
+@@ -2516,7 +2521,7 @@ static int mtk_tx_alloc(struct mtk_eth *
+ {
+ const struct mtk_soc_data *soc = eth->soc;
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+- int i, sz = soc->tx.desc_size;
++ int i, sz = TX_DESC_OFS(eth, 1);
+ struct mtk_tx_dma_v2 *txd;
+ int ring_size;
+ u32 ofs, val;
+@@ -2563,7 +2568,7 @@ static int mtk_tx_alloc(struct mtk_eth *
+ * descriptors in ring->dma_pdma.
+ */
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+- ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
++ ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, TX_DESC_OFS(eth, ring_size),
+ &ring->phys_pdma, GFP_KERNEL);
+ if (!ring->dma_pdma)
+ goto no_tx_mem;
+@@ -2578,7 +2583,7 @@ static int mtk_tx_alloc(struct mtk_eth *
+ atomic_set(&ring->free_count, ring_size - 2);
+ ring->next_free = ring->dma;
+ ring->last_free = (void *)txd;
+- ring->last_free_ptr = (u32)(ring->phys + ((ring_size - 1) * sz));
++ ring->last_free_ptr = (u32)(ring->phys + TX_DESC_OFS(eth, ring_size - 1));
+ ring->thresh = MAX_SKB_FRAGS;
+
+ /* make sure that all changes to the dma ring are flushed before we
+@@ -2590,7 +2595,7 @@ static int mtk_tx_alloc(struct mtk_eth *
+ mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
+ mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
+ mtk_w32(eth,
+- ring->phys + ((ring_size - 1) * sz),
++ ring->phys + TX_DESC_OFS(eth, ring_size - 1),
+ soc->reg_map->qdma.crx_ptr);
+ mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
+
+@@ -2639,14 +2644,14 @@ static void mtk_tx_clean(struct mtk_eth
+ }
+ if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
+ dma_free_coherent(eth->dma_dev,
+- ring->dma_size * soc->tx.desc_size,
++ TX_DESC_OFS(eth, ring->dma_size),
+ ring->dma, ring->phys);
+ ring->dma = NULL;
+ }
+
+ if (ring->dma_pdma) {
+ dma_free_coherent(eth->dma_dev,
+- ring->dma_size * soc->tx.desc_size,
++ TX_DESC_OFS(eth, ring->dma_size),
+ ring->dma_pdma, ring->phys_pdma);
+ ring->dma_pdma = NULL;
+ }
+@@ -2702,15 +2707,13 @@ static int mtk_rx_alloc(struct mtk_eth *
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
+ rx_flag != MTK_RX_FLAGS_NORMAL) {
+ ring->dma = dma_alloc_coherent(eth->dma_dev,
+- rx_dma_size * eth->soc->rx.desc_size,
++ RX_DESC_OFS(eth, rx_dma_size),
+ &ring->phys, GFP_KERNEL);
+ } else {
+ struct mtk_tx_ring *tx_ring = &eth->tx_ring;
+
+- ring->dma = tx_ring->dma + tx_ring_size *
+- eth->soc->tx.desc_size * (ring_no + 1);
+- ring->phys = tx_ring->phys + tx_ring_size *
+- eth->soc->tx.desc_size * (ring_no + 1);
++ ring->dma = tx_ring->dma + TX_DESC_OFS(eth, tx_ring_size * (ring_no + 1));
++ ring->phys = tx_ring->phys + TX_DESC_OFS(eth, tx_ring_size * (ring_no + 1));
+ }
+
+ if (!ring->dma)
+@@ -2721,7 +2724,7 @@ static int mtk_rx_alloc(struct mtk_eth *
+ dma_addr_t dma_addr;
+ void *data;
+
+- rxd = ring->dma + i * eth->soc->rx.desc_size;
++ rxd = ring->dma + RX_DESC_OFS(eth, i);
+ if (ring->page_pool) {
+ data = mtk_page_pool_get_buff(ring->page_pool,
+ &dma_addr, GFP_KERNEL);
+@@ -2812,7 +2815,7 @@ static void mtk_rx_clean(struct mtk_eth
+ if (!ring->data[i])
+ continue;
+
+- rxd = ring->dma + i * eth->soc->rx.desc_size;
++ rxd = ring->dma + RX_DESC_OFS(eth, i);
+ if (!rxd->rxd1)
+ continue;
+
+@@ -2829,7 +2832,7 @@ static void mtk_rx_clean(struct mtk_eth
+
+ if (!in_sram && ring->dma) {
+ dma_free_coherent(eth->dma_dev,
+- ring->dma_size * eth->soc->rx.desc_size,
++ RX_DESC_OFS(eth, ring->dma_size),
+ ring->dma, ring->phys);
+ ring->dma = NULL;
+ }
+@@ -3200,7 +3203,7 @@ static void mtk_dma_free(struct mtk_eth
+
+ if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
+ dma_free_coherent(eth->dma_dev,
+- MTK_QDMA_RING_SIZE * soc->tx.desc_size,
++ TX_DESC_OFS(eth, MTK_QDMA_RING_SIZE),
+ eth->scratch_ring, eth->phy_scratch_ring);
+ eth->scratch_ring = NULL;
+ eth->phy_scratch_ring = 0;
+@@ -5228,6 +5231,9 @@ static void mtk_remove(struct platform_d
+ mtk_mdio_cleanup(eth);
+ }
+
++#define DESC_SIZE(struct_name) \
++ .desc_shift = const_ilog2(sizeof(struct_name))
++
+ static const struct mtk_soc_data mt2701_data = {
+ .reg_map = &mtk_reg_map,
+ .caps = MT7623_CAPS | MTK_HWLRO,
+@@ -5236,14 +5242,14 @@ static const struct mtk_soc_data mt2701_
+ .required_pctl = true,
+ .version = 1,
+ .tx = {
+- .desc_size = sizeof(struct mtk_tx_dma),
++ DESC_SIZE(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+- .desc_size = sizeof(struct mtk_rx_dma),
++ DESC_SIZE(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_size = MTK_DMA_SIZE(2K),
+@@ -5264,14 +5270,14 @@ static const struct mtk_soc_data mt7621_
+ .hash_offset = 2,
+ .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
+ .tx = {
+- .desc_size = sizeof(struct mtk_tx_dma),
++ DESC_SIZE(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+- .desc_size = sizeof(struct mtk_rx_dma),
++ DESC_SIZE(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_size = MTK_DMA_SIZE(2K),
+@@ -5294,14 +5300,14 @@ static const struct mtk_soc_data mt7622_
+ .has_accounting = true,
+ .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
+ .tx = {
+- .desc_size = sizeof(struct mtk_tx_dma),
++ DESC_SIZE(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+- .desc_size = sizeof(struct mtk_rx_dma),
++ DESC_SIZE(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_size = MTK_DMA_SIZE(2K),
+@@ -5323,14 +5329,14 @@ static const struct mtk_soc_data mt7623_
+ .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
+ .disable_pll_modes = true,
+ .tx = {
+- .desc_size = sizeof(struct mtk_tx_dma),
++ DESC_SIZE(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+- .desc_size = sizeof(struct mtk_rx_dma),
++ DESC_SIZE(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_size = MTK_DMA_SIZE(2K),
+@@ -5349,14 +5355,14 @@ static const struct mtk_soc_data mt7629_
+ .has_accounting = true,
+ .version = 1,
+ .tx = {
+- .desc_size = sizeof(struct mtk_tx_dma),
++ DESC_SIZE(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+- .desc_size = sizeof(struct mtk_rx_dma),
++ DESC_SIZE(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_size = MTK_DMA_SIZE(2K),
+@@ -5379,14 +5385,14 @@ static const struct mtk_soc_data mt7981_
+ .has_accounting = true,
+ .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
+ .tx = {
+- .desc_size = sizeof(struct mtk_tx_dma_v2),
++ DESC_SIZE(struct mtk_tx_dma_v2),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+- .desc_size = sizeof(struct mtk_rx_dma),
++ DESC_SIZE(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+@@ -5409,14 +5415,14 @@ static const struct mtk_soc_data mt7986_
+ .has_accounting = true,
+ .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
+ .tx = {
+- .desc_size = sizeof(struct mtk_tx_dma_v2),
++ DESC_SIZE(struct mtk_tx_dma_v2),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+- .desc_size = sizeof(struct mtk_rx_dma),
++ DESC_SIZE(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+@@ -5439,14 +5445,14 @@ static const struct mtk_soc_data mt7988_
+ .has_accounting = true,
+ .foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
+ .tx = {
+- .desc_size = sizeof(struct mtk_tx_dma_v2),
++ DESC_SIZE(struct mtk_tx_dma_v2),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(4K),
+ },
+ .rx = {
+- .desc_size = sizeof(struct mtk_rx_dma_v2),
++ DESC_SIZE(struct mtk_rx_dma_v2),
+ .irq_done_mask = MTK_RX_DONE_INT_V2,
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+@@ -5463,13 +5469,13 @@ static const struct mtk_soc_data rt5350_
+ .required_pctl = false,
+ .version = 1,
+ .tx = {
+- .desc_size = sizeof(struct mtk_tx_dma),
++ DESC_SIZE(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+- .desc_size = sizeof(struct mtk_rx_dma),
++ DESC_SIZE(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID_PDMA,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -1160,7 +1160,7 @@ struct mtk_reg_map {
+ * @foe_entry_size Foe table entry size.
+ * @has_accounting Bool indicating support for accounting of
+ * offloaded flows.
+- * @desc_size Tx/Rx DMA descriptor size.
++ * @desc_shift Tx/Rx DMA descriptor size (in power-of-2).
+ * @irq_done_mask Rx irq done register mask.
+ * @dma_l4_valid Rx DMA valid register mask.
+ * @dma_max_len Max DMA tx/rx buffer length.
+@@ -1181,14 +1181,14 @@ struct mtk_soc_data {
+ bool has_accounting;
+ bool disable_pll_modes;
+ struct {
+- u32 desc_size;
++ u32 desc_shift;
+ u32 dma_max_len;
+ u32 dma_len_offset;
+ u32 dma_size;
+ u32 fq_dma_size;
+ } tx;
+ struct {
+- u32 desc_size;
++ u32 desc_shift;
+ u32 irq_done_mask;
+ u32 dma_l4_valid;
+ u32 dma_max_len;
|