- From: Lorenzo Bianconi <[email protected]>
- Date: Fri, 20 May 2022 20:11:36 +0200
- Subject: [PATCH] net: ethernet: mtk_eth_soc: introduce MTK_NETSYS_V2 support
- Introduce MTK_NETSYS_V2 support. MTK_NETSYS_V2 defines 32B TX/RX DMA
- descriptors.
- This is a preliminary patch to add mt7986 ethernet support.
- Tested-by: Sam Shih <[email protected]>
- Signed-off-by: Lorenzo Bianconi <[email protected]>
- Signed-off-by: David S. Miller <[email protected]>
- ---
- --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
- +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
- @@ -873,8 +873,8 @@ static inline int mtk_max_buf_size(int f
- return buf_size;
- }
-
- -static inline bool mtk_rx_get_desc(struct mtk_rx_dma *rxd,
- - struct mtk_rx_dma *dma_rxd)
- +static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
- + struct mtk_rx_dma_v2 *dma_rxd)
- {
- rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
- if (!(rxd->rxd2 & RX_DMA_DONE))
- @@ -883,6 +883,10 @@ static inline bool mtk_rx_get_desc(struc
- rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
- rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
- rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
- + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
- + rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
- + rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
- + }
-
- return true;
- }
- @@ -927,7 +931,7 @@ static int mtk_init_fq_dma(struct mtk_et
- phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
-
- for (i = 0; i < cnt; i++) {
- - struct mtk_tx_dma *txd;
- + struct mtk_tx_dma_v2 *txd;
-
- txd = (void *)eth->scratch_ring + i * soc->txrx.txd_size;
- txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
- @@ -937,6 +941,12 @@ static int mtk_init_fq_dma(struct mtk_et
-
- txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
- txd->txd4 = 0;
- + if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
- + txd->txd5 = 0;
- + txd->txd6 = 0;
- + txd->txd7 = 0;
- + txd->txd8 = 0;
- + }
- }
-
- mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
- @@ -1040,10 +1050,12 @@ static void setup_tx_buf(struct mtk_eth
- }
- }
-
- -static void mtk_tx_set_dma_desc(struct net_device *dev, struct mtk_tx_dma *desc,
- - struct mtk_tx_dma_desc_info *info)
- +static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
- + struct mtk_tx_dma_desc_info *info)
- {
- struct mtk_mac *mac = netdev_priv(dev);
- + struct mtk_eth *eth = mac->hw;
- + struct mtk_tx_dma *desc = txd;
- u32 data;
-
- WRITE_ONCE(desc->txd1, info->addr);
- @@ -1067,6 +1079,59 @@ static void mtk_tx_set_dma_desc(struct n
- WRITE_ONCE(desc->txd4, data);
- }
-
- +static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
- + struct mtk_tx_dma_desc_info *info)
- +{
- + struct mtk_mac *mac = netdev_priv(dev);
- + struct mtk_tx_dma_v2 *desc = txd;
- + struct mtk_eth *eth = mac->hw;
- + u32 data;
- +
- + WRITE_ONCE(desc->txd1, info->addr);
- +
- + data = TX_DMA_PLEN0(info->size);
- + if (info->last)
- + data |= TX_DMA_LS0;
- + WRITE_ONCE(desc->txd3, data);
- +
- + if (!info->qid && mac->id)
- + info->qid = MTK_QDMA_GMAC2_QID;
- +
- + data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
- + data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
- + WRITE_ONCE(desc->txd4, data);
- +
- + data = 0;
- + if (info->first) {
- + if (info->gso)
- + data |= TX_DMA_TSO_V2;
- + /* tx checksum offload */
- + if (info->csum)
- + data |= TX_DMA_CHKSUM_V2;
- + }
- + WRITE_ONCE(desc->txd5, data);
- +
- + data = 0;
- + if (info->first && info->vlan)
- + data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
- + WRITE_ONCE(desc->txd6, data);
- +
- + WRITE_ONCE(desc->txd7, 0);
- + WRITE_ONCE(desc->txd8, 0);
- +}
- +
- +static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
- + struct mtk_tx_dma_desc_info *info)
- +{
- + struct mtk_mac *mac = netdev_priv(dev);
- + struct mtk_eth *eth = mac->hw;
- +
- + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
- + mtk_tx_set_dma_desc_v2(dev, txd, info);
- + else
- + mtk_tx_set_dma_desc_v1(dev, txd, info);
- +}
- +
- static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
- int tx_num, struct mtk_tx_ring *ring, bool gso)
- {
- @@ -1075,6 +1140,7 @@ static int mtk_tx_map(struct sk_buff *sk
- .gso = gso,
- .csum = skb->ip_summed == CHECKSUM_PARTIAL,
- .vlan = skb_vlan_tag_present(skb),
- + .qid = skb->mark & MTK_QDMA_TX_MASK,
- .vlan_tci = skb_vlan_tag_get(skb),
- .first = true,
- .last = !skb_is_nonlinear(skb),
- @@ -1134,7 +1200,9 @@ static int mtk_tx_map(struct sk_buff *sk
- }
-
- memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
- - txd_info.size = min(frag_size, MTK_TX_DMA_BUF_LEN);
- + txd_info.size = min_t(unsigned int, frag_size,
- + soc->txrx.dma_max_len);
- + txd_info.qid = skb->mark & MTK_QDMA_TX_MASK;
- txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
- !(frag_size - txd_info.size);
- txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
- @@ -1215,17 +1283,16 @@ err_dma:
- return -ENOMEM;
- }
-
- -static inline int mtk_cal_txd_req(struct sk_buff *skb)
- +static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
- {
- - int i, nfrags;
- + int i, nfrags = 1;
- skb_frag_t *frag;
-
- - nfrags = 1;
- if (skb_is_gso(skb)) {
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- frag = &skb_shinfo(skb)->frags[i];
- nfrags += DIV_ROUND_UP(skb_frag_size(frag),
- - MTK_TX_DMA_BUF_LEN);
- + eth->soc->txrx.dma_max_len);
- }
- } else {
- nfrags += skb_shinfo(skb)->nr_frags;
- @@ -1277,7 +1344,7 @@ static netdev_tx_t mtk_start_xmit(struct
- if (unlikely(test_bit(MTK_RESETTING, ð->state)))
- goto drop;
-
- - tx_num = mtk_cal_txd_req(skb);
- + tx_num = mtk_cal_txd_req(eth, skb);
- if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
- netif_stop_queue(dev);
- netif_err(eth, tx_queued, dev,
- @@ -1369,7 +1436,7 @@ static int mtk_poll_rx(struct napi_struc
- int idx;
- struct sk_buff *skb;
- u8 *data, *new_data;
- - struct mtk_rx_dma *rxd, trxd;
- + struct mtk_rx_dma_v2 *rxd, trxd;
- int done = 0, bytes = 0;
-
- while (done < budget) {
- @@ -1377,7 +1444,7 @@ static int mtk_poll_rx(struct napi_struc
- unsigned int pktlen;
- dma_addr_t dma_addr;
- u32 hash, reason;
- - int mac;
- + int mac = 0;
-
- ring = mtk_get_rx_ring(eth);
- if (unlikely(!ring))
- @@ -1387,16 +1454,15 @@ static int mtk_poll_rx(struct napi_struc
- rxd = (void *)ring->dma + idx * eth->soc->txrx.rxd_size;
- data = ring->data[idx];
-
- - if (!mtk_rx_get_desc(&trxd, rxd))
- + if (!mtk_rx_get_desc(eth, &trxd, rxd))
- break;
-
- /* find out which mac the packet come from. values start at 1 */
- - if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) ||
- - (trxd.rxd4 & RX_DMA_SPECIAL_TAG))
- - mac = 0;
- - else
- - mac = ((trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
- - RX_DMA_FPORT_MASK) - 1;
- + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
- + mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
- + else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
- + !(trxd.rxd4 & RX_DMA_SPECIAL_TAG))
- + mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
-
- if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
- !eth->netdev[mac]))
- @@ -1442,7 +1508,7 @@ static int mtk_poll_rx(struct napi_struc
- pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
- skb->dev = netdev;
- skb_put(skb, pktlen);
- - if (trxd.rxd4 & eth->rx_dma_l4_valid)
- + if (trxd.rxd4 & eth->soc->txrx.rx_dma_l4_valid)
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
- skb_checksum_none_assert(skb);
- @@ -1460,10 +1526,25 @@ static int mtk_poll_rx(struct napi_struc
- mtk_ppe_check_skb(eth->ppe, skb,
- trxd.rxd4 & MTK_RXD4_FOE_ENTRY);
-
- - if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
- - (trxd.rxd2 & RX_DMA_VTAG))
- - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- - RX_DMA_VID(trxd.rxd3));
- + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
- + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
- + if (trxd.rxd3 & RX_DMA_VTAG_V2)
- + __vlan_hwaccel_put_tag(skb,
- + htons(RX_DMA_VPID(trxd.rxd4)),
- + RX_DMA_VID(trxd.rxd4));
- + } else if (trxd.rxd2 & RX_DMA_VTAG) {
- + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- + RX_DMA_VID(trxd.rxd3));
- + }
- +
- + /* If the device is attached to a dsa switch, the special
- + * tag inserted in VLAN field by hw switch can be offloaded
- + * by RX HW VLAN offload. Clear vlan info.
- + */
- + if (netdev_uses_dsa(netdev))
- + __vlan_hwaccel_clear_tag(skb);
- + }
- +
- skb_record_rx_queue(skb, 0);
- napi_gro_receive(napi, skb);
-
- @@ -1475,7 +1556,7 @@ release_desc:
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
- rxd->rxd2 = RX_DMA_LSO;
- else
- - rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
- + rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
-
- ring->calc_idx = idx;
-
- @@ -1677,7 +1758,8 @@ static int mtk_napi_rx(struct napi_struc
- do {
- int rx_done;
-
- - mtk_w32(eth, MTK_RX_DONE_INT, reg_map->pdma.irq_status);
- + mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
- + reg_map->pdma.irq_status);
- rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
- rx_done_total += rx_done;
-
- @@ -1691,10 +1773,11 @@ static int mtk_napi_rx(struct napi_struc
- if (rx_done_total == budget)
- return budget;
-
- - } while (mtk_r32(eth, reg_map->pdma.irq_status) & MTK_RX_DONE_INT);
- + } while (mtk_r32(eth, reg_map->pdma.irq_status) &
- + eth->soc->txrx.rx_irq_done_mask);
-
- if (napi_complete_done(napi, rx_done_total))
- - mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
- + mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
-
- return rx_done_total;
- }
- @@ -1704,7 +1787,7 @@ static int mtk_tx_alloc(struct mtk_eth *
- const struct mtk_soc_data *soc = eth->soc;
- struct mtk_tx_ring *ring = ð->tx_ring;
- int i, sz = soc->txrx.txd_size;
- - struct mtk_tx_dma *txd;
- + struct mtk_tx_dma_v2 *txd;
-
- ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
- GFP_KERNEL);
- @@ -1724,13 +1807,19 @@ static int mtk_tx_alloc(struct mtk_eth *
- txd->txd2 = next_ptr;
- txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
- txd->txd4 = 0;
- + if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
- + txd->txd5 = 0;
- + txd->txd6 = 0;
- + txd->txd7 = 0;
- + txd->txd8 = 0;
- + }
- }
-
- /* On MT7688 (PDMA only) this driver uses the ring->dma structs
- * only as the framework. The real HW descriptors are the PDMA
- * descriptors in ring->dma_pdma.
- */
- - if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
- + if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
- ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
- &ring->phys_pdma, GFP_KERNEL);
- if (!ring->dma_pdma)
- @@ -1810,13 +1899,11 @@ static int mtk_rx_alloc(struct mtk_eth *
- struct mtk_rx_ring *ring;
- int rx_data_len, rx_dma_size;
- int i;
- - u32 offset = 0;
-
- if (rx_flag == MTK_RX_FLAGS_QDMA) {
- if (ring_no)
- return -EINVAL;
- ring = ð->rx_ring_qdma;
- - offset = 0x1000;
- } else {
- ring = ð->rx_ring[ring_no];
- }
- @@ -1852,7 +1939,7 @@ static int mtk_rx_alloc(struct mtk_eth *
- return -ENOMEM;
-
- for (i = 0; i < rx_dma_size; i++) {
- - struct mtk_rx_dma *rxd;
- + struct mtk_rx_dma_v2 *rxd;
-
- dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
- ring->data[i] + NET_SKB_PAD + eth->ip_align,
- @@ -1867,26 +1954,47 @@ static int mtk_rx_alloc(struct mtk_eth *
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
- rxd->rxd2 = RX_DMA_LSO;
- else
- - rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
- + rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
-
- rxd->rxd3 = 0;
- rxd->rxd4 = 0;
- + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
- + rxd->rxd5 = 0;
- + rxd->rxd6 = 0;
- + rxd->rxd7 = 0;
- + rxd->rxd8 = 0;
- + }
- }
- ring->dma_size = rx_dma_size;
- ring->calc_idx_update = false;
- ring->calc_idx = rx_dma_size - 1;
- - ring->crx_idx_reg = reg_map->pdma.pcrx_ptr + ring_no * MTK_QRX_OFFSET;
- + if (rx_flag == MTK_RX_FLAGS_QDMA)
- + ring->crx_idx_reg = reg_map->qdma.qcrx_ptr +
- + ring_no * MTK_QRX_OFFSET;
- + else
- + ring->crx_idx_reg = reg_map->pdma.pcrx_ptr +
- + ring_no * MTK_QRX_OFFSET;
- /* make sure that all changes to the dma ring are flushed before we
- * continue
- */
- wmb();
-
- - mtk_w32(eth, ring->phys,
- - reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET + offset);
- - mtk_w32(eth, rx_dma_size,
- - reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET + offset);
- - mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset);
- - mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), reg_map->pdma.rst_idx + offset);
- + if (rx_flag == MTK_RX_FLAGS_QDMA) {
- + mtk_w32(eth, ring->phys,
- + reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
- + mtk_w32(eth, rx_dma_size,
- + reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
- + mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
- + reg_map->qdma.rst_idx);
- + } else {
- + mtk_w32(eth, ring->phys,
- + reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
- + mtk_w32(eth, rx_dma_size,
- + reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
- + mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
- + reg_map->pdma.rst_idx);
- + }
- + mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
-
- return 0;
- }
- @@ -2311,7 +2419,7 @@ static irqreturn_t mtk_handle_irq_rx(int
- eth->rx_events++;
- if (likely(napi_schedule_prep(ð->rx_napi))) {
- __napi_schedule(ð->rx_napi);
- - mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
- + mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
- }
-
- return IRQ_HANDLED;
- @@ -2335,8 +2443,10 @@ static irqreturn_t mtk_handle_irq(int ir
- struct mtk_eth *eth = _eth;
- const struct mtk_reg_map *reg_map = eth->soc->reg_map;
-
- - if (mtk_r32(eth, reg_map->pdma.irq_mask) & MTK_RX_DONE_INT) {
- - if (mtk_r32(eth, reg_map->pdma.irq_status) & MTK_RX_DONE_INT)
- + if (mtk_r32(eth, reg_map->pdma.irq_mask) &
- + eth->soc->txrx.rx_irq_done_mask) {
- + if (mtk_r32(eth, reg_map->pdma.irq_status) &
- + eth->soc->txrx.rx_irq_done_mask)
- mtk_handle_irq_rx(irq, _eth);
- }
- if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
- @@ -2354,16 +2464,16 @@ static void mtk_poll_controller(struct n
- struct mtk_eth *eth = mac->hw;
-
- mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
- - mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
- + mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
- mtk_handle_irq_rx(eth->irq[2], dev);
- mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
- - mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
- + mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
- }
- #endif
-
- static int mtk_start_dma(struct mtk_eth *eth)
- {
- - u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
- + u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
- const struct mtk_reg_map *reg_map = eth->soc->reg_map;
- int err;
-
- @@ -2374,12 +2484,19 @@ static int mtk_start_dma(struct mtk_eth
- }
-
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
- - mtk_w32(eth,
- - MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
- - MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
- - MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
- - MTK_RX_BT_32DWORDS,
- - reg_map->qdma.glo_cfg);
- + val = mtk_r32(eth, reg_map->qdma.glo_cfg);
- + val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN |
- + MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
- + MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
- +
- + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
- + val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
- + MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
- + MTK_CHK_DDONE_EN;
- + else
- + val |= MTK_RX_BT_32DWORDS;
- + mtk_w32(eth, val, reg_map->qdma.glo_cfg);
- +
- mtk_w32(eth,
- MTK_RX_DMA_EN | rx_2b_offset |
- MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
- @@ -2453,7 +2570,7 @@ static int mtk_open(struct net_device *d
- napi_enable(ð->tx_napi);
- napi_enable(ð->rx_napi);
- mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
- - mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
- + mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
- refcount_set(ð->dma_refcnt, 1);
- }
- else
- @@ -2505,7 +2622,7 @@ static int mtk_stop(struct net_device *d
- mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
-
- mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
- - mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
- + mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
- napi_disable(ð->tx_napi);
- napi_disable(ð->rx_napi);
-
- @@ -2665,9 +2782,25 @@ static int mtk_hw_init(struct mtk_eth *e
- return 0;
- }
-
- - /* Non-MT7628 handling... */
- - ethsys_reset(eth, RSTCTRL_FE);
- - ethsys_reset(eth, RSTCTRL_PPE);
- + val = RSTCTRL_FE | RSTCTRL_PPE;
- + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
- + regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
- +
- + val |= RSTCTRL_ETH;
- + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
- + val |= RSTCTRL_PPE1;
- + }
- +
- + ethsys_reset(eth, val);
- +
- + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
- + regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
- + 0x3ffffff);
- +
- + /* Set FE to PDMAv2 if necessary */
- + val = mtk_r32(eth, MTK_FE_GLO_MISC);
- + mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
- + }
-
- if (eth->pctl) {
- /* Set GE2 driving and slew rate */
- @@ -2706,11 +2839,47 @@ static int mtk_hw_init(struct mtk_eth *e
-
- /* FE int grouping */
- mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
- - mtk_w32(eth, MTK_RX_DONE_INT, reg_map->pdma.int_grp + 4);
- + mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
- mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
- - mtk_w32(eth, MTK_RX_DONE_INT, reg_map->qdma.int_grp + 4);
- + mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
- mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
-
- + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
- + /* PSE should not drop port8 and port9 packets */
- + mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
- +
- + /* PSE Free Queue Flow Control */
- + mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
- +
- + /* PSE config input queue threshold */
- + mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
- + mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
- + mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
- + mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
- + mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
- + mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
- + mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
- + mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
- +
- + /* PSE config output queue threshold */
- + mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
- + mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
- + mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
- + mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
- + mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
- + mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
- + mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
- + mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
- +
- + /* GDM and CDM Threshold */
- + mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
- + mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
- + mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
- + mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
- + mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
- + mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
- + }
- +
- return 0;
-
- err_disable_pm:
- @@ -3240,12 +3409,8 @@ static int mtk_probe(struct platform_dev
- if (IS_ERR(eth->base))
- return PTR_ERR(eth->base);
-
- - if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
- - eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
- + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
- eth->ip_align = NET_IP_ALIGN;
- - } else {
- - eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
- - }
-
- spin_lock_init(ð->page_lock);
- spin_lock_init(ð->tx_irq_lock);
- @@ -3481,6 +3646,10 @@ static const struct mtk_soc_data mt2701_
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- + .rx_irq_done_mask = MTK_RX_DONE_INT,
- + .rx_dma_l4_valid = RX_DMA_L4_VALID,
- + .dma_max_len = MTK_TX_DMA_BUF_LEN,
- + .dma_len_offset = 16,
- },
- };
-
- @@ -3494,6 +3663,10 @@ static const struct mtk_soc_data mt7621_
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- + .rx_irq_done_mask = MTK_RX_DONE_INT,
- + .rx_dma_l4_valid = RX_DMA_L4_VALID,
- + .dma_max_len = MTK_TX_DMA_BUF_LEN,
- + .dma_len_offset = 16,
- },
- };
-
- @@ -3508,6 +3681,10 @@ static const struct mtk_soc_data mt7622_
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- + .rx_irq_done_mask = MTK_RX_DONE_INT,
- + .rx_dma_l4_valid = RX_DMA_L4_VALID,
- + .dma_max_len = MTK_TX_DMA_BUF_LEN,
- + .dma_len_offset = 16,
- },
- };
-
- @@ -3521,6 +3698,10 @@ static const struct mtk_soc_data mt7623_
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- + .rx_irq_done_mask = MTK_RX_DONE_INT,
- + .rx_dma_l4_valid = RX_DMA_L4_VALID,
- + .dma_max_len = MTK_TX_DMA_BUF_LEN,
- + .dma_len_offset = 16,
- },
- };
-
- @@ -3534,6 +3715,10 @@ static const struct mtk_soc_data mt7629_
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- + .rx_irq_done_mask = MTK_RX_DONE_INT,
- + .rx_dma_l4_valid = RX_DMA_L4_VALID,
- + .dma_max_len = MTK_TX_DMA_BUF_LEN,
- + .dma_len_offset = 16,
- },
- };
-
- @@ -3546,6 +3731,10 @@ static const struct mtk_soc_data rt5350_
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- + .rx_irq_done_mask = MTK_RX_DONE_INT,
- + .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
- + .dma_max_len = MTK_TX_DMA_BUF_LEN,
- + .dma_len_offset = 16,
- },
- };
-
- --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
- +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
- @@ -23,6 +23,7 @@
- #define MTK_MAX_RX_LENGTH 1536
- #define MTK_MAX_RX_LENGTH_2K 2048
- #define MTK_TX_DMA_BUF_LEN 0x3fff
- +#define MTK_TX_DMA_BUF_LEN_V2 0xffff
- #define MTK_DMA_SIZE 512
- #define MTK_NAPI_WEIGHT 64
- #define MTK_MAC_COUNT 2
- @@ -83,6 +84,10 @@
- #define MTK_CDMQ_IG_CTRL 0x1400
- #define MTK_CDMQ_STAG_EN BIT(0)
-
- +/* CDMP Ingress Control Register */
- +#define MTK_CDMP_IG_CTRL 0x400
- +#define MTK_CDMP_STAG_EN BIT(0)
- +
- /* CDMP Exgress Control Register */
- #define MTK_CDMP_EG_CTRL 0x404
-
- @@ -102,13 +107,38 @@
- /* Unicast Filter MAC Address Register - High */
- #define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000))
-
- +/* FE global misc reg */
- +#define MTK_FE_GLO_MISC 0x124
- +
- +/* PSE Free Queue Flow Control */
- +#define PSE_FQFC_CFG1 0x100
- +#define PSE_FQFC_CFG2 0x104
- +#define PSE_DROP_CFG 0x108
- +
- +/* PSE Input Queue Reservation Register */
- +#define PSE_IQ_REV(x) (0x140 + (((x) - 1) << 2))
- +
- +/* PSE Output Queue Threshold Register */
- +#define PSE_OQ_TH(x) (0x160 + (((x) - 1) << 2))
- +
- +/* GDM and CDM Threshold */
- +#define MTK_GDM2_THRES 0x1530
- +#define MTK_CDMW0_THRES 0x164c
- +#define MTK_CDMW1_THRES 0x1650
- +#define MTK_CDME0_THRES 0x1654
- +#define MTK_CDME1_THRES 0x1658
- +#define MTK_CDMM_THRES 0x165c
- +
- /* PDMA HW LRO Control Registers */
- #define MTK_PDMA_LRO_CTRL_DW0 0x980
- #define MTK_LRO_EN BIT(0)
- #define MTK_L3_CKS_UPD_EN BIT(7)
- +#define MTK_L3_CKS_UPD_EN_V2 BIT(19)
- #define MTK_LRO_ALT_PKT_CNT_MODE BIT(21)
- #define MTK_LRO_RING_RELINQUISH_REQ (0x7 << 26)
- +#define MTK_LRO_RING_RELINQUISH_REQ_V2 (0xf << 24)
- #define MTK_LRO_RING_RELINQUISH_DONE (0x7 << 29)
- +#define MTK_LRO_RING_RELINQUISH_DONE_V2 (0xf << 28)
-
- #define MTK_PDMA_LRO_CTRL_DW1 0x984
- #define MTK_PDMA_LRO_CTRL_DW2 0x988
- @@ -180,6 +210,13 @@
- #define MTK_TX_DMA_EN BIT(0)
- #define MTK_DMA_BUSY_TIMEOUT_US 1000000
-
- +/* QDMA V2 Global Configuration Register */
- +#define MTK_CHK_DDONE_EN BIT(28)
- +#define MTK_DMAD_WR_WDONE BIT(26)
- +#define MTK_WCOMP_EN BIT(24)
- +#define MTK_RESV_BUF (0x40 << 16)
- +#define MTK_MUTLI_CNT (0x4 << 12)
- +
- /* QDMA Flow Control Register */
- #define FC_THRES_DROP_MODE BIT(20)
- #define FC_THRES_DROP_EN (7 << 16)
- @@ -199,11 +236,32 @@
- #define MTK_RX_DONE_INT MTK_RX_DONE_DLY
- #define MTK_TX_DONE_INT MTK_TX_DONE_DLY
-
- +#define MTK_RX_DONE_INT_V2 BIT(14)
- +
- /* QDMA Interrupt grouping registers */
- #define MTK_RLS_DONE_INT BIT(0)
-
- #define MTK_STAT_OFFSET 0x40
-
- +/* QDMA TX NUM */
- +#define MTK_QDMA_TX_NUM 16
- +#define MTK_QDMA_TX_MASK (MTK_QDMA_TX_NUM - 1)
- +#define QID_BITS_V2(x) (((x) & 0x3f) << 16)
- +#define MTK_QDMA_GMAC2_QID 8
- +
- +#define MTK_TX_DMA_BUF_SHIFT 8
- +
- +/* QDMA V2 descriptor txd6 */
- +#define TX_DMA_INS_VLAN_V2 BIT(16)
- +/* QDMA V2 descriptor txd5 */
- +#define TX_DMA_CHKSUM_V2 (0x7 << 28)
- +#define TX_DMA_TSO_V2 BIT(31)
- +
- +/* QDMA V2 descriptor txd4 */
- +#define TX_DMA_FPORT_SHIFT_V2 8
- +#define TX_DMA_FPORT_MASK_V2 0xf
- +#define TX_DMA_SWC_V2 BIT(30)
- +
- #define MTK_WDMA0_BASE 0x2800
- #define MTK_WDMA1_BASE 0x2c00
-
- @@ -217,10 +275,9 @@
- /* QDMA descriptor txd3 */
- #define TX_DMA_OWNER_CPU BIT(31)
- #define TX_DMA_LS0 BIT(30)
- -#define TX_DMA_PLEN0(_x) (((_x) & MTK_TX_DMA_BUF_LEN) << 16)
- -#define TX_DMA_PLEN1(_x) ((_x) & MTK_TX_DMA_BUF_LEN)
- +#define TX_DMA_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
- +#define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len)
- #define TX_DMA_SWC BIT(14)
- -#define TX_DMA_SDL(_x) (((_x) & 0x3fff) << 16)
-
- /* PDMA on MT7628 */
- #define TX_DMA_DONE BIT(31)
- @@ -230,12 +287,14 @@
- /* QDMA descriptor rxd2 */
- #define RX_DMA_DONE BIT(31)
- #define RX_DMA_LSO BIT(30)
- -#define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16)
- -#define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff)
- +#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
- +#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len)
- #define RX_DMA_VTAG BIT(15)
-
- /* QDMA descriptor rxd3 */
- -#define RX_DMA_VID(_x) ((_x) & 0xfff)
- +#define RX_DMA_VID(x) ((x) & VLAN_VID_MASK)
- +#define RX_DMA_TCI(x) ((x) & (VLAN_PRIO_MASK | VLAN_VID_MASK))
- +#define RX_DMA_VPID(x) (((x) >> 16) & 0xffff)
-
- /* QDMA descriptor rxd4 */
- #define MTK_RXD4_FOE_ENTRY GENMASK(13, 0)
- @@ -246,10 +305,15 @@
- /* QDMA descriptor rxd4 */
- #define RX_DMA_L4_VALID BIT(24)
- #define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */
- -#define RX_DMA_FPORT_SHIFT 19
- -#define RX_DMA_FPORT_MASK 0x7
- #define RX_DMA_SPECIAL_TAG BIT(22)
-
- +#define RX_DMA_GET_SPORT(x) (((x) >> 19) & 0xf)
- +#define RX_DMA_GET_SPORT_V2(x) (((x) >> 26) & 0x7)
- +
- +/* PDMA V2 descriptor rxd3 */
- +#define RX_DMA_VTAG_V2 BIT(0)
- +#define RX_DMA_L4_VALID_V2 BIT(2)
- +
- /* PHY Indirect Access Control registers */
- #define MTK_PHY_IAC 0x10004
- #define PHY_IAC_ACCESS BIT(31)
- @@ -371,6 +435,11 @@
- #define ETHSYS_TRGMII_MT7621_DDR_PLL BIT(5)
- 
- +/* ethernet reset check idle register */
- +#define ETHSYS_FE_RST_CHK_IDLE_EN 0x28
- +
- /* ethernet reset control register */
- #define ETHSYS_RSTCTRL 0x34
- #define RSTCTRL_FE BIT(6)
- #define RSTCTRL_PPE BIT(31)
- +#define RSTCTRL_PPE1 BIT(30)
- +#define RSTCTRL_ETH BIT(23)
- @@ -454,6 +528,17 @@ struct mtk_rx_dma {
- unsigned int rxd4;
- } __packed __aligned(4);
-
- +struct mtk_rx_dma_v2 {
- + unsigned int rxd1;
- + unsigned int rxd2;
- + unsigned int rxd3;
- + unsigned int rxd4;
- + unsigned int rxd5;
- + unsigned int rxd6;
- + unsigned int rxd7;
- + unsigned int rxd8;
- +} __packed __aligned(4);
- +
- struct mtk_tx_dma {
- unsigned int txd1;
- unsigned int txd2;
- @@ -461,6 +546,17 @@ struct mtk_tx_dma {
- unsigned int txd4;
- } __packed __aligned(4);
-
- +struct mtk_tx_dma_v2 {
- + unsigned int txd1;
- + unsigned int txd2;
- + unsigned int txd3;
- + unsigned int txd4;
- + unsigned int txd5;
- + unsigned int txd6;
- + unsigned int txd7;
- + unsigned int txd8;
- +} __packed __aligned(4);
- +
- struct mtk_eth;
- struct mtk_mac;
-
- @@ -647,7 +743,9 @@ enum mkt_eth_capabilities {
- MTK_SHARED_INT_BIT,
- MTK_TRGMII_MT7621_CLK_BIT,
- MTK_QDMA_BIT,
- + MTK_NETSYS_V2_BIT,
- MTK_SOC_MT7628_BIT,
- + MTK_RSTCTRL_PPE1_BIT,
-
- /* MUX BITS*/
- MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT,
- @@ -679,7 +777,9 @@ enum mkt_eth_capabilities {
- #define MTK_SHARED_INT BIT(MTK_SHARED_INT_BIT)
- #define MTK_TRGMII_MT7621_CLK BIT(MTK_TRGMII_MT7621_CLK_BIT)
- #define MTK_QDMA BIT(MTK_QDMA_BIT)
- +#define MTK_NETSYS_V2 BIT(MTK_NETSYS_V2_BIT)
- #define MTK_SOC_MT7628 BIT(MTK_SOC_MT7628_BIT)
- +#define MTK_RSTCTRL_PPE1 BIT(MTK_RSTCTRL_PPE1_BIT)
-
- #define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW \
- BIT(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT)
- @@ -756,6 +856,7 @@ struct mtk_tx_dma_desc_info {
- dma_addr_t addr;
- u32 size;
- u16 vlan_tci;
- + u16 qid;
- u8 gso:1;
- u8 csum:1;
- u8 vlan:1;
- @@ -813,6 +914,10 @@ struct mtk_reg_map {
- * the extra setup for those pins used by GMAC.
- * @txd_size Tx DMA descriptor size.
- * @rxd_size Rx DMA descriptor size.
- + * @rx_irq_done_mask Rx irq done register mask.
- + * @rx_dma_l4_valid Rx DMA valid register mask.
- + * @dma_max_len Max DMA tx/rx buffer length.
- + * @dma_len_offset Tx/Rx DMA length field offset.
- */
- struct mtk_soc_data {
- const struct mtk_reg_map *reg_map;
- @@ -825,6 +930,10 @@ struct mtk_soc_data {
- struct {
- u32 txd_size;
- u32 rxd_size;
- + u32 rx_irq_done_mask;
- + u32 rx_dma_l4_valid;
- + u32 dma_max_len;
- + u32 dma_len_offset;
- } txrx;
- };
-
- @@ -943,7 +1052,6 @@ struct mtk_eth {
- u32 tx_bytes;
- struct dim tx_dim;
-
- - u32 rx_dma_l4_valid;
- int ip_align;
-
- struct mtk_ppe *ppe;
|