702-v5.19-27-net-ethernet-mtk_eth_soc-introduce-MTK_NETSYS_V2-sup.patch

From: Lorenzo Bianconi <[email protected]>
Date: Fri, 20 May 2022 20:11:36 +0200
Subject: [PATCH] net: ethernet: mtk_eth_soc: introduce MTK_NETSYS_V2 support

Introduce MTK_NETSYS_V2 support. MTK_NETSYS_V2 defines 32B TX/RX DMA
descriptors.
This is a preliminary patch to add mt7986 ethernet support.

Tested-by: Sam Shih <[email protected]>
Signed-off-by: Lorenzo Bianconi <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
---
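For orientation before the diff: a NETSYS_V2 descriptor simply grows from four to eight 32-bit words (16 to 32 bytes), and the driver walks its rings through the per-SoC txd_size/rxd_size fields added to struct mtk_soc_data, so one piece of indexing code serves both layouts. Below is a minimal standalone C sketch of that idea; txd_at() is a hypothetical helper name used only for illustration, while the struct shapes mirror the mtk_tx_dma/mtk_tx_dma_v2 definitions added by this patch.

struct mtk_tx_dma {		/* NETSYS v1: 4 x 32-bit words = 16 bytes */
	unsigned int txd1, txd2, txd3, txd4;
};

struct mtk_tx_dma_v2 {		/* NETSYS v2: 8 x 32-bit words = 32 bytes */
	unsigned int txd1, txd2, txd3, txd4;
	unsigned int txd5, txd6, txd7, txd8;
};

/* Rings are indexed by byte offset, mirroring the diff's
 * "txd = (void *)eth->scratch_ring + i * soc->txrx.txd_size",
 * so the same walker handles either descriptor size.
 */
static void *txd_at(void *ring_base, int idx, unsigned int txd_size)
{
	return (char *)ring_base + idx * txd_size;
}
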
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -873,8 +873,8 @@ static inline int mtk_max_buf_size(int f
return buf_size;
}
-static inline bool mtk_rx_get_desc(struct mtk_rx_dma *rxd,
- struct mtk_rx_dma *dma_rxd)
+static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
+ struct mtk_rx_dma_v2 *dma_rxd)
{
rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
if (!(rxd->rxd2 & RX_DMA_DONE))
@@ -883,6 +883,10 @@ static inline bool mtk_rx_get_desc(struc
rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
+ rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
+ }
return true;
}
@@ -927,7 +931,7 @@ static int mtk_init_fq_dma(struct mtk_et
phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
for (i = 0; i < cnt; i++) {
- struct mtk_tx_dma *txd;
+ struct mtk_tx_dma_v2 *txd;
txd = (void *)eth->scratch_ring + i * soc->txrx.txd_size;
txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
@@ -937,6 +941,12 @@ static int mtk_init_fq_dma(struct mtk_et
txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
txd->txd4 = 0;
+ if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
+ txd->txd5 = 0;
+ txd->txd6 = 0;
+ txd->txd7 = 0;
+ txd->txd8 = 0;
+ }
}
mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
@@ -1040,10 +1050,12 @@ static void setup_tx_buf(struct mtk_eth
}
}
-static void mtk_tx_set_dma_desc(struct net_device *dev, struct mtk_tx_dma *desc,
- struct mtk_tx_dma_desc_info *info)
+static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
+ struct mtk_tx_dma_desc_info *info)
{
struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ struct mtk_tx_dma *desc = txd;
u32 data;
WRITE_ONCE(desc->txd1, info->addr);
@@ -1067,6 +1079,59 @@ static void mtk_tx_set_dma_desc(struct n
WRITE_ONCE(desc->txd4, data);
}
+static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
+ struct mtk_tx_dma_desc_info *info)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_tx_dma_v2 *desc = txd;
+ struct mtk_eth *eth = mac->hw;
+ u32 data;
+
+ WRITE_ONCE(desc->txd1, info->addr);
+
+ data = TX_DMA_PLEN0(info->size);
+ if (info->last)
+ data |= TX_DMA_LS0;
+ WRITE_ONCE(desc->txd3, data);
+
+ if (!info->qid && mac->id)
+ info->qid = MTK_QDMA_GMAC2_QID;
+
+ data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
+ data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
+ WRITE_ONCE(desc->txd4, data);
+
+ data = 0;
+ if (info->first) {
+ if (info->gso)
+ data |= TX_DMA_TSO_V2;
+ /* tx checksum offload */
+ if (info->csum)
+ data |= TX_DMA_CHKSUM_V2;
+ }
+ WRITE_ONCE(desc->txd5, data);
+
+ data = 0;
+ if (info->first && info->vlan)
+ data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
+ WRITE_ONCE(desc->txd6, data);
+
+ WRITE_ONCE(desc->txd7, 0);
+ WRITE_ONCE(desc->txd8, 0);
+}
+
+static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
+ struct mtk_tx_dma_desc_info *info)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ mtk_tx_set_dma_desc_v2(dev, txd, info);
+ else
+ mtk_tx_set_dma_desc_v1(dev, txd, info);
+}
+
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
int tx_num, struct mtk_tx_ring *ring, bool gso)
{
@@ -1075,6 +1140,7 @@ static int mtk_tx_map(struct sk_buff *sk
.gso = gso,
.csum = skb->ip_summed == CHECKSUM_PARTIAL,
.vlan = skb_vlan_tag_present(skb),
+ .qid = skb->mark & MTK_QDMA_TX_MASK,
.vlan_tci = skb_vlan_tag_get(skb),
.first = true,
.last = !skb_is_nonlinear(skb),
@@ -1134,7 +1200,9 @@ static int mtk_tx_map(struct sk_buff *sk
}
memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
- txd_info.size = min(frag_size, MTK_TX_DMA_BUF_LEN);
+ txd_info.size = min_t(unsigned int, frag_size,
+ soc->txrx.dma_max_len);
+ txd_info.qid = skb->mark & MTK_QDMA_TX_MASK;
txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
!(frag_size - txd_info.size);
txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
@@ -1215,17 +1283,16 @@ err_dma:
return -ENOMEM;
}
-static inline int mtk_cal_txd_req(struct sk_buff *skb)
+static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
{
- int i, nfrags;
+ int i, nfrags = 1;
skb_frag_t *frag;
- nfrags = 1;
if (skb_is_gso(skb)) {
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
nfrags += DIV_ROUND_UP(skb_frag_size(frag),
- MTK_TX_DMA_BUF_LEN);
+ eth->soc->txrx.dma_max_len);
}
} else {
nfrags += skb_shinfo(skb)->nr_frags;
@@ -1277,7 +1344,7 @@ static netdev_tx_t mtk_start_xmit(struct
if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
goto drop;
- tx_num = mtk_cal_txd_req(skb);
+ tx_num = mtk_cal_txd_req(eth, skb);
if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
netif_stop_queue(dev);
netif_err(eth, tx_queued, dev,
@@ -1369,7 +1436,7 @@ static int mtk_poll_rx(struct napi_struc
int idx;
struct sk_buff *skb;
u8 *data, *new_data;
- struct mtk_rx_dma *rxd, trxd;
+ struct mtk_rx_dma_v2 *rxd, trxd;
int done = 0, bytes = 0;
while (done < budget) {
@@ -1377,7 +1444,7 @@ static int mtk_poll_rx(struct napi_struc
unsigned int pktlen;
dma_addr_t dma_addr;
u32 hash, reason;
- int mac;
+ int mac = 0;
ring = mtk_get_rx_ring(eth);
if (unlikely(!ring))
@@ -1387,16 +1454,15 @@ static int mtk_poll_rx(struct napi_struc
rxd = (void *)ring->dma + idx * eth->soc->txrx.rxd_size;
data = ring->data[idx];
- if (!mtk_rx_get_desc(&trxd, rxd))
+ if (!mtk_rx_get_desc(eth, &trxd, rxd))
break;
/* find out which mac the packet come from. values start at 1 */
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) ||
- (trxd.rxd4 & RX_DMA_SPECIAL_TAG))
- mac = 0;
- else
- mac = ((trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
- RX_DMA_FPORT_MASK) - 1;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
+ else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
+ !(trxd.rxd4 & RX_DMA_SPECIAL_TAG))
+ mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
!eth->netdev[mac]))
@@ -1442,7 +1508,7 @@ static int mtk_poll_rx(struct napi_struc
pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
skb->dev = netdev;
skb_put(skb, pktlen);
- if (trxd.rxd4 & eth->rx_dma_l4_valid)
+ if (trxd.rxd4 & eth->soc->txrx.rx_dma_l4_valid)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
@@ -1460,10 +1526,25 @@ static int mtk_poll_rx(struct napi_struc
mtk_ppe_check_skb(eth->ppe, skb,
trxd.rxd4 & MTK_RXD4_FOE_ENTRY);
- if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
- (trxd.rxd2 & RX_DMA_VTAG))
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- RX_DMA_VID(trxd.rxd3));
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (trxd.rxd3 & RX_DMA_VTAG_V2)
+ __vlan_hwaccel_put_tag(skb,
+ htons(RX_DMA_VPID(trxd.rxd4)),
+ RX_DMA_VID(trxd.rxd4));
+ } else if (trxd.rxd2 & RX_DMA_VTAG) {
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ RX_DMA_VID(trxd.rxd3));
+ }
+
+ /* If the device is attached to a dsa switch, the special
+ * tag inserted in VLAN field by hw switch can be offloaded
+ * by RX HW VLAN offload. Clear vlan info.
+ */
+ if (netdev_uses_dsa(netdev))
+ __vlan_hwaccel_clear_tag(skb);
+ }
+
skb_record_rx_queue(skb, 0);
napi_gro_receive(napi, skb);
@@ -1475,7 +1556,7 @@ release_desc:
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
rxd->rxd2 = RX_DMA_LSO;
else
- rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
+ rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
ring->calc_idx = idx;
@@ -1677,7 +1758,8 @@ static int mtk_napi_rx(struct napi_struc
do {
int rx_done;
- mtk_w32(eth, MTK_RX_DONE_INT, reg_map->pdma.irq_status);
+ mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
+ reg_map->pdma.irq_status);
rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
rx_done_total += rx_done;
@@ -1691,10 +1773,11 @@ static int mtk_napi_rx(struct napi_struc
if (rx_done_total == budget)
return budget;
- } while (mtk_r32(eth, reg_map->pdma.irq_status) & MTK_RX_DONE_INT);
+ } while (mtk_r32(eth, reg_map->pdma.irq_status) &
+ eth->soc->txrx.rx_irq_done_mask);
if (napi_complete_done(napi, rx_done_total))
- mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
+ mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
return rx_done_total;
}
@@ -1704,7 +1787,7 @@ static int mtk_tx_alloc(struct mtk_eth *
const struct mtk_soc_data *soc = eth->soc;
struct mtk_tx_ring *ring = &eth->tx_ring;
int i, sz = soc->txrx.txd_size;
- struct mtk_tx_dma *txd;
+ struct mtk_tx_dma_v2 *txd;
ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
GFP_KERNEL);
@@ -1724,13 +1807,19 @@ static int mtk_tx_alloc(struct mtk_eth *
txd->txd2 = next_ptr;
txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
txd->txd4 = 0;
+ if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
+ txd->txd5 = 0;
+ txd->txd6 = 0;
+ txd->txd7 = 0;
+ txd->txd8 = 0;
+ }
}
/* On MT7688 (PDMA only) this driver uses the ring->dma structs
* only as the framework. The real HW descriptors are the PDMA
* descriptors in ring->dma_pdma.
*/
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
&ring->phys_pdma, GFP_KERNEL);
if (!ring->dma_pdma)
@@ -1810,13 +1899,11 @@ static int mtk_rx_alloc(struct mtk_eth *
struct mtk_rx_ring *ring;
int rx_data_len, rx_dma_size;
int i;
- u32 offset = 0;
if (rx_flag == MTK_RX_FLAGS_QDMA) {
if (ring_no)
return -EINVAL;
ring = &eth->rx_ring_qdma;
- offset = 0x1000;
} else {
ring = &eth->rx_ring[ring_no];
}
@@ -1852,7 +1939,7 @@ static int mtk_rx_alloc(struct mtk_eth *
return -ENOMEM;
for (i = 0; i < rx_dma_size; i++) {
- struct mtk_rx_dma *rxd;
+ struct mtk_rx_dma_v2 *rxd;
dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
ring->data[i] + NET_SKB_PAD + eth->ip_align,
@@ -1867,26 +1954,47 @@ static int mtk_rx_alloc(struct mtk_eth *
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
rxd->rxd2 = RX_DMA_LSO;
else
- rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
+ rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
rxd->rxd3 = 0;
rxd->rxd4 = 0;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ rxd->rxd5 = 0;
+ rxd->rxd6 = 0;
+ rxd->rxd7 = 0;
+ rxd->rxd8 = 0;
+ }
}
ring->dma_size = rx_dma_size;
ring->calc_idx_update = false;
ring->calc_idx = rx_dma_size - 1;
- ring->crx_idx_reg = reg_map->pdma.pcrx_ptr + ring_no * MTK_QRX_OFFSET;
+ if (rx_flag == MTK_RX_FLAGS_QDMA)
+ ring->crx_idx_reg = reg_map->qdma.qcrx_ptr +
+ ring_no * MTK_QRX_OFFSET;
+ else
+ ring->crx_idx_reg = reg_map->pdma.pcrx_ptr +
+ ring_no * MTK_QRX_OFFSET;
/* make sure that all changes to the dma ring are flushed before we
* continue
*/
wmb();
- mtk_w32(eth, ring->phys,
- reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET + offset);
- mtk_w32(eth, rx_dma_size,
- reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET + offset);
- mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset);
- mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), reg_map->pdma.rst_idx + offset);
+ if (rx_flag == MTK_RX_FLAGS_QDMA) {
+ mtk_w32(eth, ring->phys,
+ reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
+ mtk_w32(eth, rx_dma_size,
+ reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
+ mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
+ reg_map->qdma.rst_idx);
+ } else {
+ mtk_w32(eth, ring->phys,
+ reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
+ mtk_w32(eth, rx_dma_size,
+ reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
+ mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
+ reg_map->pdma.rst_idx);
+ }
+ mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
return 0;
}
@@ -2311,7 +2419,7 @@ static irqreturn_t mtk_handle_irq_rx(int
eth->rx_events++;
if (likely(napi_schedule_prep(&eth->rx_napi))) {
__napi_schedule(&eth->rx_napi);
- mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
+ mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
}
return IRQ_HANDLED;
@@ -2335,8 +2443,10 @@ static irqreturn_t mtk_handle_irq(int ir
struct mtk_eth *eth = _eth;
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
- if (mtk_r32(eth, reg_map->pdma.irq_mask) & MTK_RX_DONE_INT) {
- if (mtk_r32(eth, reg_map->pdma.irq_status) & MTK_RX_DONE_INT)
+ if (mtk_r32(eth, reg_map->pdma.irq_mask) &
+ eth->soc->txrx.rx_irq_done_mask) {
+ if (mtk_r32(eth, reg_map->pdma.irq_status) &
+ eth->soc->txrx.rx_irq_done_mask)
mtk_handle_irq_rx(irq, _eth);
}
if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
@@ -2354,16 +2464,16 @@ static void mtk_poll_controller(struct n
struct mtk_eth *eth = mac->hw;
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
+ mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
mtk_handle_irq_rx(eth->irq[2], dev);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
+ mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
}
#endif
static int mtk_start_dma(struct mtk_eth *eth)
{
- u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
+ u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
int err;
@@ -2374,12 +2484,19 @@ static int mtk_start_dma(struct mtk_eth
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
- mtk_w32(eth,
- MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
- MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
- MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
- MTK_RX_BT_32DWORDS,
- reg_map->qdma.glo_cfg);
+ val = mtk_r32(eth, reg_map->qdma.glo_cfg);
+ val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN |
+ MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
+ MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
+ MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
+ MTK_CHK_DDONE_EN;
+ else
+ val |= MTK_RX_BT_32DWORDS;
+ mtk_w32(eth, val, reg_map->qdma.glo_cfg);
+
mtk_w32(eth,
MTK_RX_DMA_EN | rx_2b_offset |
MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
@@ -2453,7 +2570,7 @@ static int mtk_open(struct net_device *d
napi_enable(&eth->tx_napi);
napi_enable(&eth->rx_napi);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
+ mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
refcount_set(&eth->dma_refcnt, 1);
}
else
@@ -2505,7 +2622,7 @@ static int mtk_stop(struct net_device *d
mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
+ mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
napi_disable(&eth->tx_napi);
napi_disable(&eth->rx_napi);
@@ -2665,9 +2782,25 @@ static int mtk_hw_init(struct mtk_eth *e
return 0;
}
- /* Non-MT7628 handling... */
- ethsys_reset(eth, RSTCTRL_FE);
- ethsys_reset(eth, RSTCTRL_PPE);
+ val = RSTCTRL_FE | RSTCTRL_PPE;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
+
+ val |= RSTCTRL_ETH;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ val |= RSTCTRL_PPE1;
+ }
+
+ ethsys_reset(eth, val);
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
+ 0x3ffffff);
+
+ /* Set FE to PDMAv2 if necessary */
+ val = mtk_r32(eth, MTK_FE_GLO_MISC);
+ mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
+ }
if (eth->pctl) {
/* Set GE2 driving and slew rate */
@@ -2706,11 +2839,47 @@ static int mtk_hw_init(struct mtk_eth *e
/* FE int grouping */
mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
- mtk_w32(eth, MTK_RX_DONE_INT, reg_map->pdma.int_grp + 4);
+ mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
- mtk_w32(eth, MTK_RX_DONE_INT, reg_map->qdma.int_grp + 4);
+ mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ /* PSE should not drop port8 and port9 packets */
+ mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
+
+ /* PSE Free Queue Flow Control */
+ mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
+
+ /* PSE config input queue threshold */
+ mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
+ mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
+ mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
+
+ /* PSE config output queue threshold */
+ mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
+ mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
+ mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
+ mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
+ mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
+ mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
+ mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
+ mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
+
+ /* GDM and CDM Threshold */
+ mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
+ mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
+ mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
+ mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
+ mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
+ mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
+ }
+
return 0;
err_disable_pm:
@@ -3240,12 +3409,8 @@ static int mtk_probe(struct platform_dev
if (IS_ERR(eth->base))
return PTR_ERR(eth->base);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
- eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
eth->ip_align = NET_IP_ALIGN;
- } else {
- eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
- }
spin_lock_init(&eth->page_lock);
spin_lock_init(&eth->tx_irq_lock);
@@ -3481,6 +3646,10 @@ static const struct mtk_soc_data mt2701_
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
},
};
@@ -3494,6 +3663,10 @@ static const struct mtk_soc_data mt7621_
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
},
};
@@ -3508,6 +3681,10 @@ static const struct mtk_soc_data mt7622_
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
},
};
@@ -3521,6 +3698,10 @@ static const struct mtk_soc_data mt7623_
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
},
};
@@ -3534,6 +3715,10 @@ static const struct mtk_soc_data mt7629_
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
},
};
@@ -3546,6 +3731,10 @@ static const struct mtk_soc_data rt5350_
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
},
};
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -23,6 +23,7 @@
#define MTK_MAX_RX_LENGTH 1536
#define MTK_MAX_RX_LENGTH_2K 2048
#define MTK_TX_DMA_BUF_LEN 0x3fff
+#define MTK_TX_DMA_BUF_LEN_V2 0xffff
#define MTK_DMA_SIZE 512
#define MTK_NAPI_WEIGHT 64
#define MTK_MAC_COUNT 2
@@ -83,6 +84,10 @@
#define MTK_CDMQ_IG_CTRL 0x1400
#define MTK_CDMQ_STAG_EN BIT(0)
+/* CDMP Ingress Control Register */
+#define MTK_CDMP_IG_CTRL 0x400
+#define MTK_CDMP_STAG_EN BIT(0)
+
/* CDMP Exgress Control Register */
#define MTK_CDMP_EG_CTRL 0x404
@@ -102,13 +107,38 @@
/* Unicast Filter MAC Address Register - High */
#define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000))
+/* FE global misc reg*/
+#define MTK_FE_GLO_MISC 0x124
+
+/* PSE Free Queue Flow Control */
+#define PSE_FQFC_CFG1 0x100
+#define PSE_FQFC_CFG2 0x104
+#define PSE_DROP_CFG 0x108
+
+/* PSE Input Queue Reservation Register*/
+#define PSE_IQ_REV(x) (0x140 + (((x) - 1) << 2))
+
+/* PSE Output Queue Threshold Register*/
+#define PSE_OQ_TH(x) (0x160 + (((x) - 1) << 2))
+
+/* GDM and CDM Threshold */
+#define MTK_GDM2_THRES 0x1530
+#define MTK_CDMW0_THRES 0x164c
+#define MTK_CDMW1_THRES 0x1650
+#define MTK_CDME0_THRES 0x1654
+#define MTK_CDME1_THRES 0x1658
+#define MTK_CDMM_THRES 0x165c
+
/* PDMA HW LRO Control Registers */
#define MTK_PDMA_LRO_CTRL_DW0 0x980
#define MTK_LRO_EN BIT(0)
#define MTK_L3_CKS_UPD_EN BIT(7)
+#define MTK_L3_CKS_UPD_EN_V2 BIT(19)
#define MTK_LRO_ALT_PKT_CNT_MODE BIT(21)
#define MTK_LRO_RING_RELINQUISH_REQ (0x7 << 26)
+#define MTK_LRO_RING_RELINQUISH_REQ_V2 (0xf << 24)
#define MTK_LRO_RING_RELINQUISH_DONE (0x7 << 29)
+#define MTK_LRO_RING_RELINQUISH_DONE_V2 (0xf << 28)
#define MTK_PDMA_LRO_CTRL_DW1 0x984
#define MTK_PDMA_LRO_CTRL_DW2 0x988
@@ -180,6 +210,13 @@
#define MTK_TX_DMA_EN BIT(0)
#define MTK_DMA_BUSY_TIMEOUT_US 1000000
+/* QDMA V2 Global Configuration Register */
+#define MTK_CHK_DDONE_EN BIT(28)
+#define MTK_DMAD_WR_WDONE BIT(26)
+#define MTK_WCOMP_EN BIT(24)
+#define MTK_RESV_BUF (0x40 << 16)
+#define MTK_MUTLI_CNT (0x4 << 12)
+
/* QDMA Flow Control Register */
#define FC_THRES_DROP_MODE BIT(20)
#define FC_THRES_DROP_EN (7 << 16)
@@ -199,11 +236,32 @@
#define MTK_RX_DONE_INT MTK_RX_DONE_DLY
#define MTK_TX_DONE_INT MTK_TX_DONE_DLY
+#define MTK_RX_DONE_INT_V2 BIT(14)
+
/* QDMA Interrupt grouping registers */
#define MTK_RLS_DONE_INT BIT(0)
#define MTK_STAT_OFFSET 0x40
+/* QDMA TX NUM */
+#define MTK_QDMA_TX_NUM 16
+#define MTK_QDMA_TX_MASK (MTK_QDMA_TX_NUM - 1)
+#define QID_BITS_V2(x) (((x) & 0x3f) << 16)
+#define MTK_QDMA_GMAC2_QID 8
+
+#define MTK_TX_DMA_BUF_SHIFT 8
+
+/* QDMA V2 descriptor txd6 */
+#define TX_DMA_INS_VLAN_V2 BIT(16)
+/* QDMA V2 descriptor txd5 */
+#define TX_DMA_CHKSUM_V2 (0x7 << 28)
+#define TX_DMA_TSO_V2 BIT(31)
+
+/* QDMA V2 descriptor txd4 */
+#define TX_DMA_FPORT_SHIFT_V2 8
+#define TX_DMA_FPORT_MASK_V2 0xf
+#define TX_DMA_SWC_V2 BIT(30)
+
#define MTK_WDMA0_BASE 0x2800
#define MTK_WDMA1_BASE 0x2c00
@@ -217,10 +275,9 @@
/* QDMA descriptor txd3 */
#define TX_DMA_OWNER_CPU BIT(31)
#define TX_DMA_LS0 BIT(30)
-#define TX_DMA_PLEN0(_x) (((_x) & MTK_TX_DMA_BUF_LEN) << 16)
-#define TX_DMA_PLEN1(_x) ((_x) & MTK_TX_DMA_BUF_LEN)
+#define TX_DMA_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
+#define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len)
#define TX_DMA_SWC BIT(14)
-#define TX_DMA_SDL(_x) (((_x) & 0x3fff) << 16)
/* PDMA on MT7628 */
#define TX_DMA_DONE BIT(31)
@@ -230,12 +287,14 @@
/* QDMA descriptor rxd2 */
#define RX_DMA_DONE BIT(31)
#define RX_DMA_LSO BIT(30)
-#define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16)
-#define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff)
+#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
+#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len)
#define RX_DMA_VTAG BIT(15)
/* QDMA descriptor rxd3 */
-#define RX_DMA_VID(_x) ((_x) & 0xfff)
+#define RX_DMA_VID(x) ((x) & VLAN_VID_MASK)
+#define RX_DMA_TCI(x) ((x) & (VLAN_PRIO_MASK | VLAN_VID_MASK))
+#define RX_DMA_VPID(x) (((x) >> 16) & 0xffff)
/* QDMA descriptor rxd4 */
#define MTK_RXD4_FOE_ENTRY GENMASK(13, 0)
@@ -246,10 +305,15 @@
/* QDMA descriptor rxd4 */
#define RX_DMA_L4_VALID BIT(24)
#define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */
-#define RX_DMA_FPORT_SHIFT 19
-#define RX_DMA_FPORT_MASK 0x7
#define RX_DMA_SPECIAL_TAG BIT(22)
+#define RX_DMA_GET_SPORT(x) (((x) >> 19) & 0xf)
+#define RX_DMA_GET_SPORT_V2(x) (((x) >> 26) & 0x7)
+
+/* PDMA V2 descriptor rxd3 */
+#define RX_DMA_VTAG_V2 BIT(0)
+#define RX_DMA_L4_VALID_V2 BIT(2)
+
/* PHY Indirect Access Control registers */
#define MTK_PHY_IAC 0x10004
#define PHY_IAC_ACCESS BIT(31)
@@ -371,6 +435,16 @@
#define ETHSYS_TRGMII_MT7621_DDR_PLL BIT(5)
/* ethernet reset control register */
+#define ETHSYS_RSTCTRL 0x34
+#define RSTCTRL_FE BIT(6)
+#define RSTCTRL_PPE BIT(31)
+#define RSTCTRL_PPE1 BIT(30)
+#define RSTCTRL_ETH BIT(23)
+
+/* ethernet reset check idle register */
+#define ETHSYS_FE_RST_CHK_IDLE_EN 0x28
+
+/* ethernet reset control register */
#define ETHSYS_RSTCTRL 0x34
#define RSTCTRL_FE BIT(6)
#define RSTCTRL_PPE BIT(31)
@@ -454,6 +528,17 @@ struct mtk_rx_dma {
unsigned int rxd4;
} __packed __aligned(4);
+struct mtk_rx_dma_v2 {
+ unsigned int rxd1;
+ unsigned int rxd2;
+ unsigned int rxd3;
+ unsigned int rxd4;
+ unsigned int rxd5;
+ unsigned int rxd6;
+ unsigned int rxd7;
+ unsigned int rxd8;
+} __packed __aligned(4);
+
struct mtk_tx_dma {
unsigned int txd1;
unsigned int txd2;
@@ -461,6 +546,17 @@ struct mtk_tx_dma {
unsigned int txd4;
} __packed __aligned(4);
+struct mtk_tx_dma_v2 {
+ unsigned int txd1;
+ unsigned int txd2;
+ unsigned int txd3;
+ unsigned int txd4;
+ unsigned int txd5;
+ unsigned int txd6;
+ unsigned int txd7;
+ unsigned int txd8;
+} __packed __aligned(4);
+
struct mtk_eth;
struct mtk_mac;
@@ -647,7 +743,9 @@ enum mkt_eth_capabilities {
MTK_SHARED_INT_BIT,
MTK_TRGMII_MT7621_CLK_BIT,
MTK_QDMA_BIT,
+ MTK_NETSYS_V2_BIT,
MTK_SOC_MT7628_BIT,
+ MTK_RSTCTRL_PPE1_BIT,
/* MUX BITS*/
MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT,
@@ -679,7 +777,9 @@ enum mkt_eth_capabilities {
#define MTK_SHARED_INT BIT(MTK_SHARED_INT_BIT)
#define MTK_TRGMII_MT7621_CLK BIT(MTK_TRGMII_MT7621_CLK_BIT)
#define MTK_QDMA BIT(MTK_QDMA_BIT)
+#define MTK_NETSYS_V2 BIT(MTK_NETSYS_V2_BIT)
#define MTK_SOC_MT7628 BIT(MTK_SOC_MT7628_BIT)
+#define MTK_RSTCTRL_PPE1 BIT(MTK_RSTCTRL_PPE1_BIT)
#define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW \
BIT(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT)
@@ -756,6 +856,7 @@ struct mtk_tx_dma_desc_info {
dma_addr_t addr;
u32 size;
u16 vlan_tci;
+ u16 qid;
u8 gso:1;
u8 csum:1;
u8 vlan:1;
@@ -813,6 +914,10 @@ struct mtk_reg_map {
* the extra setup for those pins used by GMAC.
* @txd_size Tx DMA descriptor size.
* @rxd_size Rx DMA descriptor size.
+ * @rx_irq_done_mask Rx irq done register mask.
+ * @rx_dma_l4_valid Rx DMA valid register mask.
+ * @dma_max_len Max DMA tx/rx buffer length.
+ * @dma_len_offset Tx/Rx DMA length field offset.
*/
struct mtk_soc_data {
const struct mtk_reg_map *reg_map;
@@ -825,6 +930,10 @@ struct mtk_soc_data {
struct {
u32 txd_size;
u32 rxd_size;
+ u32 rx_irq_done_mask;
+ u32 rx_dma_l4_valid;
+ u32 dma_max_len;
+ u32 dma_len_offset;
} txrx;
};
@@ -943,7 +1052,6 @@ struct mtk_eth {
u32 tx_bytes;
struct dim tx_dim;
- u32 rx_dma_l4_valid;
int ip_align;
struct mtk_ppe *ppe;