961-net-ethernet-mediatek-split-tx-and-rx-fields-in-mtk_.patch

From: Lorenzo Bianconi <[email protected]>
Date: Thu, 2 Nov 2023 16:47:07 +0100
Subject: [PATCH net-next 1/2] net: ethernet: mediatek: split tx and rx fields
 in mtk_soc_data struct

Split tx and rx fields in mtk_soc_data struct. This is a preliminary
patch to roll back to QDMA for MT7986 SoC in order to fix a hw hang
if the device receives a corrupted packet.

Signed-off-by: Lorenzo Bianconi <[email protected]>
---
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 210 ++++++++++++--------
 drivers/net/ethernet/mediatek/mtk_eth_soc.h |  29 +--
 2 files changed, 139 insertions(+), 100 deletions(-)
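
Note (not part of the patch): the split replaces the single soc->txrx block with
per-direction tx and rx blocks. For quick reference, the resulting mtk_soc_data
layout looks roughly as sketched below; field names follow the mtk_eth_soc.h hunk
further down, and unrelated members are elided.

/* condensed sketch only -- see the real definition in mtk_eth_soc.h;
 * u32 comes from <linux/types.h>
 */
struct mtk_soc_data {
	/* ... other members ... */
	struct {
		u32 desc_size;		/* Tx DMA descriptor size */
		u32 dma_max_len;	/* max Tx DMA buffer length */
		u32 dma_len_offset;	/* Tx DMA length field offset */
	} tx;
	struct {
		u32 desc_size;		/* Rx DMA descriptor size */
		u32 irq_done_mask;	/* Rx irq done register mask */
		u32 dma_l4_valid;	/* Rx DMA valid register mask */
		u32 dma_max_len;	/* max Rx DMA buffer length */
		u32 dma_len_offset;	/* Rx DMA length field offset */
	} rx;
};
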
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1281,7 +1281,7 @@ static int mtk_init_fq_dma(struct mtk_et
eth->scratch_ring = eth->sram_base;
else
eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
- cnt * soc->txrx.txd_size,
+ cnt * soc->tx.desc_size,
&eth->phy_scratch_ring,
GFP_KERNEL);
if (unlikely(!eth->scratch_ring))
@@ -1297,16 +1297,16 @@ static int mtk_init_fq_dma(struct mtk_et
if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
return -ENOMEM;
- phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
+ phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
for (i = 0; i < cnt; i++) {
struct mtk_tx_dma_v2 *txd;
- txd = eth->scratch_ring + i * soc->txrx.txd_size;
+ txd = eth->scratch_ring + i * soc->tx.desc_size;
txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
if (i < cnt - 1)
txd->txd2 = eth->phy_scratch_ring +
- (i + 1) * soc->txrx.txd_size;
+ (i + 1) * soc->tx.desc_size;
txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
txd->txd4 = 0;
@@ -1555,7 +1555,7 @@ static int mtk_tx_map(struct sk_buff *sk
if (itxd == ring->last_free)
return -ENOMEM;
- itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
+ itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
memset(itx_buf, 0, sizeof(*itx_buf));
txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
@@ -1596,7 +1596,7 @@ static int mtk_tx_map(struct sk_buff *sk
memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
txd_info.size = min_t(unsigned int, frag_size,
- soc->txrx.dma_max_len);
+ soc->tx.dma_max_len);
txd_info.qid = queue;
txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
!(frag_size - txd_info.size);
@@ -1609,7 +1609,7 @@ static int mtk_tx_map(struct sk_buff *sk
mtk_tx_set_dma_desc(dev, txd, &txd_info);
tx_buf = mtk_desc_to_tx_buf(ring, txd,
- soc->txrx.txd_size);
+ soc->tx.desc_size);
if (new_desc)
memset(tx_buf, 0, sizeof(*tx_buf));
tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
@@ -1652,7 +1652,7 @@ static int mtk_tx_map(struct sk_buff *sk
} else {
int next_idx;
- next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
+ next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->tx.desc_size),
ring->dma_size);
mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
}
@@ -1661,7 +1661,7 @@ static int mtk_tx_map(struct sk_buff *sk
err_dma:
do {
- tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
+ tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
/* unmap dma */
mtk_tx_unmap(eth, tx_buf, NULL, false);
@@ -1686,7 +1686,7 @@ static int mtk_cal_txd_req(struct mtk_et
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
nfrags += DIV_ROUND_UP(skb_frag_size(frag),
- eth->soc->txrx.dma_max_len);
+ eth->soc->tx.dma_max_len);
}
} else {
nfrags += skb_shinfo(skb)->nr_frags;
@@ -1827,7 +1827,7 @@ static struct mtk_rx_ring *mtk_get_rx_ri
ring = &eth->rx_ring[i];
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
- rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
+ rxd = ring->dma + idx * eth->soc->rx.desc_size;
if (rxd->rxd2 & RX_DMA_DONE) {
ring->calc_idx_update = true;
return ring;
@@ -1995,7 +1995,7 @@ static int mtk_xdp_submit_frame(struct m
}
htxd = txd;
- tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
+ tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_size);
memset(tx_buf, 0, sizeof(*tx_buf));
htx_buf = tx_buf;
@@ -2014,7 +2014,7 @@ static int mtk_xdp_submit_frame(struct m
goto unmap;
tx_buf = mtk_desc_to_tx_buf(ring, txd,
- soc->txrx.txd_size);
+ soc->tx.desc_size);
memset(tx_buf, 0, sizeof(*tx_buf));
n_desc++;
}
@@ -2052,7 +2052,7 @@ static int mtk_xdp_submit_frame(struct m
} else {
int idx;
- idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
+ idx = txd_to_idx(ring, txd, soc->tx.desc_size);
mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
MT7628_TX_CTX_IDX0);
}
@@ -2063,7 +2063,7 @@ static int mtk_xdp_submit_frame(struct m
unmap:
while (htxd != txd) {
- tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
+ tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_size);
mtk_tx_unmap(eth, tx_buf, NULL, false);
htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
@@ -2194,7 +2194,7 @@ static int mtk_poll_rx(struct napi_struc
goto rx_done;
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
- rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
+ rxd = ring->dma + idx * eth->soc->rx.desc_size;
data = ring->data[idx];
if (!mtk_rx_get_desc(eth, &trxd, rxd))
@@ -2329,7 +2329,7 @@ static int mtk_poll_rx(struct napi_struc
rxdcsum = &trxd.rxd4;
}
- if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
+ if (*rxdcsum & eth->soc->rx.dma_l4_valid)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
@@ -2453,7 +2453,7 @@ static int mtk_poll_tx_qdma(struct mtk_e
break;
tx_buf = mtk_desc_to_tx_buf(ring, desc,
- eth->soc->txrx.txd_size);
+ eth->soc->tx.desc_size);
if (!tx_buf->data)
break;
@@ -2504,7 +2504,7 @@ static int mtk_poll_tx_pdma(struct mtk_e
}
mtk_tx_unmap(eth, tx_buf, &bq, true);
- desc = ring->dma + cpu * eth->soc->txrx.txd_size;
+ desc = ring->dma + cpu * eth->soc->tx.desc_size;
ring->last_free = desc;
atomic_inc(&ring->free_count);
@@ -2594,7 +2594,7 @@ static int mtk_napi_rx(struct napi_struc
do {
int rx_done;
- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
+ mtk_w32(eth, eth->soc->rx.irq_done_mask,
reg_map->pdma.irq_status);
rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
rx_done_total += rx_done;
@@ -2610,10 +2610,10 @@ static int mtk_napi_rx(struct napi_struc
return budget;
} while (mtk_r32(eth, reg_map->pdma.irq_status) &
- eth->soc->txrx.rx_irq_done_mask);
+ eth->soc->rx.irq_done_mask);
if (napi_complete_done(napi, rx_done_total))
- mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
return rx_done_total;
}
@@ -2622,7 +2622,7 @@ static int mtk_tx_alloc(struct mtk_eth *
{
const struct mtk_soc_data *soc = eth->soc;
struct mtk_tx_ring *ring = &eth->tx_ring;
- int i, sz = soc->txrx.txd_size;
+ int i, sz = soc->tx.desc_size;
struct mtk_tx_dma_v2 *txd;
int ring_size;
u32 ofs, val;
@@ -2745,14 +2745,14 @@ static void mtk_tx_clean(struct mtk_eth
}
if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
dma_free_coherent(eth->dma_dev,
- ring->dma_size * soc->txrx.txd_size,
+ ring->dma_size * soc->tx.desc_size,
ring->dma, ring->phys);
ring->dma = NULL;
}
if (ring->dma_pdma) {
dma_free_coherent(eth->dma_dev,
- ring->dma_size * soc->txrx.txd_size,
+ ring->dma_size * soc->tx.desc_size,
ring->dma_pdma, ring->phys_pdma);
ring->dma_pdma = NULL;
}
@@ -2807,15 +2807,15 @@ static int mtk_rx_alloc(struct mtk_eth *
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
rx_flag != MTK_RX_FLAGS_NORMAL) {
ring->dma = dma_alloc_coherent(eth->dma_dev,
- rx_dma_size * eth->soc->txrx.rxd_size,
- &ring->phys, GFP_KERNEL);
+ rx_dma_size * eth->soc->rx.desc_size,
+ &ring->phys, GFP_KERNEL);
} else {
struct mtk_tx_ring *tx_ring = &eth->tx_ring;
ring->dma = tx_ring->dma + tx_ring_size *
- eth->soc->txrx.txd_size * (ring_no + 1);
+ eth->soc->tx.desc_size * (ring_no + 1);
ring->phys = tx_ring->phys + tx_ring_size *
- eth->soc->txrx.txd_size * (ring_no + 1);
+ eth->soc->tx.desc_size * (ring_no + 1);
}
if (!ring->dma)
@@ -2826,7 +2826,7 @@ static int mtk_rx_alloc(struct mtk_eth *
dma_addr_t dma_addr;
void *data;
- rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+ rxd = ring->dma + i * eth->soc->rx.desc_size;
if (ring->page_pool) {
data = mtk_page_pool_get_buff(ring->page_pool,
&dma_addr, GFP_KERNEL);
@@ -2917,7 +2917,7 @@ static void mtk_rx_clean(struct mtk_eth
if (!ring->data[i])
continue;
- rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+ rxd = ring->dma + i * eth->soc->rx.desc_size;
if (!rxd->rxd1)
continue;
@@ -2934,7 +2934,7 @@ static void mtk_rx_clean(struct mtk_eth
if (!in_sram && ring->dma) {
dma_free_coherent(eth->dma_dev,
- ring->dma_size * eth->soc->txrx.rxd_size,
+ ring->dma_size * eth->soc->rx.desc_size,
ring->dma, ring->phys);
ring->dma = NULL;
}
@@ -3297,7 +3297,7 @@ static void mtk_dma_free(struct mtk_eth
netdev_reset_queue(eth->netdev[i]);
if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
dma_free_coherent(eth->dma_dev,
- MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
+ MTK_QDMA_RING_SIZE * soc->tx.desc_size,
eth->scratch_ring, eth->phy_scratch_ring);
eth->scratch_ring = NULL;
eth->phy_scratch_ring = 0;
@@ -3347,7 +3347,7 @@ static irqreturn_t mtk_handle_irq_rx(int
eth->rx_events++;
if (likely(napi_schedule_prep(&eth->rx_napi))) {
- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
__napi_schedule(&eth->rx_napi);
}
@@ -3373,9 +3373,9 @@ static irqreturn_t mtk_handle_irq(int ir
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
if (mtk_r32(eth, reg_map->pdma.irq_mask) &
- eth->soc->txrx.rx_irq_done_mask) {
+ eth->soc->rx.irq_done_mask) {
if (mtk_r32(eth, reg_map->pdma.irq_status) &
- eth->soc->txrx.rx_irq_done_mask)
+ eth->soc->rx.irq_done_mask)
mtk_handle_irq_rx(irq, _eth);
}
if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
@@ -3393,10 +3393,10 @@ static void mtk_poll_controller(struct n
struct mtk_eth *eth = mac->hw;
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
mtk_handle_irq_rx(eth->irq[2], dev);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
}
#endif
@@ -3563,7 +3563,7 @@ static int mtk_open(struct net_device *d
napi_enable(&eth->tx_napi);
napi_enable(&eth->rx_napi);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_enable(eth, soc->rx.irq_done_mask);
refcount_set(&eth->dma_refcnt, 1);
}
else
@@ -3647,7 +3647,7 @@ static int mtk_stop(struct net_device *d
mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
napi_disable(&eth->tx_napi);
napi_disable(&eth->rx_napi);
@@ -4126,9 +4126,9 @@ static int mtk_hw_init(struct mtk_eth *e
/* FE int grouping */
mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
+ mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->pdma.int_grp + 4);
mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
+ mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->qdma.int_grp + 4);
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
if (mtk_is_netsys_v3_or_greater(eth)) {
@@ -5305,11 +5305,15 @@ static const struct mtk_soc_data mt2701_
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
.version = 1,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5325,11 +5329,15 @@ static const struct mtk_soc_data mt7621_
.offload_version = 1,
.hash_offset = 2,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5347,11 +5355,15 @@ static const struct mtk_soc_data mt7622_
.hash_offset = 2,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5368,11 +5380,15 @@ static const struct mtk_soc_data mt7623_
.hash_offset = 2,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
.disable_pll_modes = true,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5387,11 +5403,15 @@ static const struct mtk_soc_data mt7629_
.required_pctl = false,
.has_accounting = true,
.version = 1,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5409,11 +5429,15 @@ static const struct mtk_soc_data mt7981_
.hash_offset = 4,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma_v2),
- .rxd_size = sizeof(struct mtk_rx_dma_v2),
- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma_v2),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma_v2),
+ .irq_done_mask = MTK_RX_DONE_INT_V2,
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
},
@@ -5431,11 +5455,15 @@ static const struct mtk_soc_data mt7986_
.hash_offset = 4,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma_v2),
- .rxd_size = sizeof(struct mtk_rx_dma_v2),
- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma_v2),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma_v2),
+ .irq_done_mask = MTK_RX_DONE_INT_V2,
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
},
@@ -5453,11 +5481,15 @@ static const struct mtk_soc_data mt7988_
.hash_offset = 4,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma_v2),
- .rxd_size = sizeof(struct mtk_rx_dma_v2),
- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma_v2),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma_v2),
+ .irq_done_mask = MTK_RX_DONE_INT_V2,
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
},
@@ -5470,11 +5502,15 @@ static const struct mtk_soc_data rt5350_
.required_clks = MT7628_CLKS_BITMAP,
.required_pctl = false,
.version = 1,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID_PDMA,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -327,8 +327,8 @@
/* QDMA descriptor txd3 */
#define TX_DMA_OWNER_CPU BIT(31)
#define TX_DMA_LS0 BIT(30)
-#define TX_DMA_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
-#define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len)
+#define TX_DMA_PLEN0(x) (((x) & eth->soc->tx.dma_max_len) << eth->soc->tx.dma_len_offset)
+#define TX_DMA_PLEN1(x) ((x) & eth->soc->tx.dma_max_len)
#define TX_DMA_SWC BIT(14)
#define TX_DMA_PQID GENMASK(3, 0)
#define TX_DMA_ADDR64_MASK GENMASK(3, 0)
@@ -348,8 +348,8 @@
/* QDMA descriptor rxd2 */
#define RX_DMA_DONE BIT(31)
#define RX_DMA_LSO BIT(30)
-#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
-#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len)
+#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->rx.dma_max_len) << eth->soc->rx.dma_len_offset)
+#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->rx.dma_len_offset) & eth->soc->rx.dma_max_len)
#define RX_DMA_VTAG BIT(15)
#define RX_DMA_ADDR64_MASK GENMASK(3, 0)
#if IS_ENABLED(CONFIG_64BIT)
@@ -1209,10 +1209,9 @@ struct mtk_reg_map {
* @foe_entry_size Foe table entry size.
* @has_accounting Bool indicating support for accounting of
* offloaded flows.
- * @txd_size Tx DMA descriptor size.
- * @rxd_size Rx DMA descriptor size.
- * @rx_irq_done_mask Rx irq done register mask.
- * @rx_dma_l4_valid Rx DMA valid register mask.
+ * @desc_size Tx/Rx DMA descriptor size.
+ * @irq_done_mask Rx irq done register mask.
+ * @dma_l4_valid Rx DMA valid register mask.
* @dma_max_len Max DMA tx/rx buffer length.
* @dma_len_offset Tx/Rx DMA length field offset.
*/
@@ -1230,13 +1229,17 @@ struct mtk_soc_data {
bool has_accounting;
bool disable_pll_modes;
struct {
- u32 txd_size;
- u32 rxd_size;
- u32 rx_irq_done_mask;
- u32 rx_dma_l4_valid;
+ u32 desc_size;
u32 dma_max_len;
u32 dma_len_offset;
- } txrx;
+ } tx;
+ struct {
+ u32 desc_size;
+ u32 irq_done_mask;
+ u32 dma_l4_valid;
+ u32 dma_max_len;
+ u32 dma_len_offset;
+ } rx;
};
#define MTK_DMA_MONITOR_TIMEOUT msecs_to_jiffies(1000)
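
Usage illustration (not part of the patch): the length-field macros in the
mtk_eth_soc.h hunk above now take their mask and shift from the per-direction
sub-structs instead of the shared txrx block. Open-coded with hypothetical
helper names, the same arithmetic reads:

/* hypothetical helpers, equivalent to TX_DMA_PLEN0() and RX_DMA_GET_PLEN0()
 * after the split; eth is the usual struct mtk_eth pointer
 */
static u32 mtk_tx_plen0(const struct mtk_eth *eth, u32 len)
{
	return (len & eth->soc->tx.dma_max_len) << eth->soc->tx.dma_len_offset;
}

static u32 mtk_rx_get_plen0(const struct mtk_eth *eth, u32 rxd2)
{
	return (rxd2 >> eth->soc->rx.dma_len_offset) & eth->soc->rx.dma_max_len;
}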