702-v5.19-26-net-ethernet-mtk_eth_soc-introduce-device-register-m.patch 26 KB

  1. From: Lorenzo Bianconi <[email protected]>
  2. Date: Fri, 20 May 2022 20:11:35 +0200
  3. Subject: [PATCH] net: ethernet: mtk_eth_soc: introduce device register map
  4. Introduce reg_map structure to add the capability to support different
  5. register definitions. Move register definitions in mtk_regmap structure.
  6. This is a preliminary patch to introduce mt7986 ethernet support.
  7. Tested-by: Sam Shih <[email protected]>
  8. Signed-off-by: Lorenzo Bianconi <[email protected]>
  9. Signed-off-by: David S. Miller <[email protected]>
  10. ---
  11. --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
  12. +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
  13. @@ -34,6 +34,59 @@ MODULE_PARM_DESC(msg_level, "Message lev
  14. #define MTK_ETHTOOL_STAT(x) { #x, \
  15. offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
  16. +static const struct mtk_reg_map mtk_reg_map = {
  17. + .tx_irq_mask = 0x1a1c,
  18. + .tx_irq_status = 0x1a18,
  19. + .pdma = {
  20. + .rx_ptr = 0x0900,
  21. + .rx_cnt_cfg = 0x0904,
  22. + .pcrx_ptr = 0x0908,
  23. + .glo_cfg = 0x0a04,
  24. + .rst_idx = 0x0a08,
  25. + .delay_irq = 0x0a0c,
  26. + .irq_status = 0x0a20,
  27. + .irq_mask = 0x0a28,
  28. + .int_grp = 0x0a50,
  29. + },
  30. + .qdma = {
  31. + .qtx_cfg = 0x1800,
  32. + .rx_ptr = 0x1900,
  33. + .rx_cnt_cfg = 0x1904,
  34. + .qcrx_ptr = 0x1908,
  35. + .glo_cfg = 0x1a04,
  36. + .rst_idx = 0x1a08,
  37. + .delay_irq = 0x1a0c,
  38. + .fc_th = 0x1a10,
  39. + .int_grp = 0x1a20,
  40. + .hred = 0x1a44,
  41. + .ctx_ptr = 0x1b00,
  42. + .dtx_ptr = 0x1b04,
  43. + .crx_ptr = 0x1b10,
  44. + .drx_ptr = 0x1b14,
  45. + .fq_head = 0x1b20,
  46. + .fq_tail = 0x1b24,
  47. + .fq_count = 0x1b28,
  48. + .fq_blen = 0x1b2c,
  49. + },
  50. + .gdm1_cnt = 0x2400,
  51. +};
  52. +
  53. +static const struct mtk_reg_map mt7628_reg_map = {
  54. + .tx_irq_mask = 0x0a28,
  55. + .tx_irq_status = 0x0a20,
  56. + .pdma = {
  57. + .rx_ptr = 0x0900,
  58. + .rx_cnt_cfg = 0x0904,
  59. + .pcrx_ptr = 0x0908,
  60. + .glo_cfg = 0x0a04,
  61. + .rst_idx = 0x0a08,
  62. + .delay_irq = 0x0a0c,
  63. + .irq_status = 0x0a20,
  64. + .irq_mask = 0x0a28,
  65. + .int_grp = 0x0a50,
  66. + },
  67. +};
  68. +
  69. /* strings used by ethtool */
  70. static const struct mtk_ethtool_stats {
  71. char str[ETH_GSTRING_LEN];
  72. @@ -629,8 +682,8 @@ static inline void mtk_tx_irq_disable(st
  73. u32 val;
  74. spin_lock_irqsave(&eth->tx_irq_lock, flags);
  75. - val = mtk_r32(eth, eth->tx_int_mask_reg);
  76. - mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg);
  77. + val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
  78. + mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
  79. spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
  80. }
  81. @@ -640,8 +693,8 @@ static inline void mtk_tx_irq_enable(str
  82. u32 val;
  83. spin_lock_irqsave(&eth->tx_irq_lock, flags);
  84. - val = mtk_r32(eth, eth->tx_int_mask_reg);
  85. - mtk_w32(eth, val | mask, eth->tx_int_mask_reg);
  86. + val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
  87. + mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
  88. spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
  89. }
  90. @@ -651,8 +704,8 @@ static inline void mtk_rx_irq_disable(st
  91. u32 val;
  92. spin_lock_irqsave(&eth->rx_irq_lock, flags);
  93. - val = mtk_r32(eth, MTK_PDMA_INT_MASK);
  94. - mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
  95. + val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
  96. + mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
  97. spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
  98. }
  99. @@ -662,8 +715,8 @@ static inline void mtk_rx_irq_enable(str
  100. u32 val;
  101. spin_lock_irqsave(&eth->rx_irq_lock, flags);
  102. - val = mtk_r32(eth, MTK_PDMA_INT_MASK);
  103. - mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
  104. + val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
  105. + mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
  106. spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
  107. }
  108. @@ -714,39 +767,39 @@ void mtk_stats_update_mac(struct mtk_mac
  109. hw_stats->rx_checksum_errors +=
  110. mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
  111. } else {
  112. + const struct mtk_reg_map *reg_map = eth->soc->reg_map;
  113. unsigned int offs = hw_stats->reg_offset;
  114. u64 stats;
  115. - hw_stats->rx_bytes += mtk_r32(mac->hw,
  116. - MTK_GDM1_RX_GBCNT_L + offs);
  117. - stats = mtk_r32(mac->hw, MTK_GDM1_RX_GBCNT_H + offs);
  118. + hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
  119. + stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
  120. if (stats)
  121. hw_stats->rx_bytes += (stats << 32);
  122. hw_stats->rx_packets +=
  123. - mtk_r32(mac->hw, MTK_GDM1_RX_GPCNT + offs);
  124. + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs);
  125. hw_stats->rx_overflow +=
  126. - mtk_r32(mac->hw, MTK_GDM1_RX_OERCNT + offs);
  127. + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
  128. hw_stats->rx_fcs_errors +=
  129. - mtk_r32(mac->hw, MTK_GDM1_RX_FERCNT + offs);
  130. + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
  131. hw_stats->rx_short_errors +=
  132. - mtk_r32(mac->hw, MTK_GDM1_RX_SERCNT + offs);
  133. + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
  134. hw_stats->rx_long_errors +=
  135. - mtk_r32(mac->hw, MTK_GDM1_RX_LENCNT + offs);
  136. + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
  137. hw_stats->rx_checksum_errors +=
  138. - mtk_r32(mac->hw, MTK_GDM1_RX_CERCNT + offs);
  139. + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
  140. hw_stats->rx_flow_control_packets +=
  141. - mtk_r32(mac->hw, MTK_GDM1_RX_FCCNT + offs);
  142. + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
  143. hw_stats->tx_skip +=
  144. - mtk_r32(mac->hw, MTK_GDM1_TX_SKIPCNT + offs);
  145. + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
  146. hw_stats->tx_collisions +=
  147. - mtk_r32(mac->hw, MTK_GDM1_TX_COLCNT + offs);
  148. + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
  149. hw_stats->tx_bytes +=
  150. - mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_L + offs);
  151. - stats = mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_H + offs);
  152. + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
  153. + stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
  154. if (stats)
  155. hw_stats->tx_bytes += (stats << 32);
  156. hw_stats->tx_packets +=
  157. - mtk_r32(mac->hw, MTK_GDM1_TX_GPCNT + offs);
  158. + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
  159. }
  160. u64_stats_update_end(&hw_stats->syncp);
  161. @@ -886,10 +939,10 @@ static int mtk_init_fq_dma(struct mtk_et
  162. txd->txd4 = 0;
  163. }
  164. - mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
  165. - mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
  166. - mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
  167. - mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
  168. + mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
  169. + mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
  170. + mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
  171. + mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);
  172. return 0;
  173. }
  174. @@ -1133,7 +1186,7 @@ static int mtk_tx_map(struct sk_buff *sk
  175. if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
  176. if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
  177. !netdev_xmit_more())
  178. - mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
  179. + mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
  180. } else {
  181. int next_idx;
  182. @@ -1450,6 +1503,7 @@ rx_done:
  183. static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
  184. unsigned int *done, unsigned int *bytes)
  185. {
  186. + const struct mtk_reg_map *reg_map = eth->soc->reg_map;
  187. struct mtk_tx_ring *ring = &eth->tx_ring;
  188. struct mtk_tx_dma *desc;
  189. struct sk_buff *skb;
  190. @@ -1457,7 +1511,7 @@ static int mtk_poll_tx_qdma(struct mtk_e
  191. u32 cpu, dma;
  192. cpu = ring->last_free_ptr;
  193. - dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
  194. + dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
  195. desc = mtk_qdma_phys_to_virt(ring, cpu);
  196. @@ -1492,7 +1546,7 @@ static int mtk_poll_tx_qdma(struct mtk_e
  197. }
  198. ring->last_free_ptr = cpu;
  199. - mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
  200. + mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
  201. return budget;
  202. }
  203. @@ -1585,24 +1639,25 @@ static void mtk_handle_status_irq(struct
  204. static int mtk_napi_tx(struct napi_struct *napi, int budget)
  205. {
  206. struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
  207. + const struct mtk_reg_map *reg_map = eth->soc->reg_map;
  208. int tx_done = 0;
  209. if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
  210. mtk_handle_status_irq(eth);
  211. - mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
  212. + mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
  213. tx_done = mtk_poll_tx(eth, budget);
  214. if (unlikely(netif_msg_intr(eth))) {
  215. dev_info(eth->dev,
  216. "done tx %d, intr 0x%08x/0x%x\n", tx_done,
  217. - mtk_r32(eth, eth->tx_int_status_reg),
  218. - mtk_r32(eth, eth->tx_int_mask_reg));
  219. + mtk_r32(eth, reg_map->tx_irq_status),
  220. + mtk_r32(eth, reg_map->tx_irq_mask));
  221. }
  222. if (tx_done == budget)
  223. return budget;
  224. - if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
  225. + if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
  226. return budget;
  227. if (napi_complete_done(napi, tx_done))
  228. @@ -1614,6 +1669,7 @@ static int mtk_napi_tx(struct napi_struc
  229. static int mtk_napi_rx(struct napi_struct *napi, int budget)
  230. {
  231. struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
  232. + const struct mtk_reg_map *reg_map = eth->soc->reg_map;
  233. int rx_done_total = 0;
  234. mtk_handle_status_irq(eth);
  235. @@ -1621,21 +1677,21 @@ static int mtk_napi_rx(struct napi_struc
  236. do {
  237. int rx_done;
  238. - mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
  239. + mtk_w32(eth, MTK_RX_DONE_INT, reg_map->pdma.irq_status);
  240. rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
  241. rx_done_total += rx_done;
  242. if (unlikely(netif_msg_intr(eth))) {
  243. dev_info(eth->dev,
  244. "done rx %d, intr 0x%08x/0x%x\n", rx_done,
  245. - mtk_r32(eth, MTK_PDMA_INT_STATUS),
  246. - mtk_r32(eth, MTK_PDMA_INT_MASK));
  247. + mtk_r32(eth, reg_map->pdma.irq_status),
  248. + mtk_r32(eth, reg_map->pdma.irq_mask));
  249. }
  250. if (rx_done_total == budget)
  251. return budget;
  252. - } while (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT);
  253. + } while (mtk_r32(eth, reg_map->pdma.irq_status) & MTK_RX_DONE_INT);
  254. if (napi_complete_done(napi, rx_done_total))
  255. mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
  256. @@ -1698,20 +1754,20 @@ static int mtk_tx_alloc(struct mtk_eth *
  257. */
  258. wmb();
  259. - if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
  260. - mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
  261. - mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
  262. + if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
  263. + mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
  264. + mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
  265. mtk_w32(eth,
  266. ring->phys + ((MTK_DMA_SIZE - 1) * sz),
  267. - MTK_QTX_CRX_PTR);
  268. - mtk_w32(eth, ring->last_free_ptr, MTK_QTX_DRX_PTR);
  269. + soc->reg_map->qdma.crx_ptr);
  270. + mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
  271. mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
  272. - MTK_QTX_CFG(0));
  273. + soc->reg_map->qdma.qtx_cfg);
  274. } else {
  275. mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
  276. mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
  277. mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
  278. - mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
  279. + mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
  280. }
  281. return 0;
  282. @@ -1750,6 +1806,7 @@ static void mtk_tx_clean(struct mtk_eth
  283. static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
  284. {
  285. + const struct mtk_reg_map *reg_map = eth->soc->reg_map;
  286. struct mtk_rx_ring *ring;
  287. int rx_data_len, rx_dma_size;
  288. int i;
  289. @@ -1818,16 +1875,18 @@ static int mtk_rx_alloc(struct mtk_eth *
  290. ring->dma_size = rx_dma_size;
  291. ring->calc_idx_update = false;
  292. ring->calc_idx = rx_dma_size - 1;
  293. - ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no);
  294. + ring->crx_idx_reg = reg_map->pdma.pcrx_ptr + ring_no * MTK_QRX_OFFSET;
  295. /* make sure that all changes to the dma ring are flushed before we
  296. * continue
  297. */
  298. wmb();
  299. - mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no) + offset);
  300. - mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no) + offset);
  301. + mtk_w32(eth, ring->phys,
  302. + reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET + offset);
  303. + mtk_w32(eth, rx_dma_size,
  304. + reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET + offset);
  305. mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset);
  306. - mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX + offset);
  307. + mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), reg_map->pdma.rst_idx + offset);
  308. return 0;
  309. }
  310. @@ -2139,9 +2198,9 @@ static int mtk_dma_busy_wait(struct mtk_
  311. u32 val;
  312. if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
  313. - reg = MTK_QDMA_GLO_CFG;
  314. + reg = eth->soc->reg_map->qdma.glo_cfg;
  315. else
  316. - reg = MTK_PDMA_GLO_CFG;
  317. + reg = eth->soc->reg_map->pdma.glo_cfg;
  318. ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
  319. !(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
  320. @@ -2199,8 +2258,8 @@ static int mtk_dma_init(struct mtk_eth *
  321. * automatically
  322. */
  323. mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
  324. - FC_THRES_MIN, MTK_QDMA_FC_THRES);
  325. - mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
  326. + FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
  327. + mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred);
  328. }
  329. return 0;
  330. @@ -2274,13 +2333,14 @@ static irqreturn_t mtk_handle_irq_tx(int
  331. static irqreturn_t mtk_handle_irq(int irq, void *_eth)
  332. {
  333. struct mtk_eth *eth = _eth;
  334. + const struct mtk_reg_map *reg_map = eth->soc->reg_map;
  335. - if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT) {
  336. - if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT)
  337. + if (mtk_r32(eth, reg_map->pdma.irq_mask) & MTK_RX_DONE_INT) {
  338. + if (mtk_r32(eth, reg_map->pdma.irq_status) & MTK_RX_DONE_INT)
  339. mtk_handle_irq_rx(irq, _eth);
  340. }
  341. - if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) {
  342. - if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
  343. + if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
  344. + if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
  345. mtk_handle_irq_tx(irq, _eth);
  346. }
  347. @@ -2304,6 +2364,7 @@ static void mtk_poll_controller(struct n
  348. static int mtk_start_dma(struct mtk_eth *eth)
  349. {
  350. u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
  351. + const struct mtk_reg_map *reg_map = eth->soc->reg_map;
  352. int err;
  353. err = mtk_dma_init(eth);
  354. @@ -2318,16 +2379,15 @@ static int mtk_start_dma(struct mtk_eth
  355. MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
  356. MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
  357. MTK_RX_BT_32DWORDS,
  358. - MTK_QDMA_GLO_CFG);
  359. -
  360. + reg_map->qdma.glo_cfg);
  361. mtk_w32(eth,
  362. MTK_RX_DMA_EN | rx_2b_offset |
  363. MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
  364. - MTK_PDMA_GLO_CFG);
  365. + reg_map->pdma.glo_cfg);
  366. } else {
  367. mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
  368. MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
  369. - MTK_PDMA_GLO_CFG);
  370. + reg_map->pdma.glo_cfg);
  371. }
  372. return 0;
  373. @@ -2453,8 +2513,8 @@ static int mtk_stop(struct net_device *d
  374. cancel_work_sync(&eth->tx_dim.work);
  375. if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
  376. - mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
  377. - mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
  378. + mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
  379. + mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
  380. mtk_dma_free(eth);
  381. @@ -2508,6 +2568,7 @@ static void mtk_dim_rx(struct work_struc
  382. {
  383. struct dim *dim = container_of(work, struct dim, work);
  384. struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
  385. + const struct mtk_reg_map *reg_map = eth->soc->reg_map;
  386. struct dim_cq_moder cur_profile;
  387. u32 val, cur;
  388. @@ -2515,7 +2576,7 @@ static void mtk_dim_rx(struct work_struc
  389. dim->profile_ix);
  390. spin_lock_bh(&eth->dim_lock);
  391. - val = mtk_r32(eth, MTK_PDMA_DELAY_INT);
  392. + val = mtk_r32(eth, reg_map->pdma.delay_irq);
  393. val &= MTK_PDMA_DELAY_TX_MASK;
  394. val |= MTK_PDMA_DELAY_RX_EN;
  395. @@ -2525,9 +2586,9 @@ static void mtk_dim_rx(struct work_struc
  396. cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
  397. val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
  398. - mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
  399. + mtk_w32(eth, val, reg_map->pdma.delay_irq);
  400. if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
  401. - mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
  402. + mtk_w32(eth, val, reg_map->qdma.delay_irq);
  403. spin_unlock_bh(&eth->dim_lock);
  404. @@ -2538,6 +2599,7 @@ static void mtk_dim_tx(struct work_struc
  405. {
  406. struct dim *dim = container_of(work, struct dim, work);
  407. struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
  408. + const struct mtk_reg_map *reg_map = eth->soc->reg_map;
  409. struct dim_cq_moder cur_profile;
  410. u32 val, cur;
  411. @@ -2545,7 +2607,7 @@ static void mtk_dim_tx(struct work_struc
  412. dim->profile_ix);
  413. spin_lock_bh(&eth->dim_lock);
  414. - val = mtk_r32(eth, MTK_PDMA_DELAY_INT);
  415. + val = mtk_r32(eth, reg_map->pdma.delay_irq);
  416. val &= MTK_PDMA_DELAY_RX_MASK;
  417. val |= MTK_PDMA_DELAY_TX_EN;
  418. @@ -2555,9 +2617,9 @@ static void mtk_dim_tx(struct work_struc
  419. cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
  420. val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
  421. - mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
  422. + mtk_w32(eth, val, reg_map->pdma.delay_irq);
  423. if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
  424. - mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
  425. + mtk_w32(eth, val, reg_map->qdma.delay_irq);
  426. spin_unlock_bh(&eth->dim_lock);
  427. @@ -2568,6 +2630,7 @@ static int mtk_hw_init(struct mtk_eth *e
  428. {
  429. u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
  430. ETHSYS_DMA_AG_MAP_PPE;
  431. + const struct mtk_reg_map *reg_map = eth->soc->reg_map;
  432. int i, val, ret;
  433. if (test_and_set_bit(MTK_HW_INIT, &eth->state))
  434. @@ -2642,10 +2705,10 @@ static int mtk_hw_init(struct mtk_eth *e
  435. mtk_rx_irq_disable(eth, ~0);
  436. /* FE int grouping */
  437. - mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
  438. - mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
  439. - mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
  440. - mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
  441. + mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
  442. + mtk_w32(eth, MTK_RX_DONE_INT, reg_map->pdma.int_grp + 4);
  443. + mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
  444. + mtk_w32(eth, MTK_RX_DONE_INT, reg_map->qdma.int_grp + 4);
  445. mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
  446. return 0;
  447. @@ -3177,14 +3240,6 @@ static int mtk_probe(struct platform_dev
  448. if (IS_ERR(eth->base))
  449. return PTR_ERR(eth->base);
  450. - if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
  451. - eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
  452. - eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
  453. - } else {
  454. - eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
  455. - eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
  456. - }
  457. -
  458. if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
  459. eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
  460. eth->ip_align = NET_IP_ALIGN;
  461. @@ -3418,6 +3473,7 @@ static int mtk_remove(struct platform_de
  462. }
  463. static const struct mtk_soc_data mt2701_data = {
  464. + .reg_map = &mtk_reg_map,
  465. .caps = MT7623_CAPS | MTK_HWLRO,
  466. .hw_features = MTK_HW_FEATURES,
  467. .required_clks = MT7623_CLKS_BITMAP,
  468. @@ -3429,6 +3485,7 @@ static const struct mtk_soc_data mt2701_
  469. };
  470. static const struct mtk_soc_data mt7621_data = {
  471. + .reg_map = &mtk_reg_map,
  472. .caps = MT7621_CAPS,
  473. .hw_features = MTK_HW_FEATURES,
  474. .required_clks = MT7621_CLKS_BITMAP,
  475. @@ -3441,6 +3498,7 @@ static const struct mtk_soc_data mt7621_
  476. };
  477. static const struct mtk_soc_data mt7622_data = {
  478. + .reg_map = &mtk_reg_map,
  479. .ana_rgc3 = 0x2028,
  480. .caps = MT7622_CAPS | MTK_HWLRO,
  481. .hw_features = MTK_HW_FEATURES,
  482. @@ -3454,6 +3512,7 @@ static const struct mtk_soc_data mt7622_
  483. };
  484. static const struct mtk_soc_data mt7623_data = {
  485. + .reg_map = &mtk_reg_map,
  486. .caps = MT7623_CAPS | MTK_HWLRO,
  487. .hw_features = MTK_HW_FEATURES,
  488. .required_clks = MT7623_CLKS_BITMAP,
  489. @@ -3466,6 +3525,7 @@ static const struct mtk_soc_data mt7623_
  490. };
  491. static const struct mtk_soc_data mt7629_data = {
  492. + .reg_map = &mtk_reg_map,
  493. .ana_rgc3 = 0x128,
  494. .caps = MT7629_CAPS | MTK_HWLRO,
  495. .hw_features = MTK_HW_FEATURES,
  496. @@ -3478,6 +3538,7 @@ static const struct mtk_soc_data mt7629_
  497. };
  498. static const struct mtk_soc_data rt5350_data = {
  499. + .reg_map = &mt7628_reg_map,
  500. .caps = MT7628_CAPS,
  501. .hw_features = MTK_HW_FEATURES_MT7628,
  502. .required_clks = MT7628_CLKS_BITMAP,
  503. --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
  504. +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
  505. @@ -48,6 +48,8 @@
  506. #define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM)
  507. #define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1))
  508. +#define MTK_QRX_OFFSET 0x10
  509. +
  510. #define MTK_MAX_RX_RING_NUM 4
  511. #define MTK_HW_LRO_DMA_SIZE 8
  512. @@ -100,18 +102,6 @@
  513. /* Unicast Filter MAC Address Register - High */
  514. #define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000))
  515. -/* PDMA RX Base Pointer Register */
  516. -#define MTK_PRX_BASE_PTR0 0x900
  517. -#define MTK_PRX_BASE_PTR_CFG(x) (MTK_PRX_BASE_PTR0 + (x * 0x10))
  518. -
  519. -/* PDMA RX Maximum Count Register */
  520. -#define MTK_PRX_MAX_CNT0 0x904
  521. -#define MTK_PRX_MAX_CNT_CFG(x) (MTK_PRX_MAX_CNT0 + (x * 0x10))
  522. -
  523. -/* PDMA RX CPU Pointer Register */
  524. -#define MTK_PRX_CRX_IDX0 0x908
  525. -#define MTK_PRX_CRX_IDX_CFG(x) (MTK_PRX_CRX_IDX0 + (x * 0x10))
  526. -
  527. /* PDMA HW LRO Control Registers */
  528. #define MTK_PDMA_LRO_CTRL_DW0 0x980
  529. #define MTK_LRO_EN BIT(0)
  530. @@ -126,18 +116,19 @@
  531. #define MTK_ADMA_MODE BIT(15)
  532. #define MTK_LRO_MIN_RXD_SDL (MTK_HW_LRO_SDL_REMAIN_ROOM << 16)
  533. -/* PDMA Global Configuration Register */
  534. -#define MTK_PDMA_GLO_CFG 0xa04
  535. +#define MTK_RX_DMA_LRO_EN BIT(8)
  536. #define MTK_MULTI_EN BIT(10)
  537. #define MTK_PDMA_SIZE_8DWORDS (1 << 4)
  538. +/* PDMA Global Configuration Register */
  539. +#define MTK_PDMA_LRO_SDL 0x3000
  540. +#define MTK_RX_CFG_SDL_OFFSET 16
  541. +
  542. /* PDMA Reset Index Register */
  543. -#define MTK_PDMA_RST_IDX 0xa08
  544. #define MTK_PST_DRX_IDX0 BIT(16)
  545. #define MTK_PST_DRX_IDX_CFG(x) (MTK_PST_DRX_IDX0 << (x))
  546. /* PDMA Delay Interrupt Register */
  547. -#define MTK_PDMA_DELAY_INT 0xa0c
  548. #define MTK_PDMA_DELAY_RX_MASK GENMASK(15, 0)
  549. #define MTK_PDMA_DELAY_RX_EN BIT(15)
  550. #define MTK_PDMA_DELAY_RX_PINT_SHIFT 8
  551. @@ -151,19 +142,9 @@
  552. #define MTK_PDMA_DELAY_PINT_MASK 0x7f
  553. #define MTK_PDMA_DELAY_PTIME_MASK 0xff
  554. -/* PDMA Interrupt Status Register */
  555. -#define MTK_PDMA_INT_STATUS 0xa20
  556. -
  557. -/* PDMA Interrupt Mask Register */
  558. -#define MTK_PDMA_INT_MASK 0xa28
  559. -
  560. /* PDMA HW LRO Alter Flow Delta Register */
  561. #define MTK_PDMA_LRO_ALT_SCORE_DELTA 0xa4c
  562. -/* PDMA Interrupt grouping registers */
  563. -#define MTK_PDMA_INT_GRP1 0xa50
  564. -#define MTK_PDMA_INT_GRP2 0xa54
  565. -
  566. /* PDMA HW LRO IP Setting Registers */
  567. #define MTK_LRO_RX_RING0_DIP_DW0 0xb04
  568. #define MTK_LRO_DIP_DW0_CFG(x) (MTK_LRO_RX_RING0_DIP_DW0 + (x * 0x40))
  569. @@ -185,26 +166,9 @@
  570. #define MTK_RING_MAX_AGG_CNT_H ((MTK_HW_LRO_MAX_AGG_CNT >> 6) & 0x3)
  571. /* QDMA TX Queue Configuration Registers */
  572. -#define MTK_QTX_CFG(x) (0x1800 + (x * 0x10))
  573. #define QDMA_RES_THRES 4
  574. -/* QDMA TX Queue Scheduler Registers */
  575. -#define MTK_QTX_SCH(x) (0x1804 + (x * 0x10))
  576. -
  577. -/* QDMA RX Base Pointer Register */
  578. -#define MTK_QRX_BASE_PTR0 0x1900
  579. -
  580. -/* QDMA RX Maximum Count Register */
  581. -#define MTK_QRX_MAX_CNT0 0x1904
  582. -
  583. -/* QDMA RX CPU Pointer Register */
  584. -#define MTK_QRX_CRX_IDX0 0x1908
  585. -
  586. -/* QDMA RX DMA Pointer Register */
  587. -#define MTK_QRX_DRX_IDX0 0x190C
  588. -
  589. /* QDMA Global Configuration Register */
  590. -#define MTK_QDMA_GLO_CFG 0x1A04
  591. #define MTK_RX_2B_OFFSET BIT(31)
  592. #define MTK_RX_BT_32DWORDS (3 << 11)
  593. #define MTK_NDP_CO_PRO BIT(10)
  594. @@ -216,20 +180,12 @@
  595. #define MTK_TX_DMA_EN BIT(0)
  596. #define MTK_DMA_BUSY_TIMEOUT_US 1000000
  597. -/* QDMA Reset Index Register */
  598. -#define MTK_QDMA_RST_IDX 0x1A08
  599. -
  600. -/* QDMA Delay Interrupt Register */
  601. -#define MTK_QDMA_DELAY_INT 0x1A0C
  602. -
  603. /* QDMA Flow Control Register */
  604. -#define MTK_QDMA_FC_THRES 0x1A10
  605. #define FC_THRES_DROP_MODE BIT(20)
  606. #define FC_THRES_DROP_EN (7 << 16)
  607. #define FC_THRES_MIN 0x4444
  608. /* QDMA Interrupt Status Register */
  609. -#define MTK_QDMA_INT_STATUS 0x1A18
  610. #define MTK_RX_DONE_DLY BIT(30)
  611. #define MTK_TX_DONE_DLY BIT(28)
  612. #define MTK_RX_DONE_INT3 BIT(19)
  613. @@ -244,55 +200,8 @@
  614. #define MTK_TX_DONE_INT MTK_TX_DONE_DLY
  615. /* QDMA Interrupt grouping registers */
  616. -#define MTK_QDMA_INT_GRP1 0x1a20
  617. -#define MTK_QDMA_INT_GRP2 0x1a24
  618. #define MTK_RLS_DONE_INT BIT(0)
  619. -/* QDMA Interrupt Status Register */
  620. -#define MTK_QDMA_INT_MASK 0x1A1C
  621. -
  622. -/* QDMA Interrupt Mask Register */
  623. -#define MTK_QDMA_HRED2 0x1A44
  624. -
  625. -/* QDMA TX Forward CPU Pointer Register */
  626. -#define MTK_QTX_CTX_PTR 0x1B00
  627. -
  628. -/* QDMA TX Forward DMA Pointer Register */
  629. -#define MTK_QTX_DTX_PTR 0x1B04
  630. -
  631. -/* QDMA TX Release CPU Pointer Register */
  632. -#define MTK_QTX_CRX_PTR 0x1B10
  633. -
  634. -/* QDMA TX Release DMA Pointer Register */
  635. -#define MTK_QTX_DRX_PTR 0x1B14
  636. -
  637. -/* QDMA FQ Head Pointer Register */
  638. -#define MTK_QDMA_FQ_HEAD 0x1B20
  639. -
  640. -/* QDMA FQ Head Pointer Register */
  641. -#define MTK_QDMA_FQ_TAIL 0x1B24
  642. -
  643. -/* QDMA FQ Free Page Counter Register */
  644. -#define MTK_QDMA_FQ_CNT 0x1B28
  645. -
  646. -/* QDMA FQ Free Page Buffer Length Register */
  647. -#define MTK_QDMA_FQ_BLEN 0x1B2C
  648. -
  649. -/* GMA1 counter / statics register */
  650. -#define MTK_GDM1_RX_GBCNT_L 0x2400
  651. -#define MTK_GDM1_RX_GBCNT_H 0x2404
  652. -#define MTK_GDM1_RX_GPCNT 0x2408
  653. -#define MTK_GDM1_RX_OERCNT 0x2410
  654. -#define MTK_GDM1_RX_FERCNT 0x2414
  655. -#define MTK_GDM1_RX_SERCNT 0x2418
  656. -#define MTK_GDM1_RX_LENCNT 0x241c
  657. -#define MTK_GDM1_RX_CERCNT 0x2420
  658. -#define MTK_GDM1_RX_FCCNT 0x2424
  659. -#define MTK_GDM1_TX_SKIPCNT 0x2428
  660. -#define MTK_GDM1_TX_COLCNT 0x242c
  661. -#define MTK_GDM1_TX_GBCNT_L 0x2430
  662. -#define MTK_GDM1_TX_GBCNT_H 0x2434
  663. -#define MTK_GDM1_TX_GPCNT 0x2438
  664. #define MTK_STAT_OFFSET 0x40
  665. #define MTK_WDMA0_BASE 0x2800
  666. @@ -854,8 +763,46 @@ struct mtk_tx_dma_desc_info {
  667. u8 last:1;
  668. };
  669. +struct mtk_reg_map {
  670. + u32 tx_irq_mask;
  671. + u32 tx_irq_status;
  672. + struct {
  673. + u32 rx_ptr; /* rx base pointer */
  674. + u32 rx_cnt_cfg; /* rx max count configuration */
  675. + u32 pcrx_ptr; /* rx cpu pointer */
  676. + u32 glo_cfg; /* global configuration */
  677. + u32 rst_idx; /* reset index */
  678. + u32 delay_irq; /* delay interrupt */
  679. + u32 irq_status; /* interrupt status */
  680. + u32 irq_mask; /* interrupt mask */
  681. + u32 int_grp;
  682. + } pdma;
  683. + struct {
  684. + u32 qtx_cfg; /* tx queue configuration */
  685. + u32 rx_ptr; /* rx base pointer */
  686. + u32 rx_cnt_cfg; /* rx max count configuration */
  687. + u32 qcrx_ptr; /* rx cpu pointer */
  688. + u32 glo_cfg; /* global configuration */
  689. + u32 rst_idx; /* reset index */
  690. + u32 delay_irq; /* delay interrupt */
  691. + u32 fc_th; /* flow control */
  692. + u32 int_grp;
  693. + u32 hred; /* interrupt mask */
  694. + u32 ctx_ptr; /* tx acquire cpu pointer */
  695. + u32 dtx_ptr; /* tx acquire dma pointer */
  696. + u32 crx_ptr; /* tx release cpu pointer */
  697. + u32 drx_ptr; /* tx release dma pointer */
  698. + u32 fq_head; /* fq head pointer */
  699. + u32 fq_tail; /* fq tail pointer */
  700. + u32 fq_count; /* fq free page count */
  701. + u32 fq_blen; /* fq free page buffer length */
  702. + } qdma;
  703. + u32 gdm1_cnt;
  704. +};
  705. +
  706. /* struct mtk_eth_data - This is the structure holding all differences
  707. * among various plaforms
  708. + * @reg_map Soc register map.
  709. * @ana_rgc3: The offset for register ANA_RGC3 related to
  710. * sgmiisys syscon
  711. * @caps Flags shown the extra capability for the SoC
  712. @@ -868,6 +815,7 @@ struct mtk_tx_dma_desc_info {
  713. * @rxd_size Rx DMA descriptor size.
  714. */
  715. struct mtk_soc_data {
  716. + const struct mtk_reg_map *reg_map;
  717. u32 ana_rgc3;
  718. u32 caps;
  719. u32 required_clks;
  720. @@ -995,8 +943,6 @@ struct mtk_eth {
  721. u32 tx_bytes;
  722. struct dim tx_dim;
  723. - u32 tx_int_mask_reg;
  724. - u32 tx_int_status_reg;
  725. u32 rx_dma_l4_valid;
  726. int ip_align;