750-v6.5-05-net-ethernet-mtk_eth_soc-add-version-in-mtk_soc_data.patch

From 5d8d05fbf804b4485646d39551ac27452e45afd3 Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi <[email protected]>
Date: Tue, 25 Jul 2023 01:52:02 +0100
Subject: [PATCH 099/250] net: ethernet: mtk_eth_soc: add version in
 mtk_soc_data

Introduce a version field in the mtk_soc_data structure in order to make
the chipset-specific configuration in the mtk_eth driver easier to
maintain. Get rid of the MTK_NETSYS_V2 bit in the chip capabilities.

This is a preliminary patch to introduce support for the MT7988 SoC.
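
As a rough stand-alone sketch of the check pattern being converted here
(the trimmed structs below are illustrative stand-ins, not the driver's
real definitions; only the two helpers mirror what this patch adds to
mtk_eth_soc.h):

  /* Stand-in types for illustration only; the real driver structs
   * carry many more fields.
   */
  #include <stdbool.h>
  #include <stdio.h>

  struct mtk_soc_data {
          unsigned char version;  /* 1 = NETSYS v1, 2 = NETSYS v2 */
  };

  struct mtk_eth {
          const struct mtk_soc_data *soc;
  };

  static bool mtk_is_netsys_v1(struct mtk_eth *eth)
  {
          return eth->soc->version == 1;
  }

  static bool mtk_is_netsys_v2_or_greater(struct mtk_eth *eth)
  {
          return eth->soc->version > 1;
  }

  int main(void)
  {
          const struct mtk_soc_data mt7986 = { .version = 2 };
          struct mtk_eth eth = { .soc = &mt7986 };

          /* replaces MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) */
          if (mtk_is_netsys_v2_or_greater(&eth))
                  printf("program NETSYS v2 descriptor layout\n");

          /* replaces !MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) */
          if (mtk_is_netsys_v1(&eth))
                  printf("keep NETSYS v1 behaviour\n");

          return 0;
  }

Callers that really mean "exactly NETSYS v2", such as
mtk_page_pool_enabled(), compare eth->soc->version directly, as the
diff below shows.
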
Signed-off-by: Lorenzo Bianconi <[email protected]>
Signed-off-by: Daniel Golle <[email protected]>
Link: https://lore.kernel.org/r/e52fae302ca135436e5cdd26d38d87be2da63055.1690246066.git.daniel@makrotopia.org
Signed-off-by: Jakub Kicinski <[email protected]>
---
 drivers/net/ethernet/mediatek/mtk_eth_soc.c  | 55 +++++++++++--------
 drivers/net/ethernet/mediatek/mtk_eth_soc.h  | 36 +++++++-----
 drivers/net/ethernet/mediatek/mtk_ppe.c      | 18 +++---
 .../net/ethernet/mediatek/mtk_ppe_offload.c  |  2 +-
 drivers/net/ethernet/mediatek/mtk_wed.c      |  4 +-
 5 files changed, 66 insertions(+), 49 deletions(-)

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -580,7 +580,7 @@ static void mtk_set_queue_speed(struct m
FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v1(eth))
val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
if (IS_ENABLED(CONFIG_SOC_MT7621)) {
@@ -956,7 +956,7 @@ static bool mtk_rx_get_desc(struct mtk_e
rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
}
@@ -1014,7 +1014,7 @@ static int mtk_init_fq_dma(struct mtk_et
txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
txd->txd4 = 0;
- if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
txd->txd5 = 0;
txd->txd6 = 0;
txd->txd7 = 0;
@@ -1205,7 +1205,7 @@ static void mtk_tx_set_dma_desc(struct n
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
mtk_tx_set_dma_desc_v2(dev, txd, info);
else
mtk_tx_set_dma_desc_v1(dev, txd, info);
@@ -1512,7 +1512,7 @@ static void mtk_update_rx_cpu_idx(struct
static bool mtk_page_pool_enabled(struct mtk_eth *eth)
{
- return MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2);
+ return eth->soc->version == 2;
}
static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
@@ -1854,7 +1854,7 @@ static int mtk_poll_rx(struct napi_struc
break;
/* find out which mac the packet come from. values start at 1 */
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
!(trxd.rxd4 & RX_DMA_SPECIAL_TAG))
@@ -1950,7 +1950,7 @@ static int mtk_poll_rx(struct napi_struc
skb->dev = netdev;
bytes += skb->len;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
if (hash != MTK_RXD5_FOE_ENTRY)
@@ -1975,8 +1975,8 @@ static int mtk_poll_rx(struct napi_struc
/* When using VLAN untagging in combination with DSA, the
* hardware treats the MTK special tag as a VLAN and untags it.
*/
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) &&
- (trxd.rxd2 & RX_DMA_VTAG) && netdev_uses_dsa(netdev)) {
+ if (mtk_is_netsys_v1(eth) && (trxd.rxd2 & RX_DMA_VTAG) &&
+ netdev_uses_dsa(netdev)) {
unsigned int port = RX_DMA_VPID(trxd.rxd3) & GENMASK(2, 0);
if (port < ARRAY_SIZE(eth->dsa_meta) &&
@@ -2286,7 +2286,7 @@ static int mtk_tx_alloc(struct mtk_eth *
txd->txd2 = next_ptr;
txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
txd->txd4 = 0;
- if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
txd->txd5 = 0;
txd->txd6 = 0;
txd->txd7 = 0;
@@ -2339,14 +2339,14 @@ static int mtk_tx_alloc(struct mtk_eth *
FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v1(eth))
val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
ofs += MTK_QTX_OFFSET;
}
val = MTK_QDMA_TX_SCH_MAX_WFQ | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4);
} else {
mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
@@ -2475,7 +2475,7 @@ static int mtk_rx_alloc(struct mtk_eth *
rxd->rxd3 = 0;
rxd->rxd4 = 0;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
rxd->rxd5 = 0;
rxd->rxd6 = 0;
rxd->rxd7 = 0;
@@ -3023,7 +3023,7 @@ static int mtk_start_dma(struct mtk_eth
MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
MTK_CHK_DDONE_EN | MTK_LEAKY_BUCKET_EN;
@@ -3165,7 +3165,7 @@ static int mtk_open(struct net_device *d
phylink_start(mac->phylink);
netif_tx_start_all_queues(dev);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
return 0;
if (mtk_uses_dsa(dev) && !eth->prog) {
@@ -3430,7 +3430,7 @@ static void mtk_hw_reset(struct mtk_eth
{
u32 val;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
val = RSTCTRL_PPE0_V2;
} else {
@@ -3442,7 +3442,7 @@ static void mtk_hw_reset(struct mtk_eth
ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
0x3ffffff);
}
@@ -3468,7 +3468,7 @@ static void mtk_hw_warm_reset(struct mtk
return;
}
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V2;
else
rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0;
@@ -3638,7 +3638,7 @@ static int mtk_hw_init(struct mtk_eth *e
else
mtk_hw_reset(eth);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
/* Set FE to PDMAv2 if necessary */
val = mtk_r32(eth, MTK_FE_GLO_MISC);
mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
@@ -3675,7 +3675,7 @@ static int mtk_hw_init(struct mtk_eth *e
*/
val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v1(eth)) {
val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
@@ -3697,7 +3697,7 @@ static int mtk_hw_init(struct mtk_eth *e
mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
/* PSE should not drop port8 and port9 packets from WDMA Tx */
mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
@@ -4486,7 +4486,7 @@ static int mtk_probe(struct platform_dev
}
}
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
err = -EINVAL;
@@ -4594,9 +4594,8 @@ static int mtk_probe(struct platform_dev
}
if (eth->soc->offload_version) {
- u32 num_ppe;
+ u32 num_ppe = mtk_is_netsys_v2_or_greater(eth) ? 2 : 1;
- num_ppe = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
for (i = 0; i < num_ppe; i++) {
u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
@@ -4688,6 +4687,7 @@ static const struct mtk_soc_data mt2701_
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
+ .version = 1,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
@@ -4704,6 +4704,7 @@ static const struct mtk_soc_data mt7621_
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7621_CLKS_BITMAP,
.required_pctl = false,
+ .version = 1,
.offload_version = 1,
.hash_offset = 2,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
@@ -4724,6 +4725,7 @@ static const struct mtk_soc_data mt7622_
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7622_CLKS_BITMAP,
.required_pctl = false,
+ .version = 1,
.offload_version = 2,
.hash_offset = 2,
.has_accounting = true,
@@ -4744,6 +4746,7 @@ static const struct mtk_soc_data mt7623_
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
+ .version = 1,
.offload_version = 1,
.hash_offset = 2,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
@@ -4766,6 +4769,7 @@ static const struct mtk_soc_data mt7629_
.required_clks = MT7629_CLKS_BITMAP,
.required_pctl = false,
.has_accounting = true,
+ .version = 1,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
@@ -4783,6 +4787,7 @@ static const struct mtk_soc_data mt7981_
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7981_CLKS_BITMAP,
.required_pctl = false,
+ .version = 2,
.offload_version = 2,
.hash_offset = 4,
.has_accounting = true,
@@ -4804,6 +4809,7 @@ static const struct mtk_soc_data mt7986_
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7986_CLKS_BITMAP,
.required_pctl = false,
+ .version = 2,
.offload_version = 2,
.hash_offset = 4,
.has_accounting = true,
@@ -4824,6 +4830,7 @@ static const struct mtk_soc_data rt5350_
.hw_features = MTK_HW_FEATURES_MT7628,
.required_clks = MT7628_CLKS_BITMAP,
.required_pctl = false,
+ .version = 1,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -820,7 +820,6 @@ enum mkt_eth_capabilities {
MTK_SHARED_INT_BIT,
MTK_TRGMII_MT7621_CLK_BIT,
MTK_QDMA_BIT,
- MTK_NETSYS_V2_BIT,
MTK_SOC_MT7628_BIT,
MTK_RSTCTRL_PPE1_BIT,
MTK_U3_COPHY_V2_BIT,
@@ -855,7 +854,6 @@ enum mkt_eth_capabilities {
#define MTK_SHARED_INT BIT(MTK_SHARED_INT_BIT)
#define MTK_TRGMII_MT7621_CLK BIT(MTK_TRGMII_MT7621_CLK_BIT)
#define MTK_QDMA BIT(MTK_QDMA_BIT)
-#define MTK_NETSYS_V2 BIT(MTK_NETSYS_V2_BIT)
#define MTK_SOC_MT7628 BIT(MTK_SOC_MT7628_BIT)
#define MTK_RSTCTRL_PPE1 BIT(MTK_RSTCTRL_PPE1_BIT)
#define MTK_U3_COPHY_V2 BIT(MTK_U3_COPHY_V2_BIT)
@@ -934,11 +932,11 @@ enum mkt_eth_capabilities {
#define MT7981_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \
MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
MTK_MUX_U3_GMAC2_TO_QPHY | MTK_U3_COPHY_V2 | \
- MTK_NETSYS_V2 | MTK_RSTCTRL_PPE1)
+ MTK_RSTCTRL_PPE1)
#define MT7986_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | \
MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
- MTK_NETSYS_V2 | MTK_RSTCTRL_PPE1)
+ MTK_RSTCTRL_PPE1)
struct mtk_tx_dma_desc_info {
dma_addr_t addr;
@@ -1009,6 +1007,7 @@ struct mtk_reg_map {
* @required_pctl A bool value to show whether the SoC requires
* the extra setup for those pins used by GMAC.
* @hash_offset Flow table hash offset.
+ * @version SoC version.
* @foe_entry_size Foe table entry size.
* @has_accounting Bool indicating support for accounting of
* offloaded flows.
@@ -1027,6 +1026,7 @@ struct mtk_soc_data {
bool required_pctl;
u8 offload_version;
u8 hash_offset;
+ u8 version;
u16 foe_entry_size;
netdev_features_t hw_features;
bool has_accounting;
@@ -1183,6 +1183,16 @@ struct mtk_mac {
/* the struct describing the SoC. these are declared in the soc_xyz.c files */
extern const struct of_device_id of_mtk_match[];
+static inline bool mtk_is_netsys_v1(struct mtk_eth *eth)
+{
+ return eth->soc->version == 1;
+}
+
+static inline bool mtk_is_netsys_v2_or_greater(struct mtk_eth *eth)
+{
+ return eth->soc->version > 1;
+}
+
static inline struct mtk_foe_entry *
mtk_foe_get_entry(struct mtk_ppe *ppe, u16 hash)
{
@@ -1193,7 +1203,7 @@ mtk_foe_get_entry(struct mtk_ppe *ppe, u
static inline u32 mtk_get_ib1_ts_mask(struct mtk_eth *eth)
{
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
return MTK_FOE_IB1_BIND_TIMESTAMP_V2;
return MTK_FOE_IB1_BIND_TIMESTAMP;
@@ -1201,7 +1211,7 @@ static inline u32 mtk_get_ib1_ts_mask(st
static inline u32 mtk_get_ib1_ppoe_mask(struct mtk_eth *eth)
{
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
return MTK_FOE_IB1_BIND_PPPOE_V2;
return MTK_FOE_IB1_BIND_PPPOE;
@@ -1209,7 +1219,7 @@ static inline u32 mtk_get_ib1_ppoe_mask(
static inline u32 mtk_get_ib1_vlan_tag_mask(struct mtk_eth *eth)
{
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
return MTK_FOE_IB1_BIND_VLAN_TAG_V2;
return MTK_FOE_IB1_BIND_VLAN_TAG;
@@ -1217,7 +1227,7 @@ static inline u32 mtk_get_ib1_vlan_tag_m
static inline u32 mtk_get_ib1_vlan_layer_mask(struct mtk_eth *eth)
{
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
return MTK_FOE_IB1_BIND_VLAN_LAYER_V2;
return MTK_FOE_IB1_BIND_VLAN_LAYER;
@@ -1225,7 +1235,7 @@ static inline u32 mtk_get_ib1_vlan_layer
static inline u32 mtk_prep_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
{
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val);
return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, val);
@@ -1233,7 +1243,7 @@ static inline u32 mtk_prep_ib1_vlan_laye
static inline u32 mtk_get_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
{
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val);
return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, val);
@@ -1241,7 +1251,7 @@ static inline u32 mtk_get_ib1_vlan_layer
static inline u32 mtk_get_ib1_pkt_type_mask(struct mtk_eth *eth)
{
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
return MTK_FOE_IB1_PACKET_TYPE_V2;
return MTK_FOE_IB1_PACKET_TYPE;
@@ -1249,7 +1259,7 @@ static inline u32 mtk_get_ib1_pkt_type_m
static inline u32 mtk_get_ib1_pkt_type(struct mtk_eth *eth, u32 val)
{
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE_V2, val);
return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, val);
@@ -1257,7 +1267,7 @@ static inline u32 mtk_get_ib1_pkt_type(s
static inline u32 mtk_get_ib2_multicast_mask(struct mtk_eth *eth)
{
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
return MTK_FOE_IB2_MULTICAST_V2;
return MTK_FOE_IB2_MULTICAST;
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -207,7 +207,7 @@ int mtk_foe_entry_prepare(struct mtk_eth
memset(entry, 0, sizeof(*entry));
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE_V2, type) |
FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
@@ -271,7 +271,7 @@ int mtk_foe_entry_set_pse_port(struct mt
u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
u32 val = *ib2;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
val &= ~MTK_FOE_IB2_DEST_PORT_V2;
val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, port);
} else {
@@ -422,7 +422,7 @@ int mtk_foe_entry_set_wdma(struct mtk_et
struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
*ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
*ib2 |= FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
MTK_FOE_IB2_WDMA_WINFO_V2;
@@ -446,7 +446,7 @@ int mtk_foe_entry_set_queue(struct mtk_e
{
u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
*ib2 &= ~MTK_FOE_IB2_QID_V2;
*ib2 |= FIELD_PREP(MTK_FOE_IB2_QID_V2, queue);
*ib2 |= MTK_FOE_IB2_PSE_QOS_V2;
@@ -601,7 +601,7 @@ __mtk_foe_entry_commit(struct mtk_ppe *p
struct mtk_foe_entry *hwe;
u32 val;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP_V2;
entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP_V2,
timestamp);
@@ -617,7 +617,7 @@ __mtk_foe_entry_commit(struct mtk_ppe *p
hwe->ib1 = entry->ib1;
if (ppe->accounting) {
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
val = MTK_FOE_IB2_MIB_CNT_V2;
else
val = MTK_FOE_IB2_MIB_CNT;
@@ -965,7 +965,7 @@ void mtk_ppe_start(struct mtk_ppe *ppe)
MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
MTK_PPE_ENTRIES_SHIFT);
- if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(ppe->eth))
val |= MTK_PPE_TB_CFG_INFO_SEL;
ppe_w32(ppe, MTK_PPE_TB_CFG, val);
@@ -981,7 +981,7 @@ void mtk_ppe_start(struct mtk_ppe *ppe)
MTK_PPE_FLOW_CFG_IP4_NAPT |
MTK_PPE_FLOW_CFG_IP4_DSLITE |
MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
- if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(ppe->eth))
val |= MTK_PPE_MD_TOAP_BYP_CRSN0 |
MTK_PPE_MD_TOAP_BYP_CRSN1 |
MTK_PPE_MD_TOAP_BYP_CRSN2 |
@@ -1023,7 +1023,7 @@ void mtk_ppe_start(struct mtk_ppe *ppe)
ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);
- if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(ppe->eth)) {
ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777);
ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f);
}
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -193,7 +193,7 @@ mtk_flow_set_output_device(struct mtk_et
if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
mtk_foe_entry_set_wdma(eth, foe, info.wdma_idx, info.queue,
info.bss, info.wcid);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
switch (info.wdma_idx) {
case 0:
pse_port = 8;
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -1084,7 +1084,7 @@ mtk_wed_rx_reset(struct mtk_wed_device *
} else {
struct mtk_eth *eth = dev->hw->eth;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
wed_set(dev, MTK_WED_RESET_IDX,
MTK_WED_RESET_IDX_RX_V2);
else
@@ -1806,7 +1806,7 @@ void mtk_wed_add_hw(struct device_node *
hw->wdma = wdma;
hw->index = index;
hw->irq = irq;
- hw->version = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
+ hw->version = mtk_is_netsys_v1(eth) ? 1 : 2;
if (hw->version == 1) {
hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,