721-v6.0-net-ethernet-mtk_eth_wed-add-wed-support-for-mt7986-.patch (30 KB)

  1. From 463a71af080fbc77339bee2037fb1e081e3824f7 Mon Sep 17 00:00:00 2001
  2. Message-Id: <463a71af080fbc77339bee2037fb1e081e3824f7.1662886034.git.lorenzo@kernel.org>
  3. In-Reply-To: <e5ecb4f619197b93fa682d722452dc8412864cdb.1662886033.git.lorenzo@kernel.org>
  4. References: <e5ecb4f619197b93fa682d722452dc8412864cdb.1662886033.git.lorenzo@kernel.org>
  5. From: Lorenzo Bianconi <[email protected]>
  6. Date: Sat, 27 Aug 2022 16:15:14 +0200
  7. Subject: [PATCH net-next 2/5] net: ethernet: mtk_eth_wed: add wed support for
  8. mt7986 chipset
  9. Introduce Wireless Ethernet Dispatcher support on transmission side
  10. for mt7986 chipset
  11. Co-developed-by: Bo Jiao <[email protected]>
  12. Signed-off-by: Bo Jiao <[email protected]>
  13. Co-developed-by: Sujuan Chen <[email protected]>
  14. Signed-off-by: Sujuan Chen <[email protected]>
  15. Signed-off-by: Lorenzo Bianconi <[email protected]>
  16. ---
  17. drivers/net/ethernet/mediatek/mtk_eth_soc.c | 34 +-
  18. drivers/net/ethernet/mediatek/mtk_wed.c | 371 ++++++++++++++----
  19. drivers/net/ethernet/mediatek/mtk_wed.h | 8 +-
  20. .../net/ethernet/mediatek/mtk_wed_debugfs.c | 3 +
  21. drivers/net/ethernet/mediatek/mtk_wed_regs.h | 81 +++-
  22. include/linux/soc/mediatek/mtk_wed.h | 8 +
  23. 6 files changed, 408 insertions(+), 97 deletions(-)
  24. --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
  25. +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
  26. @@ -3890,6 +3890,7 @@ void mtk_eth_set_dma_device(struct mtk_e
  27. static int mtk_probe(struct platform_device *pdev)
  28. {
  29. + struct resource *res = NULL;
  30. struct device_node *mac_np;
  31. struct mtk_eth *eth;
  32. int err, i;
  33. @@ -3970,16 +3971,31 @@ static int mtk_probe(struct platform_dev
  34. }
  35. }
  36. - for (i = 0;; i++) {
  37. - struct device_node *np = of_parse_phandle(pdev->dev.of_node,
  38. - "mediatek,wed", i);
  39. - void __iomem *wdma;
  40. -
  41. - if (!np || i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
  42. - break;
  43. -
  44. - wdma = eth->base + eth->soc->reg_map->wdma_base[i];
  45. - mtk_wed_add_hw(np, eth, wdma, i);
  46. + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
  47. + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  48. + if (!res)
  49. + return -EINVAL;
  50. + }
  51. +
  52. + if (eth->soc->offload_version) {
  53. + for (i = 0;; i++) {
  54. + struct device_node *np;
  55. + phys_addr_t wdma_phy;
  56. + u32 wdma_base;
  57. +
  58. + if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
  59. + break;
  60. +
  61. + np = of_parse_phandle(pdev->dev.of_node,
  62. + "mediatek,wed", i);
  63. + if (!np)
  64. + break;
  65. +
  66. + wdma_base = eth->soc->reg_map->wdma_base[i];
  67. + wdma_phy = res ? res->start + wdma_base : 0;
  68. + mtk_wed_add_hw(np, eth, eth->base + wdma_base,
  69. + wdma_phy, i);
  70. + }
  71. }
  72. for (i = 0; i < 3; i++) {
  73. --- a/drivers/net/ethernet/mediatek/mtk_wed.c
  74. +++ b/drivers/net/ethernet/mediatek/mtk_wed.c
  75. @@ -25,6 +25,11 @@
  76. #define MTK_WED_TX_RING_SIZE 2048
  77. #define MTK_WED_WDMA_RING_SIZE 1024
  78. +#define MTK_WED_MAX_GROUP_SIZE 0x100
  79. +#define MTK_WED_VLD_GROUP_SIZE 0x40
  80. +#define MTK_WED_PER_GROUP_PKT 128
  81. +
  82. +#define MTK_WED_FBUF_SIZE 128
  83. static struct mtk_wed_hw *hw_list[2];
  84. static DEFINE_MUTEX(hw_lock);
  85. @@ -150,10 +155,17 @@ mtk_wed_buffer_alloc(struct mtk_wed_devi
  86. desc->buf0 = cpu_to_le32(buf_phys);
  87. desc->buf1 = cpu_to_le32(buf_phys + txd_size);
  88. - ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
  89. - FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
  90. - MTK_WED_BUF_SIZE - txd_size) |
  91. - MTK_WDMA_DESC_CTRL_LAST_SEG1;
  92. +
  93. + if (dev->hw->version == 1)
  94. + ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
  95. + FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
  96. + MTK_WED_BUF_SIZE - txd_size) |
  97. + MTK_WDMA_DESC_CTRL_LAST_SEG1;
  98. + else
  99. + ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
  100. + FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2,
  101. + MTK_WED_BUF_SIZE - txd_size) |
  102. + MTK_WDMA_DESC_CTRL_LAST_SEG0;
  103. desc->ctrl = cpu_to_le32(ctrl);
  104. desc->info = 0;
  105. desc++;
  106. @@ -209,7 +221,7 @@ mtk_wed_free_ring(struct mtk_wed_device
  107. if (!ring->desc)
  108. return;
  109. - dma_free_coherent(dev->hw->dev, ring->size * sizeof(*ring->desc),
  110. + dma_free_coherent(dev->hw->dev, ring->size * ring->desc_size,
  111. ring->desc, ring->desc_phys);
  112. }
  113. @@ -229,6 +241,14 @@ mtk_wed_set_ext_int(struct mtk_wed_devic
  114. {
  115. u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
  116. + if (dev->hw->version == 1)
  117. + mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
  118. + else
  119. + mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
  120. + MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
  121. + MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
  122. + MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
  123. +
  124. if (!dev->hw->num_flows)
  125. mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
  126. @@ -237,6 +257,20 @@ mtk_wed_set_ext_int(struct mtk_wed_devic
  127. }
  128. static void
  129. +mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable)
  130. +{
  131. + if (enable) {
  132. + wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
  133. + wed_w32(dev, MTK_WED_TXP_DW1,
  134. + FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103));
  135. + } else {
  136. + wed_w32(dev, MTK_WED_TXP_DW1,
  137. + FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100));
  138. + wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
  139. + }
  140. +}
  141. +
  142. +static void
  143. mtk_wed_dma_disable(struct mtk_wed_device *dev)
  144. {
  145. wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
  146. @@ -249,12 +283,22 @@ mtk_wed_dma_disable(struct mtk_wed_devic
  147. MTK_WED_GLO_CFG_TX_DMA_EN |
  148. MTK_WED_GLO_CFG_RX_DMA_EN);
  149. - regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
  150. wdma_m32(dev, MTK_WDMA_GLO_CFG,
  151. MTK_WDMA_GLO_CFG_TX_DMA_EN |
  152. MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
  153. - MTK_WDMA_GLO_CFG_RX_INFO2_PRERES |
  154. - MTK_WDMA_GLO_CFG_RX_INFO3_PRERES, 0);
  155. + MTK_WDMA_GLO_CFG_RX_INFO2_PRERES, 0);
  156. +
  157. + if (dev->hw->version == 1) {
  158. + regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
  159. + wdma_m32(dev, MTK_WDMA_GLO_CFG,
  160. + MTK_WDMA_GLO_CFG_RX_INFO3_PRERES, 0);
  161. + } else {
  162. + wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
  163. + MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
  164. + MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
  165. +
  166. + mtk_wed_set_512_support(dev, false);
  167. + }
  168. }
  169. static void
  170. @@ -293,7 +337,7 @@ mtk_wed_detach(struct mtk_wed_device *de
  171. mtk_wed_free_buffer(dev);
  172. mtk_wed_free_tx_rings(dev);
  173. - if (of_dma_is_coherent(wlan_node))
  174. + if (of_dma_is_coherent(wlan_node) && hw->hifsys)
  175. regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
  176. BIT(hw->index), BIT(hw->index));
  177. @@ -308,14 +352,69 @@ mtk_wed_detach(struct mtk_wed_device *de
  178. mutex_unlock(&hw_lock);
  179. }
  180. +#define PCIE_BASE_ADDR0 0x11280000
  181. +static void
  182. +mtk_wed_bus_init(struct mtk_wed_device *dev)
  183. +{
  184. + struct device_node *np = dev->hw->eth->dev->of_node;
  185. + struct regmap *regs;
  186. + u32 val;
  187. +
  188. + regs = syscon_regmap_lookup_by_phandle(np, "mediatek,wed-pcie");
  189. + if (IS_ERR(regs))
  190. + return;
  191. +
  192. + regmap_update_bits(regs, 0, BIT(0), BIT(0));
  193. +
  194. + wed_w32(dev, MTK_WED_PCIE_INT_CTRL,
  195. + FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2));
  196. +
  197. + /* pcie interrupt control: pola/source selection */
  198. + wed_set(dev, MTK_WED_PCIE_INT_CTRL,
  199. + MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
  200. + FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1));
  201. + wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
  202. +
  203. + val = wed_r32(dev, MTK_WED_PCIE_CFG_INTM);
  204. + val = wed_r32(dev, MTK_WED_PCIE_CFG_BASE);
  205. + wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180);
  206. + wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184);
  207. +
  208. + val = wed_r32(dev, MTK_WED_PCIE_CFG_INTM);
  209. + val = wed_r32(dev, MTK_WED_PCIE_CFG_BASE);
  210. +
  211. + /* pcie interrupt status trigger register */
  212. + wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
  213. + wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);
  214. +
  215. + /* pola setting */
  216. + val = wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
  217. + wed_set(dev, MTK_WED_PCIE_INT_CTRL, MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA);
  218. +}
  219. +
  220. +static void
  221. +mtk_wed_set_wpdma(struct mtk_wed_device *dev)
  222. +{
  223. + if (dev->hw->version == 1) {
  224. + wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
  225. + } else {
  226. + mtk_wed_bus_init(dev);
  227. +
  228. + wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
  229. + wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
  230. + wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
  231. + wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
  232. + }
  233. +}
  234. +
  235. static void
  236. mtk_wed_hw_init_early(struct mtk_wed_device *dev)
  237. {
  238. u32 mask, set;
  239. - u32 offset;
  240. mtk_wed_stop(dev);
  241. mtk_wed_reset(dev, MTK_WED_RESET_WED);
  242. + mtk_wed_set_wpdma(dev);
  243. mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
  244. MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
  245. @@ -325,17 +424,33 @@ mtk_wed_hw_init_early(struct mtk_wed_dev
  246. MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
  247. wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
  248. - wdma_set(dev, MTK_WDMA_GLO_CFG,
  249. - MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
  250. - MTK_WDMA_GLO_CFG_RX_INFO2_PRERES |
  251. - MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
  252. -
  253. - offset = dev->hw->index ? 0x04000400 : 0;
  254. - wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
  255. - wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
  256. + if (dev->hw->version == 1) {
  257. + u32 offset = dev->hw->index ? 0x04000400 : 0;
  258. - wed_w32(dev, MTK_WED_PCIE_CFG_BASE, MTK_PCIE_BASE(dev->hw->index));
  259. - wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
  260. + wdma_set(dev, MTK_WDMA_GLO_CFG,
  261. + MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
  262. + MTK_WDMA_GLO_CFG_RX_INFO2_PRERES |
  263. + MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
  264. +
  265. + wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
  266. + wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
  267. + wed_w32(dev, MTK_WED_PCIE_CFG_BASE,
  268. + MTK_PCIE_BASE(dev->hw->index));
  269. + } else {
  270. + wed_w32(dev, MTK_WED_WDMA_CFG_BASE, dev->hw->wdma_phy);
  271. + wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_ETH_DMAD_FMT);
  272. + wed_w32(dev, MTK_WED_WDMA_OFFSET0,
  273. + FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_INTS,
  274. + MTK_WDMA_INT_STATUS) |
  275. + FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_CFG,
  276. + MTK_WDMA_GLO_CFG));
  277. +
  278. + wed_w32(dev, MTK_WED_WDMA_OFFSET1,
  279. + FIELD_PREP(MTK_WED_WDMA_OFST1_TX_CTRL,
  280. + MTK_WDMA_RING_TX(0)) |
  281. + FIELD_PREP(MTK_WED_WDMA_OFST1_RX_CTRL,
  282. + MTK_WDMA_RING_RX(0)));
  283. + }
  284. }
  285. static void
  286. @@ -355,37 +470,65 @@ mtk_wed_hw_init(struct mtk_wed_device *d
  287. wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys);
  288. - wed_w32(dev, MTK_WED_TX_BM_TKID,
  289. - FIELD_PREP(MTK_WED_TX_BM_TKID_START,
  290. - dev->wlan.token_start) |
  291. - FIELD_PREP(MTK_WED_TX_BM_TKID_END,
  292. - dev->wlan.token_start + dev->wlan.nbuf - 1));
  293. -
  294. wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
  295. - wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
  296. - FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
  297. - MTK_WED_TX_BM_DYN_THR_HI);
  298. + if (dev->hw->version == 1) {
  299. + wed_w32(dev, MTK_WED_TX_BM_TKID,
  300. + FIELD_PREP(MTK_WED_TX_BM_TKID_START,
  301. + dev->wlan.token_start) |
  302. + FIELD_PREP(MTK_WED_TX_BM_TKID_END,
  303. + dev->wlan.token_start +
  304. + dev->wlan.nbuf - 1));
  305. + wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
  306. + FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
  307. + MTK_WED_TX_BM_DYN_THR_HI);
  308. + } else {
  309. + wed_w32(dev, MTK_WED_TX_BM_TKID_V2,
  310. + FIELD_PREP(MTK_WED_TX_BM_TKID_START,
  311. + dev->wlan.token_start) |
  312. + FIELD_PREP(MTK_WED_TX_BM_TKID_END,
  313. + dev->wlan.token_start +
  314. + dev->wlan.nbuf - 1));
  315. + wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
  316. + FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) |
  317. + MTK_WED_TX_BM_DYN_THR_HI_V2);
  318. + wed_w32(dev, MTK_WED_TX_TKID_CTRL,
  319. + MTK_WED_TX_TKID_CTRL_PAUSE |
  320. + FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
  321. + dev->buf_ring.size / 128) |
  322. + FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
  323. + dev->buf_ring.size / 128));
  324. + wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
  325. + FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
  326. + MTK_WED_TX_TKID_DYN_THR_HI);
  327. + }
  328. mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
  329. - wed_set(dev, MTK_WED_CTRL,
  330. - MTK_WED_CTRL_WED_TX_BM_EN |
  331. - MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
  332. + if (dev->hw->version == 1)
  333. + wed_set(dev, MTK_WED_CTRL,
  334. + MTK_WED_CTRL_WED_TX_BM_EN |
  335. + MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
  336. + else
  337. + wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
  338. wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
  339. }
  340. static void
  341. -mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size)
  342. +mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size)
  343. {
  344. + void *head = (void *)ring->desc;
  345. int i;
  346. for (i = 0; i < size; i++) {
  347. - desc[i].buf0 = 0;
  348. - desc[i].ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
  349. - desc[i].buf1 = 0;
  350. - desc[i].info = 0;
  351. + struct mtk_wdma_desc *desc;
  352. +
  353. + desc = (struct mtk_wdma_desc *)(head + i * ring->desc_size);
  354. + desc->buf0 = 0;
  355. + desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
  356. + desc->buf1 = 0;
  357. + desc->info = 0;
  358. }
  359. }
  360. @@ -436,12 +579,10 @@ mtk_wed_reset_dma(struct mtk_wed_device
  361. int i;
  362. for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) {
  363. - struct mtk_wdma_desc *desc = dev->tx_ring[i].desc;
  364. -
  365. - if (!desc)
  366. + if (!dev->tx_ring[i].desc)
  367. continue;
  368. - mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE);
  369. + mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE);
  370. }
  371. if (mtk_wed_poll_busy(dev))
  372. @@ -498,16 +639,16 @@ mtk_wed_reset_dma(struct mtk_wed_device
  373. static int
  374. mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
  375. - int size)
  376. + int size, u32 desc_size)
  377. {
  378. - ring->desc = dma_alloc_coherent(dev->hw->dev,
  379. - size * sizeof(*ring->desc),
  380. + ring->desc = dma_alloc_coherent(dev->hw->dev, size * desc_size,
  381. &ring->desc_phys, GFP_KERNEL);
  382. if (!ring->desc)
  383. return -ENOMEM;
  384. + ring->desc_size = desc_size;
  385. ring->size = size;
  386. - mtk_wed_ring_reset(ring->desc, size);
  387. + mtk_wed_ring_reset(ring, size);
  388. return 0;
  389. }
  390. @@ -515,9 +656,10 @@ mtk_wed_ring_alloc(struct mtk_wed_device
  391. static int
  392. mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
  393. {
  394. + u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
  395. struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];
  396. - if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE))
  397. + if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size))
  398. return -ENOMEM;
  399. wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
  400. @@ -546,16 +688,41 @@ mtk_wed_configure_irq(struct mtk_wed_dev
  401. MTK_WED_CTRL_WED_TX_BM_EN |
  402. MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
  403. - wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER,
  404. - MTK_WED_PCIE_INT_TRIGGER_STATUS);
  405. + if (dev->hw->version == 1) {
  406. + wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER,
  407. + MTK_WED_PCIE_INT_TRIGGER_STATUS);
  408. +
  409. + wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
  410. + MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
  411. + MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);
  412. - wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
  413. - MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
  414. - MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);
  415. + wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
  416. + } else {
  417. + /* initial tx interrupt trigger */
  418. + wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
  419. + MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
  420. + MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR |
  421. + MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN |
  422. + MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR |
  423. + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG,
  424. + dev->wlan.tx_tbit[0]) |
  425. + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG,
  426. + dev->wlan.tx_tbit[1]));
  427. +
  428. + /* initial txfree interrupt trigger */
  429. + wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE,
  430. + MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN |
  431. + MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR |
  432. + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
  433. + dev->wlan.txfree_tbit));
  434. +
  435. + wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
  436. + wed_set(dev, MTK_WED_WDMA_INT_CTRL,
  437. + FIELD_PREP(MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL,
  438. + dev->wdma_idx));
  439. + }
  440. - /* initail wdma interrupt agent */
  441. wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
  442. - wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
  443. wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
  444. wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask);
  445. @@ -580,14 +747,28 @@ mtk_wed_dma_enable(struct mtk_wed_device
  446. wdma_set(dev, MTK_WDMA_GLO_CFG,
  447. MTK_WDMA_GLO_CFG_TX_DMA_EN |
  448. MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
  449. - MTK_WDMA_GLO_CFG_RX_INFO2_PRERES |
  450. - MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
  451. + MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
  452. +
  453. + if (dev->hw->version == 1) {
  454. + wdma_set(dev, MTK_WDMA_GLO_CFG,
  455. + MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
  456. + } else {
  457. + wed_set(dev, MTK_WED_WPDMA_CTRL,
  458. + MTK_WED_WPDMA_CTRL_SDL1_FIXED);
  459. +
  460. + wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
  461. + MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
  462. + MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
  463. +
  464. + wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
  465. + MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
  466. + MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
  467. + }
  468. }
  469. static void
  470. mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
  471. {
  472. - u32 val;
  473. int i;
  474. for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
  475. @@ -598,14 +779,17 @@ mtk_wed_start(struct mtk_wed_device *dev
  476. mtk_wed_configure_irq(dev, irq_mask);
  477. mtk_wed_set_ext_int(dev, true);
  478. - val = dev->wlan.wpdma_phys |
  479. - MTK_PCIE_MIRROR_MAP_EN |
  480. - FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, dev->hw->index);
  481. -
  482. - if (dev->hw->index)
  483. - val |= BIT(1);
  484. - val |= BIT(0);
  485. - regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
  486. +
  487. + if (dev->hw->version == 1) {
  488. + u32 val = dev->wlan.wpdma_phys | MTK_PCIE_MIRROR_MAP_EN |
  489. + FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID,
  490. + dev->hw->index);
  491. +
  492. + val |= BIT(0) | (BIT(1) * !!dev->hw->index);
  493. + regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
  494. + } else {
  495. + mtk_wed_set_512_support(dev, true);
  496. + }
  497. mtk_wed_dma_enable(dev);
  498. dev->running = true;
  499. @@ -639,7 +823,9 @@ mtk_wed_attach(struct mtk_wed_device *de
  500. goto out;
  501. }
  502. - dev_info(&dev->wlan.pci_dev->dev, "attaching wed device %d\n", hw->index);
  503. + dev_info(&dev->wlan.pci_dev->dev,
  504. + "attaching wed device %d version %d\n",
  505. + hw->index, hw->version);
  506. dev->hw = hw;
  507. dev->dev = hw->dev;
  508. @@ -657,7 +843,9 @@ mtk_wed_attach(struct mtk_wed_device *de
  509. }
  510. mtk_wed_hw_init_early(dev);
  511. - regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, BIT(hw->index), 0);
  512. + if (hw->hifsys)
  513. + regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
  514. + BIT(hw->index), 0);
  515. out:
  516. mutex_unlock(&hw_lock);
  517. @@ -684,7 +872,8 @@ mtk_wed_tx_ring_setup(struct mtk_wed_dev
  518. BUG_ON(idx >= ARRAY_SIZE(dev->tx_ring));
  519. - if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE))
  520. + if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE,
  521. + sizeof(*ring->desc)))
  522. return -ENOMEM;
  523. if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
  524. @@ -711,21 +900,21 @@ static int
  525. mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
  526. {
  527. struct mtk_wed_ring *ring = &dev->txfree_ring;
  528. - int i;
  529. + int i, index = dev->hw->version == 1;
  530. /*
  531. * For txfree event handling, the same DMA ring is shared between WED
  532. * and WLAN. The WLAN driver accesses the ring index registers through
  533. * WED
  534. */
  535. - ring->reg_base = MTK_WED_RING_RX(1);
  536. + ring->reg_base = MTK_WED_RING_RX(index);
  537. ring->wpdma = regs;
  538. for (i = 0; i < 12; i += 4) {
  539. u32 val = readl(regs + i);
  540. - wed_w32(dev, MTK_WED_RING_RX(1) + i, val);
  541. - wed_w32(dev, MTK_WED_WPDMA_RING_RX(1) + i, val);
  542. + wed_w32(dev, MTK_WED_RING_RX(index) + i, val);
  543. + wed_w32(dev, MTK_WED_WPDMA_RING_RX(index) + i, val);
  544. }
  545. return 0;
  546. @@ -734,11 +923,19 @@ mtk_wed_txfree_ring_setup(struct mtk_wed
  547. static u32
  548. mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
  549. {
  550. - u32 val;
  551. + u32 val, ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
  552. +
  553. + if (dev->hw->version == 1)
  554. + ext_mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
  555. + else
  556. + ext_mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
  557. + MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
  558. + MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
  559. + MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
  560. val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
  561. wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
  562. - val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK;
  563. + val &= ext_mask;
  564. if (!dev->hw->num_flows)
  565. val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
  566. if (val && net_ratelimit())
  567. @@ -813,7 +1010,8 @@ out:
  568. }
  569. void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
  570. - void __iomem *wdma, int index)
  571. + void __iomem *wdma, phys_addr_t wdma_phy,
  572. + int index)
  573. {
  574. static const struct mtk_wed_ops wed_ops = {
  575. .attach = mtk_wed_attach,
  576. @@ -860,26 +1058,33 @@ void mtk_wed_add_hw(struct device_node *
  577. hw = kzalloc(sizeof(*hw), GFP_KERNEL);
  578. if (!hw)
  579. goto unlock;
  580. +
  581. hw->node = np;
  582. hw->regs = regs;
  583. hw->eth = eth;
  584. hw->dev = &pdev->dev;
  585. + hw->wdma_phy = wdma_phy;
  586. hw->wdma = wdma;
  587. hw->index = index;
  588. hw->irq = irq;
  589. - hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
  590. - "mediatek,pcie-mirror");
  591. - hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
  592. - "mediatek,hifsys");
  593. - if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
  594. - kfree(hw);
  595. - goto unlock;
  596. - }
  597. + hw->version = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
  598. - if (!index) {
  599. - regmap_write(hw->mirror, 0, 0);
  600. - regmap_write(hw->mirror, 4, 0);
  601. + if (hw->version == 1) {
  602. + hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
  603. + "mediatek,pcie-mirror");
  604. + hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
  605. + "mediatek,hifsys");
  606. + if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
  607. + kfree(hw);
  608. + goto unlock;
  609. + }
  610. +
  611. + if (!index) {
  612. + regmap_write(hw->mirror, 0, 0);
  613. + regmap_write(hw->mirror, 4, 0);
  614. + }
  615. }
  616. +
  617. mtk_wed_hw_add_debugfs(hw);
  618. hw_list[index] = hw;
  619. --- a/drivers/net/ethernet/mediatek/mtk_wed.h
  620. +++ b/drivers/net/ethernet/mediatek/mtk_wed.h
  621. @@ -18,11 +18,13 @@ struct mtk_wed_hw {
  622. struct regmap *hifsys;
  623. struct device *dev;
  624. void __iomem *wdma;
  625. + phys_addr_t wdma_phy;
  626. struct regmap *mirror;
  627. struct dentry *debugfs_dir;
  628. struct mtk_wed_device *wed_dev;
  629. u32 debugfs_reg;
  630. u32 num_flows;
  631. + u8 version;
  632. char dirname[5];
  633. int irq;
  634. int index;
  635. @@ -101,14 +103,16 @@ wpdma_txfree_w32(struct mtk_wed_device *
  636. }
  637. void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
  638. - void __iomem *wdma, int index);
  639. + void __iomem *wdma, phys_addr_t wdma_phy,
  640. + int index);
  641. void mtk_wed_exit(void);
  642. int mtk_wed_flow_add(int index);
  643. void mtk_wed_flow_remove(int index);
  644. #else
  645. static inline void
  646. mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
  647. - void __iomem *wdma, int index)
  648. + void __iomem *wdma, phys_addr_t wdma_phy,
  649. + int index)
  650. {
  651. }
  652. static inline void
  653. --- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
  654. +++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
  655. @@ -116,6 +116,9 @@ wed_txinfo_show(struct seq_file *s, void
  656. DUMP_WDMA(WDMA_GLO_CFG),
  657. DUMP_WDMA_RING(WDMA_RING_RX(0)),
  658. DUMP_WDMA_RING(WDMA_RING_RX(1)),
  659. +
  660. + DUMP_STR("TX FREE"),
  661. + DUMP_WED(WED_RX_MIB(0)),
  662. };
  663. struct mtk_wed_hw *hw = s->private;
  664. struct mtk_wed_device *dev = hw->wed_dev;
  665. --- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
  666. +++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
  667. @@ -5,6 +5,7 @@
  668. #define __MTK_WED_REGS_H
  669. #define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(14, 0)
  670. +#define MTK_WDMA_DESC_CTRL_LEN1_V2 GENMASK(13, 0)
  671. #define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(15)
  672. #define MTK_WDMA_DESC_CTRL_BURST BIT(16)
  673. #define MTK_WDMA_DESC_CTRL_LEN0 GENMASK(29, 16)
  674. @@ -41,6 +42,7 @@ struct mtk_wdma_desc {
  675. #define MTK_WED_CTRL_RESERVE_EN BIT(12)
  676. #define MTK_WED_CTRL_RESERVE_BUSY BIT(13)
  677. #define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
  678. +#define MTK_WED_CTRL_ETH_DMAD_FMT BIT(25)
  679. #define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
  680. #define MTK_WED_EXT_INT_STATUS 0x020
  681. @@ -57,7 +59,8 @@ struct mtk_wdma_desc {
  682. #define MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN BIT(19)
  683. #define MTK_WED_EXT_INT_STATUS_RX_DRV_BM_DMAD_COHERENT BIT(20)
  684. #define MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR BIT(21)
  685. -#define MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR BIT(22)
  686. +#define MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR BIT(22)
  687. +#define MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR BIT(23)
  688. #define MTK_WED_EXT_INT_STATUS_RX_DRV_DMA_RECYCLE BIT(24)
  689. #define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \
  690. MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \
  691. @@ -65,8 +68,7 @@ struct mtk_wdma_desc {
  692. MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \
  693. MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \
  694. MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN | \
  695. - MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR | \
  696. - MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR)
  697. + MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR)
  698. #define MTK_WED_EXT_INT_MASK 0x028
  699. @@ -81,6 +83,7 @@ struct mtk_wdma_desc {
  700. #define MTK_WED_TX_BM_BASE 0x084
  701. #define MTK_WED_TX_BM_TKID 0x088
  702. +#define MTK_WED_TX_BM_TKID_V2 0x0c8
  703. #define MTK_WED_TX_BM_TKID_START GENMASK(15, 0)
  704. #define MTK_WED_TX_BM_TKID_END GENMASK(31, 16)
  705. @@ -94,7 +97,25 @@ struct mtk_wdma_desc {
  706. #define MTK_WED_TX_BM_DYN_THR 0x0a0
  707. #define MTK_WED_TX_BM_DYN_THR_LO GENMASK(6, 0)
  708. +#define MTK_WED_TX_BM_DYN_THR_LO_V2 GENMASK(8, 0)
  709. #define MTK_WED_TX_BM_DYN_THR_HI GENMASK(22, 16)
  710. +#define MTK_WED_TX_BM_DYN_THR_HI_V2 GENMASK(24, 16)
  711. +
  712. +#define MTK_WED_TX_TKID_CTRL 0x0c0
  713. +#define MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM GENMASK(6, 0)
  714. +#define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM GENMASK(22, 16)
  715. +#define MTK_WED_TX_TKID_CTRL_PAUSE BIT(28)
  716. +
  717. +#define MTK_WED_TX_TKID_DYN_THR 0x0e0
  718. +#define MTK_WED_TX_TKID_DYN_THR_LO GENMASK(6, 0)
  719. +#define MTK_WED_TX_TKID_DYN_THR_HI GENMASK(22, 16)
  720. +
  721. +#define MTK_WED_TXP_DW0 0x120
  722. +#define MTK_WED_TXP_DW1 0x124
  723. +#define MTK_WED_WPDMA_WRITE_TXP GENMASK(31, 16)
  724. +#define MTK_WED_TXDP_CTRL 0x130
  725. +#define MTK_WED_TXDP_DW9_OVERWR BIT(9)
  726. +#define MTK_WED_RX_BM_TKID_MIB 0x1cc
  727. #define MTK_WED_INT_STATUS 0x200
  728. #define MTK_WED_INT_MASK 0x204
  729. @@ -125,6 +146,7 @@ struct mtk_wdma_desc {
  730. #define MTK_WED_RESET_IDX_RX GENMASK(17, 16)
  731. #define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4)
  732. +#define MTK_WED_RX_MIB(_n) (0x2e0 + (_n) * 4)
  733. #define MTK_WED_RING_TX(_n) (0x300 + (_n) * 0x10)
  734. @@ -155,21 +177,62 @@ struct mtk_wdma_desc {
  735. #define MTK_WED_WPDMA_GLO_CFG_BYTE_SWAP BIT(29)
  736. #define MTK_WED_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31)
  737. +/* CONFIG_MEDIATEK_NETSYS_V2 */
  738. +#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC BIT(4)
  739. +#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_PKT_PROC BIT(5)
  740. +#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC BIT(6)
  741. +#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_CRX_SYNC BIT(7)
  742. +#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_VER GENMASK(18, 16)
  743. +#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNSUPPORT_FMT BIT(19)
  744. +#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UEVENT_PKT_FMT_CHK BIT(20)
  745. +#define MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR BIT(21)
  746. +#define MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP BIT(24)
  747. +#define MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV BIT(28)
  748. +
  749. #define MTK_WED_WPDMA_RESET_IDX 0x50c
  750. #define MTK_WED_WPDMA_RESET_IDX_TX GENMASK(3, 0)
  751. #define MTK_WED_WPDMA_RESET_IDX_RX GENMASK(17, 16)
  752. +#define MTK_WED_WPDMA_CTRL 0x518
  753. +#define MTK_WED_WPDMA_CTRL_SDL1_FIXED BIT(31)
  754. +
  755. #define MTK_WED_WPDMA_INT_CTRL 0x520
  756. #define MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV BIT(21)
  757. #define MTK_WED_WPDMA_INT_MASK 0x524
  758. +#define MTK_WED_WPDMA_INT_CTRL_TX 0x530
  759. +#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN BIT(0)
  760. +#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR BIT(1)
  761. +#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG GENMASK(6, 2)
  762. +#define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN BIT(8)
  763. +#define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR BIT(9)
  764. +#define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG GENMASK(14, 10)
  765. +
  766. +#define MTK_WED_WPDMA_INT_CTRL_RX 0x534
  767. +
  768. +#define MTK_WED_WPDMA_INT_CTRL_TX_FREE 0x538
  769. +#define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN BIT(0)
  770. +#define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR BIT(1)
  771. +#define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG GENMASK(6, 2)
  772. +
  773. #define MTK_WED_PCIE_CFG_BASE 0x560
  774. +#define MTK_WED_PCIE_CFG_BASE 0x560
  775. +#define MTK_WED_PCIE_CFG_INTM 0x564
  776. +#define MTK_WED_PCIE_CFG_MSIS 0x568
  777. #define MTK_WED_PCIE_INT_TRIGGER 0x570
  778. #define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16)
  779. +#define MTK_WED_PCIE_INT_CTRL 0x57c
  780. +#define MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA BIT(20)
  781. +#define MTK_WED_PCIE_INT_CTRL_SRC_SEL GENMASK(17, 16)
  782. +#define MTK_WED_PCIE_INT_CTRL_POLL_EN GENMASK(13, 12)
  783. +
  784. #define MTK_WED_WPDMA_CFG_BASE 0x580
  785. +#define MTK_WED_WPDMA_CFG_INT_MASK 0x584
  786. +#define MTK_WED_WPDMA_CFG_TX 0x588
  787. +#define MTK_WED_WPDMA_CFG_TX_FREE 0x58c
  788. #define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4)
  789. #define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4)
  790. @@ -203,15 +266,24 @@ struct mtk_wdma_desc {
  791. #define MTK_WED_WDMA_RESET_IDX_RX GENMASK(17, 16)
  792. #define MTK_WED_WDMA_RESET_IDX_DRV GENMASK(25, 24)
  793. +#define MTK_WED_WDMA_INT_CLR 0xa24
  794. +#define MTK_WED_WDMA_INT_CLR_RX_DONE GENMASK(17, 16)
  795. +
  796. #define MTK_WED_WDMA_INT_TRIGGER 0xa28
  797. #define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16)
  798. #define MTK_WED_WDMA_INT_CTRL 0xa2c
  799. #define MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL GENMASK(17, 16)
  800. +#define MTK_WED_WDMA_CFG_BASE 0xaa0
  801. #define MTK_WED_WDMA_OFFSET0 0xaa4
  802. #define MTK_WED_WDMA_OFFSET1 0xaa8
  803. +#define MTK_WED_WDMA_OFST0_GLO_INTS GENMASK(15, 0)
  804. +#define MTK_WED_WDMA_OFST0_GLO_CFG GENMASK(31, 16)
  805. +#define MTK_WED_WDMA_OFST1_TX_CTRL GENMASK(15, 0)
  806. +#define MTK_WED_WDMA_OFST1_RX_CTRL GENMASK(31, 16)
  807. +
  808. #define MTK_WED_WDMA_RX_MIB(_n) (0xae0 + (_n) * 4)
  809. #define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4)
  810. #define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4)
  811. @@ -221,6 +293,7 @@ struct mtk_wdma_desc {
  812. #define MTK_WED_RING_OFS_CPU_IDX 0x08
  813. #define MTK_WED_RING_OFS_DMA_IDX 0x0c
  814. +#define MTK_WDMA_RING_TX(_n) (0x000 + (_n) * 0x10)
  815. #define MTK_WDMA_RING_RX(_n) (0x100 + (_n) * 0x10)
  816. #define MTK_WDMA_GLO_CFG 0x204
  817. @@ -234,6 +307,8 @@ struct mtk_wdma_desc {
  818. #define MTK_WDMA_RESET_IDX_TX GENMASK(3, 0)
  819. #define MTK_WDMA_RESET_IDX_RX GENMASK(17, 16)
  820. +#define MTK_WDMA_INT_STATUS 0x220
  821. +
  822. #define MTK_WDMA_INT_MASK 0x228
  823. #define MTK_WDMA_INT_MASK_TX_DONE GENMASK(3, 0)
  824. #define MTK_WDMA_INT_MASK_RX_DONE GENMASK(17, 16)
  825. --- a/include/linux/soc/mediatek/mtk_wed.h
  826. +++ b/include/linux/soc/mediatek/mtk_wed.h
  827. @@ -14,6 +14,7 @@ struct mtk_wdma_desc;
  828. struct mtk_wed_ring {
  829. struct mtk_wdma_desc *desc;
  830. dma_addr_t desc_phys;
  831. + u32 desc_size;
  832. int size;
  833. u32 reg_base;
  834. @@ -45,10 +46,17 @@ struct mtk_wed_device {
  835. struct pci_dev *pci_dev;
  836. u32 wpdma_phys;
  837. + u32 wpdma_int;
  838. + u32 wpdma_mask;
  839. + u32 wpdma_tx;
  840. + u32 wpdma_txfree;
  841. u16 token_start;
  842. unsigned int nbuf;
  843. + u8 tx_tbit[MTK_WED_TX_QUEUES];
  844. + u8 txfree_tbit;
  845. +
  846. u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
  847. int (*offload_enable)(struct mtk_wed_device *wed);
  848. void (*offload_disable)(struct mtk_wed_device *wed);