752-20-v6.7-net-ethernet-mtk_wed-add-wed-3.0-reset-support.patch

From: Sujuan Chen <[email protected]>
Date: Mon, 18 Sep 2023 12:29:19 +0200
Subject: [PATCH] net: ethernet: mtk_wed: add wed 3.0 reset support

Introduce support for resetting Wireless Ethernet Dispatcher 3.0
available on MT7988 SoC.

Co-developed-by: Lorenzo Bianconi <[email protected]>
Signed-off-by: Lorenzo Bianconi <[email protected]>
Signed-off-by: Sujuan Chen <[email protected]>
Signed-off-by: Paolo Abeni <[email protected]>
---
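Note (editor's illustration, not part of the patch): the reset helpers added
below wait for the PREF/WRBK busy bits with the kernel's read_poll_timeout()
macro from <linux/iopoll.h>. A minimal sketch of that polling pattern, using a
hypothetical example_reg_read() accessor in place of wdma_r32():

/* Illustrative only. Polls a register until a busy bit clears, with the
 * same 0 us sleep / 10000 us timeout arguments used by the helpers below.
 */
#include <linux/io.h>
#include <linux/iopoll.h>

static u32 example_reg_read(void __iomem *base, u32 reg)
{
	return readl(base + reg);
}

static int example_wait_idle(void __iomem *base, u32 reg, u32 busy_bit)
{
	u32 status;

	/* Returns 0 once the bit clears, -ETIMEDOUT after 10 ms. */
	return read_poll_timeout(example_reg_read, status,
				 !(status & busy_bit),
				 0, 10000, false, base, reg);
}
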
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -149,6 +149,90 @@ mtk_wdma_read_reset(struct mtk_wed_devic
         return wdma_r32(dev, MTK_WDMA_GLO_CFG);
 }
+static void
+mtk_wdma_v3_rx_reset(struct mtk_wed_device *dev)
+{
+        u32 status;
+
+        if (!mtk_wed_is_v3_or_greater(dev->hw))
+                return;
+
+        wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN);
+        wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN);
+
+        if (read_poll_timeout(wdma_r32, status,
+                              !(status & MTK_WDMA_PREF_TX_CFG_PREF_BUSY),
+                              0, 10000, false, dev, MTK_WDMA_PREF_TX_CFG))
+                dev_err(dev->hw->dev, "rx reset failed\n");
+
+        if (read_poll_timeout(wdma_r32, status,
+                              !(status & MTK_WDMA_PREF_RX_CFG_PREF_BUSY),
+                              0, 10000, false, dev, MTK_WDMA_PREF_RX_CFG))
+                dev_err(dev->hw->dev, "rx reset failed\n");
+
+        wdma_clr(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN);
+        wdma_clr(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN);
+
+        if (read_poll_timeout(wdma_r32, status,
+                              !(status & MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY),
+                              0, 10000, false, dev, MTK_WDMA_WRBK_TX_CFG))
+                dev_err(dev->hw->dev, "rx reset failed\n");
+
+        if (read_poll_timeout(wdma_r32, status,
+                              !(status & MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY),
+                              0, 10000, false, dev, MTK_WDMA_WRBK_RX_CFG))
+                dev_err(dev->hw->dev, "rx reset failed\n");
+
+        /* prefetch FIFO */
+        wdma_w32(dev, MTK_WDMA_PREF_RX_FIFO_CFG,
+                 MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR |
+                 MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR);
+        wdma_clr(dev, MTK_WDMA_PREF_RX_FIFO_CFG,
+                 MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR |
+                 MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR);
+
+        /* core FIFO */
+        wdma_w32(dev, MTK_WDMA_XDMA_RX_FIFO_CFG,
+                 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR |
+                 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR |
+                 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR |
+                 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR |
+                 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR |
+                 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR |
+                 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR);
+        wdma_clr(dev, MTK_WDMA_XDMA_RX_FIFO_CFG,
+                 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR |
+                 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR |
+                 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR |
+                 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR |
+                 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR |
+                 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR |
+                 MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR);
+
+        /* writeback FIFO */
+        wdma_w32(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(0),
+                 MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR);
+        wdma_w32(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(1),
+                 MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR);
+
+        wdma_clr(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(0),
+                 MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR);
+        wdma_clr(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(1),
+                 MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR);
+
+        /* prefetch ring status */
+        wdma_w32(dev, MTK_WDMA_PREF_SIDX_CFG,
+                 MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR);
+        wdma_clr(dev, MTK_WDMA_PREF_SIDX_CFG,
+                 MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR);
+
+        /* writeback ring status */
+        wdma_w32(dev, MTK_WDMA_WRBK_SIDX_CFG,
+                 MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR);
+        wdma_clr(dev, MTK_WDMA_WRBK_SIDX_CFG,
+                 MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR);
+}
+
 static int
 mtk_wdma_rx_reset(struct mtk_wed_device *dev)
 {
@@ -161,6 +245,7 @@ mtk_wdma_rx_reset(struct mtk_wed_device
         if (ret)
                 dev_err(dev->hw->dev, "rx reset failed\n");
+        mtk_wdma_v3_rx_reset(dev);
         wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
         wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
@@ -193,6 +278,84 @@ mtk_wed_poll_busy(struct mtk_wed_device
 }
 static void
+mtk_wdma_v3_tx_reset(struct mtk_wed_device *dev)
+{
+        u32 status;
+
+        if (!mtk_wed_is_v3_or_greater(dev->hw))
+                return;
+
+        wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN);
+        wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN);
+
+        if (read_poll_timeout(wdma_r32, status,
+                              !(status & MTK_WDMA_PREF_TX_CFG_PREF_BUSY),
+                              0, 10000, false, dev, MTK_WDMA_PREF_TX_CFG))
+                dev_err(dev->hw->dev, "tx reset failed\n");
+
+        if (read_poll_timeout(wdma_r32, status,
+                              !(status & MTK_WDMA_PREF_RX_CFG_PREF_BUSY),
+                              0, 10000, false, dev, MTK_WDMA_PREF_RX_CFG))
+                dev_err(dev->hw->dev, "tx reset failed\n");
+
+        wdma_clr(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN);
+        wdma_clr(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN);
+
+        if (read_poll_timeout(wdma_r32, status,
+                              !(status & MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY),
+                              0, 10000, false, dev, MTK_WDMA_WRBK_TX_CFG))
+                dev_err(dev->hw->dev, "tx reset failed\n");
+
+        if (read_poll_timeout(wdma_r32, status,
+                              !(status & MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY),
+                              0, 10000, false, dev, MTK_WDMA_WRBK_RX_CFG))
+                dev_err(dev->hw->dev, "tx reset failed\n");
+
+        /* prefetch FIFO */
+        wdma_w32(dev, MTK_WDMA_PREF_TX_FIFO_CFG,
+                 MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR |
+                 MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR);
+        wdma_clr(dev, MTK_WDMA_PREF_TX_FIFO_CFG,
+                 MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR |
+                 MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR);
+
+        /* core FIFO */
+        wdma_w32(dev, MTK_WDMA_XDMA_TX_FIFO_CFG,
+                 MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR |
+                 MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR |
+                 MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR |
+                 MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR);
+        wdma_clr(dev, MTK_WDMA_XDMA_TX_FIFO_CFG,
+                 MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR |
+                 MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR |
+                 MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR |
+                 MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR);
+
+        /* writeback FIFO */
+        wdma_w32(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(0),
+                 MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR);
+        wdma_w32(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(1),
+                 MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR);
+
+        wdma_clr(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(0),
+                 MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR);
+        wdma_clr(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(1),
+                 MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR);
+
+        /* prefetch ring status */
+        wdma_w32(dev, MTK_WDMA_PREF_SIDX_CFG,
+                 MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR);
+        wdma_clr(dev, MTK_WDMA_PREF_SIDX_CFG,
+                 MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR);
+
+        /* writeback ring status */
+        wdma_w32(dev, MTK_WDMA_WRBK_SIDX_CFG,
+                 MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR);
+        wdma_clr(dev, MTK_WDMA_WRBK_SIDX_CFG,
+                 MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR);
+}
+
+static void
 mtk_wdma_tx_reset(struct mtk_wed_device *dev)
 {
         u32 status, mask = MTK_WDMA_GLO_CFG_TX_DMA_BUSY;
@@ -203,6 +366,7 @@ mtk_wdma_tx_reset(struct mtk_wed_device
                                !(status & mask), 0, 10000))
                 dev_err(dev->hw->dev, "tx reset failed\n");
+        mtk_wdma_v3_tx_reset(dev);
         wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
         wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
@@ -1406,13 +1570,33 @@ mtk_wed_rx_reset(struct mtk_wed_device *
         if (ret)
                 return ret;
+        if (dev->wlan.hw_rro) {
+                wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN);
+                mtk_wed_poll_busy(dev, MTK_WED_RRO_RX_HW_STS,
+                                  MTK_WED_RX_IND_CMD_BUSY);
+                mtk_wed_reset(dev, MTK_WED_RESET_RRO_RX_TO_PG);
+        }
+
         wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN);
         ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
                                 MTK_WED_WPDMA_RX_D_RX_DRV_BUSY);
+        if (!ret && mtk_wed_is_v3_or_greater(dev->hw))
+                ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
+                                        MTK_WED_WPDMA_RX_D_PREF_BUSY);
         if (ret) {
                 mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
                 mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV);
         } else {
+                if (mtk_wed_is_v3_or_greater(dev->hw)) {
+                        /* 1.a. disable prefetch HW */
+                        wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
+                                MTK_WED_WPDMA_RX_D_PREF_EN);
+                        mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
+                                          MTK_WED_WPDMA_RX_D_PREF_BUSY);
+                        wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
+                                MTK_WED_WPDMA_RX_D_RST_DRV_IDX_ALL);
+                }
+
                 wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
                         MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
                         MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
@@ -1440,23 +1624,52 @@ mtk_wed_rx_reset(struct mtk_wed_device *
                 wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
         }
+        if (dev->wlan.hw_rro) {
+                /* disable rro msdu page drv */
+                wed_clr(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
+                        MTK_WED_RRO_MSDU_PG_DRV_EN);
+
+                /* disable rro data drv */
+                wed_clr(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN);
+
+                /* rro msdu page drv reset */
+                wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
+                        MTK_WED_RRO_MSDU_PG_DRV_CLR);
+                mtk_wed_poll_busy(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
+                                  MTK_WED_RRO_MSDU_PG_DRV_CLR);
+
+                /* rro data drv reset */
+                wed_w32(dev, MTK_WED_RRO_RX_D_CFG(2),
+                        MTK_WED_RRO_RX_D_DRV_CLR);
+                mtk_wed_poll_busy(dev, MTK_WED_RRO_RX_D_CFG(2),
+                                  MTK_WED_RRO_RX_D_DRV_CLR);
+        }
+
         /* reset route qm */
         wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
         ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
                                 MTK_WED_CTRL_RX_ROUTE_QM_BUSY);
-        if (ret)
+        if (ret) {
                 mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
-        else
-                wed_set(dev, MTK_WED_RTQM_GLO_CFG,
-                        MTK_WED_RTQM_Q_RST);
+        } else if (mtk_wed_is_v3_or_greater(dev->hw)) {
+                wed_set(dev, MTK_WED_RTQM_RST, BIT(0));
+                wed_clr(dev, MTK_WED_RTQM_RST, BIT(0));
+                mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
+        } else {
+                wed_set(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
+        }
         /* reset tx wdma */
         mtk_wdma_tx_reset(dev);
         /* reset tx wdma drv */
         wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN);
-        mtk_wed_poll_busy(dev, MTK_WED_CTRL,
-                          MTK_WED_CTRL_WDMA_INT_AGENT_BUSY);
+        if (mtk_wed_is_v3_or_greater(dev->hw))
+                mtk_wed_poll_busy(dev, MTK_WED_WPDMA_STATUS,
+                                  MTK_WED_WPDMA_STATUS_TX_DRV);
+        else
+                mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+                                  MTK_WED_CTRL_WDMA_INT_AGENT_BUSY);
         mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV);
         /* reset wed rx dma */
@@ -1477,6 +1690,14 @@ mtk_wed_rx_reset(struct mtk_wed_device *
                           MTK_WED_CTRL_WED_RX_BM_BUSY);
         mtk_wed_reset(dev, MTK_WED_RESET_RX_BM);
+        if (dev->wlan.hw_rro) {
+                wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN);
+                mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+                                  MTK_WED_CTRL_WED_RX_PG_BM_BUSY);
+                wed_set(dev, MTK_WED_RESET, MTK_WED_RESET_RX_PG_BM);
+                wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_RX_PG_BM);
+        }
+
         /* wo change to enable state */
         val = MTK_WED_WO_STATE_ENABLE;
         ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
@@ -1494,6 +1715,7 @@ mtk_wed_rx_reset(struct mtk_wed_device *
                                    false);
         }
         mtk_wed_free_rx_buffer(dev);
+        mtk_wed_hwrro_free_buffer(dev);
         return 0;
 }
@@ -1527,15 +1749,41 @@ mtk_wed_reset_dma(struct mtk_wed_device
         /* 2. reset WDMA rx DMA */
         busy = !!mtk_wdma_rx_reset(dev);
-        wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+        if (mtk_wed_is_v3_or_greater(dev->hw)) {
+                val = MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE |
+                      wed_r32(dev, MTK_WED_WDMA_GLO_CFG);
+                val &= ~MTK_WED_WDMA_GLO_CFG_RX_DRV_EN;
+                wed_w32(dev, MTK_WED_WDMA_GLO_CFG, val);
+        } else {
+                wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
+                        MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+        }
+
         if (!busy)
                 busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG,
                                          MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY);
+        if (!busy && mtk_wed_is_v3_or_greater(dev->hw))
+                busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_RX_PREF_CFG,
+                                         MTK_WED_WDMA_RX_PREF_BUSY);
         if (busy) {
                 mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
                 mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
         } else {
+                if (mtk_wed_is_v3_or_greater(dev->hw)) {
+                        /* 1.a. disable prefetch HW */
+                        wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
+                                MTK_WED_WDMA_RX_PREF_EN);
+                        mtk_wed_poll_busy(dev, MTK_WED_WDMA_RX_PREF_CFG,
+                                          MTK_WED_WDMA_RX_PREF_BUSY);
+                        wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
+                                MTK_WED_WDMA_RX_PREF_DDONE2_EN);
+
+                        /* 2. Reset dma index */
+                        wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
+                                MTK_WED_WDMA_RESET_IDX_RX_ALL);
+                }
+
                 wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
                         MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV);
                 wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0);
@@ -1551,8 +1799,13 @@ mtk_wed_reset_dma(struct mtk_wed_device
         wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
         for (i = 0; i < 100; i++) {
-                val = wed_r32(dev, MTK_WED_TX_BM_INTF);
-                if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
+                if (mtk_wed_is_v1(dev->hw))
+                        val = FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP,
+                                        wed_r32(dev, MTK_WED_TX_BM_INTF));
+                else
+                        val = FIELD_GET(MTK_WED_TX_TKID_INTF_TKFIFO_FDEP,
+                                        wed_r32(dev, MTK_WED_TX_TKID_INTF));
+                if (val == 0x40)
                         break;
         }
@@ -1574,6 +1827,8 @@ mtk_wed_reset_dma(struct mtk_wed_device
                 mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
                 mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
                 mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV);
+                if (mtk_wed_is_v3_or_greater(dev->hw))
+                        wed_w32(dev, MTK_WED_RX1_CTRL2, 0);
         } else {
                 wed_w32(dev, MTK_WED_WPDMA_RESET_IDX,
                         MTK_WED_WPDMA_RESET_IDX_TX |
@@ -1590,7 +1845,14 @@ mtk_wed_reset_dma(struct mtk_wed_device
                 wed_w32(dev, MTK_WED_RESET_IDX, 0);
         }
-        mtk_wed_rx_reset(dev);
+        if (mtk_wed_is_v3_or_greater(dev->hw)) {
+                /* reset amsdu engine */
+                wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN);
+                mtk_wed_reset(dev, MTK_WED_RESET_TX_AMSDU);
+        }
+
+        if (mtk_wed_get_rx_capa(dev))
+                mtk_wed_rx_reset(dev);
 }
 static int
@@ -1842,6 +2104,7 @@ mtk_wed_dma_enable(struct mtk_wed_devic
                         MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4);
                 wdma_set(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN);
+                wdma_set(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN);
         }
         wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
@@ -1905,6 +2168,12 @@ mtk_wed_start_hw_rro(struct mtk_wed_devi
         if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro)
                 return;
+        if (reset) {
+                wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
+                        MTK_WED_RRO_MSDU_PG_DRV_EN);
+                return;
+        }
+
         wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_MSDU_PG_DRV_CLR);
         wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
                 MTK_WED_RRO_MSDU_PG_DRV_CLR);
--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
@@ -28,6 +28,8 @@ struct mtk_wdma_desc {
 #define MTK_WED_RESET 0x008
 #define MTK_WED_RESET_TX_BM BIT(0)
 #define MTK_WED_RESET_RX_BM BIT(1)
+#define MTK_WED_RESET_RX_PG_BM BIT(2)
+#define MTK_WED_RESET_RRO_RX_TO_PG BIT(3)
 #define MTK_WED_RESET_TX_FREE_AGENT BIT(4)
 #define MTK_WED_RESET_WPDMA_TX_DRV BIT(8)
 #define MTK_WED_RESET_WPDMA_RX_DRV BIT(9)
@@ -106,6 +108,9 @@ struct mtk_wdma_desc {
 #define MTK_WED_STATUS 0x060
 #define MTK_WED_STATUS_TX GENMASK(15, 8)
+#define MTK_WED_WPDMA_STATUS 0x068
+#define MTK_WED_WPDMA_STATUS_TX_DRV GENMASK(15, 8)
+
 #define MTK_WED_TX_BM_CTRL 0x080
 #define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM GENMASK(6, 0)
 #define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM GENMASK(22, 16)
@@ -140,6 +145,9 @@ struct mtk_wdma_desc {
 #define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM GENMASK(22, 16)
 #define MTK_WED_TX_TKID_CTRL_PAUSE BIT(28)
+#define MTK_WED_TX_TKID_INTF 0x0dc
+#define MTK_WED_TX_TKID_INTF_TKFIFO_FDEP GENMASK(25, 16)
+
 #define MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM_V3 GENMASK(7, 0)
 #define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM_V3 GENMASK(23, 16)
@@ -190,6 +198,7 @@ struct mtk_wdma_desc {
 #define MTK_WED_RING_RX_DATA(_n) (0x420 + (_n) * 0x10)
 #define MTK_WED_SCR0 0x3c0
+#define MTK_WED_RX1_CTRL2 0x418
 #define MTK_WED_WPDMA_INT_TRIGGER 0x504
 #define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1)
 #define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4)
@@ -303,6 +312,7 @@ struct mtk_wdma_desc {
 #define MTK_WED_WPDMA_RX_D_RST_IDX 0x760
 #define MTK_WED_WPDMA_RX_D_RST_CRX_IDX GENMASK(17, 16)
+#define MTK_WED_WPDMA_RX_D_RST_DRV_IDX_ALL BIT(20)
 #define MTK_WED_WPDMA_RX_D_RST_DRV_IDX GENMASK(25, 24)
 #define MTK_WED_WPDMA_RX_GLO_CFG 0x76c
@@ -313,6 +323,7 @@ struct mtk_wdma_desc {
 #define MTK_WED_WPDMA_RX_D_PREF_CFG 0x7b4
 #define MTK_WED_WPDMA_RX_D_PREF_EN BIT(0)
+#define MTK_WED_WPDMA_RX_D_PREF_BUSY BIT(1)
 #define MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE GENMASK(12, 8)
 #define MTK_WED_WPDMA_RX_D_PREF_LOW_THRES GENMASK(21, 16)
@@ -334,11 +345,13 @@ struct mtk_wdma_desc {
 #define MTK_WED_WDMA_RX_PREF_CFG 0x950
 #define MTK_WED_WDMA_RX_PREF_EN BIT(0)
+#define MTK_WED_WDMA_RX_PREF_BUSY BIT(1)
 #define MTK_WED_WDMA_RX_PREF_BURST_SIZE GENMASK(12, 8)
 #define MTK_WED_WDMA_RX_PREF_LOW_THRES GENMASK(21, 16)
 #define MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR BIT(24)
 #define MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR BIT(25)
 #define MTK_WED_WDMA_RX_PREF_DDONE2_EN BIT(26)
+#define MTK_WED_WDMA_RX_PREF_DDONE2_BUSY BIT(27)
 #define MTK_WED_WDMA_RX_PREF_FIFO_CFG 0x95C
 #define MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR BIT(0)
@@ -367,6 +380,7 @@ struct mtk_wdma_desc {
 #define MTK_WED_WDMA_RESET_IDX 0xa08
 #define MTK_WED_WDMA_RESET_IDX_RX GENMASK(17, 16)
+#define MTK_WED_WDMA_RESET_IDX_RX_ALL BIT(20)
 #define MTK_WED_WDMA_RESET_IDX_DRV GENMASK(25, 24)
 #define MTK_WED_WDMA_INT_CLR 0xa24
@@ -437,21 +451,62 @@ struct mtk_wdma_desc {
 #define MTK_WDMA_INT_MASK_RX_DELAY BIT(30)
 #define MTK_WDMA_INT_MASK_RX_COHERENT BIT(31)
+#define MTK_WDMA_XDMA_TX_FIFO_CFG 0x238
+#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR BIT(0)
+#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR BIT(4)
+#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR BIT(8)
+#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR BIT(12)
+
+#define MTK_WDMA_XDMA_RX_FIFO_CFG 0x23c
+#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR BIT(0)
+#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR BIT(4)
+#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR BIT(8)
+#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR BIT(12)
+#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR BIT(15)
+#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR BIT(18)
+#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR BIT(21)
+
 #define MTK_WDMA_INT_GRP1 0x250
 #define MTK_WDMA_INT_GRP2 0x254
 #define MTK_WDMA_PREF_TX_CFG 0x2d0
 #define MTK_WDMA_PREF_TX_CFG_PREF_EN BIT(0)
+#define MTK_WDMA_PREF_TX_CFG_PREF_BUSY BIT(1)
 #define MTK_WDMA_PREF_RX_CFG 0x2dc
 #define MTK_WDMA_PREF_RX_CFG_PREF_EN BIT(0)
+#define MTK_WDMA_PREF_RX_CFG_PREF_BUSY BIT(1)
+
+#define MTK_WDMA_PREF_RX_FIFO_CFG 0x2e0
+#define MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR BIT(0)
+#define MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR BIT(16)
+
+#define MTK_WDMA_PREF_TX_FIFO_CFG 0x2d4
+#define MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR BIT(0)
+#define MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR BIT(16)
+
+#define MTK_WDMA_PREF_SIDX_CFG 0x2e4
+#define MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR GENMASK(3, 0)
+#define MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR GENMASK(5, 4)
 #define MTK_WDMA_WRBK_TX_CFG 0x300
+#define MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY BIT(0)
 #define MTK_WDMA_WRBK_TX_CFG_WRBK_EN BIT(30)
+#define MTK_WDMA_WRBK_TX_FIFO_CFG(_n) (0x304 + (_n) * 0x4)
+#define MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR BIT(0)
+
 #define MTK_WDMA_WRBK_RX_CFG 0x344
+#define MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY BIT(0)
 #define MTK_WDMA_WRBK_RX_CFG_WRBK_EN BIT(30)
+#define MTK_WDMA_WRBK_RX_FIFO_CFG(_n) (0x348 + (_n) * 0x4)
+#define MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR BIT(0)
+
+#define MTK_WDMA_WRBK_SIDX_CFG 0x388
+#define MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR GENMASK(3, 0)
+#define MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR GENMASK(5, 4)
+
 #define MTK_PCIE_MIRROR_MAP(n) ((n) ? 0x4 : 0x0)
 #define MTK_PCIE_MIRROR_MAP_EN BIT(0)
 #define MTK_PCIE_MIRROR_MAP_WED_ID BIT(1)
@@ -465,6 +520,8 @@ struct mtk_wdma_desc {
 #define MTK_WED_RTQM_Q_DBG_BYPASS BIT(5)
 #define MTK_WED_RTQM_TXDMAD_FPORT GENMASK(23, 20)
+#define MTK_WED_RTQM_RST 0xb04
+
 #define MTK_WED_RTQM_IGRS0_I2HW_DMAD_CNT 0xb1c
 #define MTK_WED_RTQM_IGRS0_I2H_DMAD_CNT(_n) (0xb20 + (_n) * 0x4)
 #define MTK_WED_RTQM_IGRS0_I2HW_PKT_CNT 0xb28
@@ -653,6 +710,9 @@ struct mtk_wdma_desc {
 #define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR BIT(17)
 #define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG GENMASK(22, 18)
+#define MTK_WED_RRO_RX_HW_STS 0xf00
+#define MTK_WED_RX_IND_CMD_BUSY GENMASK(31, 0)
+
 #define MTK_WED_RX_IND_CMD_CNT0 0xf20
 #define MTK_WED_RX_IND_CMD_DBG_CNT_EN BIT(31)