729-13-v6.2-net-ethernet-mtk_wed-add-mtk_wed_rx_reset-routine.patch

  1. From: Lorenzo Bianconi <[email protected]>
  2. Date: Thu, 24 Nov 2022 16:22:54 +0100
  3. Subject: [PATCH] net: ethernet: mtk_wed: add mtk_wed_rx_reset routine
  4. Introduce mtk_wed_rx_reset routine in order to reset rx DMA for Wireless
  5. Ethernet Dispatcher available on MT7986 SoC.
  6. Co-developed-by: Sujuan Chen <[email protected]>
  7. Signed-off-by: Sujuan Chen <[email protected]>
  8. Signed-off-by: Lorenzo Bianconi <[email protected]>
  9. Signed-off-by: Paolo Abeni <[email protected]>
  10. ---
  11. --- a/drivers/net/ethernet/mediatek/mtk_wed.c
  12. +++ b/drivers/net/ethernet/mediatek/mtk_wed.c
  13. @@ -951,42 +951,130 @@ mtk_wed_ring_reset(struct mtk_wed_ring *
  14. }
  15. static u32
  16. -mtk_wed_check_busy(struct mtk_wed_device *dev)
  17. +mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
  18. {
  19. - if (wed_r32(dev, MTK_WED_GLO_CFG) & MTK_WED_GLO_CFG_TX_DMA_BUSY)
  20. - return true;
  21. -
  22. - if (wed_r32(dev, MTK_WED_WPDMA_GLO_CFG) &
  23. - MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY)
  24. - return true;
  25. -
  26. - if (wed_r32(dev, MTK_WED_CTRL) & MTK_WED_CTRL_WDMA_INT_AGENT_BUSY)
  27. - return true;
  28. -
  29. - if (wed_r32(dev, MTK_WED_WDMA_GLO_CFG) &
  30. - MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
  31. - return true;
  32. -
  33. - if (wdma_r32(dev, MTK_WDMA_GLO_CFG) &
  34. - MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
  35. - return true;
  36. -
  37. - if (wed_r32(dev, MTK_WED_CTRL) &
  38. - (MTK_WED_CTRL_WED_TX_BM_BUSY | MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY))
  39. - return true;
  40. -
  41. - return false;
  42. + return !!(wed_r32(dev, reg) & mask);
  43. }
  44. static int
  45. -mtk_wed_poll_busy(struct mtk_wed_device *dev)
  46. +mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
  47. {
  48. int sleep = 15000;
  49. int timeout = 100 * sleep;
  50. u32 val;
  51. return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
  52. - timeout, false, dev);
  53. + timeout, false, dev, reg, mask);
  54. +}
  55. +
  56. +static int
  57. +mtk_wed_rx_reset(struct mtk_wed_device *dev)
  58. +{
  59. + struct mtk_wed_wo *wo = dev->hw->wed_wo;
  60. + u8 val = MTK_WED_WO_STATE_SER_RESET;
  61. + int i, ret;
  62. +
  63. + ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
  64. + MTK_WED_WO_CMD_CHANGE_STATE, &val,
  65. + sizeof(val), true);
  66. + if (ret)
  67. + return ret;
  68. +
  69. + wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN);
  70. + ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
  71. + MTK_WED_WPDMA_RX_D_RX_DRV_BUSY);
  72. + if (ret) {
  73. + mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
  74. + mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV);
  75. + } else {
  76. + wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
  77. + MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
  78. + MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
  79. +
  80. + wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
  81. + MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
  82. + MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);
  83. + wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
  84. + MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
  85. + MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);
  86. +
  87. + wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
  88. + }
  89. +
  90. + /* reset rro qm */
  91. + wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
  92. + ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
  93. + MTK_WED_CTRL_RX_RRO_QM_BUSY);
  94. + if (ret) {
  95. + mtk_wed_reset(dev, MTK_WED_RESET_RX_RRO_QM);
  96. + } else {
  97. + wed_set(dev, MTK_WED_RROQM_RST_IDX,
  98. + MTK_WED_RROQM_RST_IDX_MIOD |
  99. + MTK_WED_RROQM_RST_IDX_FDBK);
  100. + wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
  101. + }
  102. +
  103. + /* reset route qm */
  104. + wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
  105. + ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
  106. + MTK_WED_CTRL_RX_ROUTE_QM_BUSY);
  107. + if (ret)
  108. + mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
  109. + else
  110. + wed_set(dev, MTK_WED_RTQM_GLO_CFG,
  111. + MTK_WED_RTQM_Q_RST);
  112. +
  113. + /* reset tx wdma */
  114. + mtk_wdma_tx_reset(dev);
  115. +
  116. + /* reset tx wdma drv */
  117. + wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN);
  118. + mtk_wed_poll_busy(dev, MTK_WED_CTRL,
  119. + MTK_WED_CTRL_WDMA_INT_AGENT_BUSY);
  120. + mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV);
  121. +
  122. + /* reset wed rx dma */
  123. + ret = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG,
  124. + MTK_WED_GLO_CFG_RX_DMA_BUSY);
  125. + wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_RX_DMA_EN);
  126. + if (ret) {
  127. + mtk_wed_reset(dev, MTK_WED_RESET_WED_RX_DMA);
  128. + } else {
  129. + struct mtk_eth *eth = dev->hw->eth;
  130. +
  131. + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
  132. + wed_set(dev, MTK_WED_RESET_IDX,
  133. + MTK_WED_RESET_IDX_RX_V2);
  134. + else
  135. + wed_set(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_RX);
  136. + wed_w32(dev, MTK_WED_RESET_IDX, 0);
  137. + }
  138. +
  139. + /* reset rx bm */
  140. + wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
  141. + mtk_wed_poll_busy(dev, MTK_WED_CTRL,
  142. + MTK_WED_CTRL_WED_RX_BM_BUSY);
  143. + mtk_wed_reset(dev, MTK_WED_RESET_RX_BM);
  144. +
  145. + /* wo change to enable state */
  146. + val = MTK_WED_WO_STATE_ENABLE;
  147. + ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
  148. + MTK_WED_WO_CMD_CHANGE_STATE, &val,
  149. + sizeof(val), true);
  150. + if (ret)
  151. + return ret;
  152. +
  153. + /* wed_rx_ring_reset */
  154. + for (i = 0; i < ARRAY_SIZE(dev->rx_ring); i++) {
  155. + if (!dev->rx_ring[i].desc)
  156. + continue;
  157. +
  158. + mtk_wed_ring_reset(&dev->rx_ring[i], MTK_WED_RX_RING_SIZE,
  159. + false);
  160. + }
  161. + mtk_wed_free_rx_buffer(dev);
  162. +
  163. + return 0;
  164. }
  165. static void
  166. @@ -1004,19 +1092,23 @@ mtk_wed_reset_dma(struct mtk_wed_device
  167. true);
  168. }
  169. - if (mtk_wed_poll_busy(dev))
  170. - busy = mtk_wed_check_busy(dev);
  171. -
  172. + /* 1. reset WED tx DMA */
  173. + wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_TX_DMA_EN);
  174. + busy = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG,
  175. + MTK_WED_GLO_CFG_TX_DMA_BUSY);
  176. if (busy) {
  177. mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
  178. } else {
  179. - wed_w32(dev, MTK_WED_RESET_IDX,
  180. - MTK_WED_RESET_IDX_TX |
  181. - MTK_WED_RESET_IDX_RX);
  182. + wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_TX);
  183. wed_w32(dev, MTK_WED_RESET_IDX, 0);
  184. }
  185. - mtk_wdma_rx_reset(dev);
  186. + /* 2. reset WDMA rx DMA */
  187. + busy = !!mtk_wdma_rx_reset(dev);
  188. + wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
  189. + if (!busy)
  190. + busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG,
  191. + MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY);
  192. if (busy) {
  193. mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
  194. @@ -1033,6 +1125,9 @@ mtk_wed_reset_dma(struct mtk_wed_device
  195. MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
  196. }
  197. + /* 3. reset WED WPDMA tx */
  198. + wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
  199. +
  200. for (i = 0; i < 100; i++) {
  201. val = wed_r32(dev, MTK_WED_TX_BM_INTF);
  202. if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
  203. @@ -1040,8 +1135,19 @@ mtk_wed_reset_dma(struct mtk_wed_device
  204. }
  205. mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
  206. + wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_BM_EN);
  207. mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
  208. + /* 4. reset WED WPDMA tx */
  209. + busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
  210. + MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY);
  211. + wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
  212. + MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
  213. + MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
  214. + if (!busy)
  215. + busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
  216. + MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY);
  217. +
  218. if (busy) {
  219. mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
  220. mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
  221. @@ -1052,6 +1158,17 @@ mtk_wed_reset_dma(struct mtk_wed_device
  222. MTK_WED_WPDMA_RESET_IDX_RX);
  223. wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
  224. }
  225. +
  226. + dev->init_done = false;
  227. + if (dev->hw->version == 1)
  228. + return;
  229. +
  230. + if (!busy) {
  231. + wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_WPDMA_IDX_RX);
  232. + wed_w32(dev, MTK_WED_RESET_IDX, 0);
  233. + }
  234. +
  235. + mtk_wed_rx_reset(dev);
  236. }
  237. static int
  238. @@ -1274,6 +1391,9 @@ mtk_wed_start(struct mtk_wed_device *dev
  239. {
  240. int i;
  241. + if (mtk_wed_get_rx_capa(dev) && mtk_wed_rx_buffer_alloc(dev))
  242. + return;
  243. +
  244. for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
  245. if (!dev->rx_wdma[i].desc)
  246. mtk_wed_wdma_rx_ring_setup(dev, i, 16);
  247. @@ -1362,10 +1482,6 @@ mtk_wed_attach(struct mtk_wed_device *de
  248. goto out;
  249. if (mtk_wed_get_rx_capa(dev)) {
  250. - ret = mtk_wed_rx_buffer_alloc(dev);
  251. - if (ret)
  252. - goto out;
  253. -
  254. ret = mtk_wed_rro_alloc(dev);
  255. if (ret)
  256. goto out;
  257. --- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
  258. +++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
  259. @@ -24,11 +24,15 @@ struct mtk_wdma_desc {
  260. #define MTK_WED_RESET 0x008
  261. #define MTK_WED_RESET_TX_BM BIT(0)
  262. +#define MTK_WED_RESET_RX_BM BIT(1)
  263. #define MTK_WED_RESET_TX_FREE_AGENT BIT(4)
  264. #define MTK_WED_RESET_WPDMA_TX_DRV BIT(8)
  265. #define MTK_WED_RESET_WPDMA_RX_DRV BIT(9)
  266. +#define MTK_WED_RESET_WPDMA_RX_D_DRV BIT(10)
  267. #define MTK_WED_RESET_WPDMA_INT_AGENT BIT(11)
  268. #define MTK_WED_RESET_WED_TX_DMA BIT(12)
  269. +#define MTK_WED_RESET_WED_RX_DMA BIT(13)
  270. +#define MTK_WED_RESET_WDMA_TX_DRV BIT(16)
  271. #define MTK_WED_RESET_WDMA_RX_DRV BIT(17)
  272. #define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
  273. #define MTK_WED_RESET_RX_RRO_QM BIT(20)
  274. @@ -158,6 +162,8 @@ struct mtk_wdma_desc {
  275. #define MTK_WED_RESET_IDX 0x20c
  276. #define MTK_WED_RESET_IDX_TX GENMASK(3, 0)
  277. #define MTK_WED_RESET_IDX_RX GENMASK(17, 16)
  278. +#define MTK_WED_RESET_IDX_RX_V2 GENMASK(7, 6)
  279. +#define MTK_WED_RESET_WPDMA_IDX_RX GENMASK(31, 30)
  280. #define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4)
  281. #define MTK_WED_RX_MIB(_n) (0x2e0 + (_n) * 4)
  282. @@ -267,6 +273,9 @@ struct mtk_wdma_desc {
  283. #define MTK_WED_WPDMA_RX_D_GLO_CFG 0x75c
  284. #define MTK_WED_WPDMA_RX_D_RX_DRV_EN BIT(0)
  285. +#define MTK_WED_WPDMA_RX_D_RX_DRV_BUSY BIT(1)
  286. +#define MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE BIT(3)
  287. +#define MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE BIT(4)
  288. #define MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL GENMASK(11, 7)
  289. #define MTK_WED_WPDMA_RX_D_RXD_READ_LEN GENMASK(31, 24)