752-17-v6.7-net-ethernet-mtk_wed-introduce-hw_rro-support-for-MT.patch

From: Sujuan Chen <[email protected]>
Date: Mon, 18 Sep 2023 12:29:16 +0200
Subject: [PATCH] net: ethernet: mtk_wed: introduce hw_rro support for MT7988

The MT7988 SoC supports 802.11 receive reordering offload in hardware,
while the MT7986 SoC implements it through the firmware running on the MCU.

Co-developed-by: Lorenzo Bianconi <[email protected]>
Signed-off-by: Lorenzo Bianconi <[email protected]>
Signed-off-by: Sujuan Chen <[email protected]>
Signed-off-by: Paolo Abeni <[email protected]>
---
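Note on the buffer-pool sizing in the first hunk, assuming 4 KiB pages: the
renamed MTK_WED_RX_BUF_PER_PAGE now derives from MTK_WED_PAGE_BUF_SIZE instead
of a bare 128, and evaluates to 4096 / 128 = 32 page buffers per page.
mtk_wed_hwrro_buffer_alloc() therefore backs the 8192-entry page buffer
manager (MTK_WED_RX_PG_BM_CNT) with 8192 / 32 = 256 pages, which is exactly
the n_pages worth of page_list entries it allocates.
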
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -27,7 +27,7 @@
 #define MTK_WED_BUF_SIZE		2048
 #define MTK_WED_PAGE_BUF_SIZE		128
 #define MTK_WED_BUF_PER_PAGE		(PAGE_SIZE / 2048)
-#define MTK_WED_RX_PAGE_BUF_PER_PAGE	(PAGE_SIZE / 128)
+#define MTK_WED_RX_BUF_PER_PAGE		(PAGE_SIZE / MTK_WED_PAGE_BUF_SIZE)
 #define MTK_WED_RX_RING_SIZE		1536
 #define MTK_WED_RX_PG_BM_CNT		8192
 #define MTK_WED_AMSDU_BUF_SIZE		(PAGE_SIZE << 4)
@@ -597,6 +597,68 @@ free_pagelist:
 }
 
 static int
+mtk_wed_hwrro_buffer_alloc(struct mtk_wed_device *dev)
+{
+	int n_pages = MTK_WED_RX_PG_BM_CNT / MTK_WED_RX_BUF_PER_PAGE;
+	struct mtk_wed_buf *page_list;
+	struct mtk_wed_bm_desc *desc;
+	dma_addr_t desc_phys;
+	int i, page_idx = 0;
+
+	if (!dev->wlan.hw_rro)
+		return 0;
+
+	page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
+	if (!page_list)
+		return -ENOMEM;
+
+	dev->hw_rro.size = dev->wlan.rx_nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
+	dev->hw_rro.pages = page_list;
+	desc = dma_alloc_coherent(dev->hw->dev,
+				  dev->wlan.rx_nbuf * sizeof(*desc),
+				  &desc_phys, GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+
+	dev->hw_rro.desc = desc;
+	dev->hw_rro.desc_phys = desc_phys;
+
+	for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) {
+		dma_addr_t page_phys, buf_phys;
+		struct page *page;
+		int s;
+
+		page = __dev_alloc_page(GFP_KERNEL);
+		if (!page)
+			return -ENOMEM;
+
+		page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
+					 DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(dev->hw->dev, page_phys)) {
+			__free_page(page);
+			return -ENOMEM;
+		}
+
+		page_list[page_idx].p = page;
+		page_list[page_idx++].phy_addr = page_phys;
+		dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
+					DMA_BIDIRECTIONAL);
+
+		buf_phys = page_phys;
+		for (s = 0; s < MTK_WED_RX_BUF_PER_PAGE; s++) {
+			desc->buf0 = cpu_to_le32(buf_phys);
+			buf_phys += MTK_WED_PAGE_BUF_SIZE;
+			desc++;
+		}
+
+		dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
+					   DMA_BIDIRECTIONAL);
+	}
+
+	return 0;
+}
+
+static int
 mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
 {
 	struct mtk_wed_bm_desc *desc;
@@ -613,7 +675,42 @@ mtk_wed_rx_buffer_alloc(struct mtk_wed_d
 	dev->rx_buf_ring.desc_phys = desc_phys;
 	dev->wlan.init_rx_buf(dev, dev->wlan.rx_npkt);
 
-	return 0;
+	return mtk_wed_hwrro_buffer_alloc(dev);
+}
+
+static void
+mtk_wed_hwrro_free_buffer(struct mtk_wed_device *dev)
+{
+	struct mtk_wed_buf *page_list = dev->hw_rro.pages;
+	struct mtk_wed_bm_desc *desc = dev->hw_rro.desc;
+	int i, page_idx = 0;
+
+	if (!dev->wlan.hw_rro)
+		return;
+
+	if (!page_list)
+		return;
+
+	if (!desc)
+		goto free_pagelist;
+
+	for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) {
+		dma_addr_t buf_addr = page_list[page_idx].phy_addr;
+		void *page = page_list[page_idx++].p;
+
+		if (!page)
+			break;
+
+		dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
+			       DMA_BIDIRECTIONAL);
+		__free_page(page);
+	}
+
+	dma_free_coherent(dev->hw->dev, dev->hw_rro.size * sizeof(*desc),
+			  desc, dev->hw_rro.desc_phys);
+
+free_pagelist:
+	kfree(page_list);
 }
 
 static void
@@ -627,6 +724,28 @@ mtk_wed_free_rx_buffer(struct mtk_wed_de
 	dev->wlan.release_rx_buf(dev);
 	dma_free_coherent(dev->hw->dev, dev->rx_buf_ring.size * sizeof(*desc),
 			  desc, dev->rx_buf_ring.desc_phys);
+
+	mtk_wed_hwrro_free_buffer(dev);
+}
+
+static void
+mtk_wed_hwrro_init(struct mtk_wed_device *dev)
+{
+	if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro)
+		return;
+
+	wed_set(dev, MTK_WED_RRO_PG_BM_RX_DMAM,
+		FIELD_PREP(MTK_WED_RRO_PG_BM_RX_SDL0, 128));
+
+	wed_w32(dev, MTK_WED_RRO_PG_BM_BASE, dev->hw_rro.desc_phys);
+
+	wed_w32(dev, MTK_WED_RRO_PG_BM_INIT_PTR,
+		MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX |
+		FIELD_PREP(MTK_WED_RRO_PG_BM_SW_TAIL_IDX,
+			   MTK_WED_RX_PG_BM_CNT));
+
+	/* enable rx_page_bm to fetch dmad */
+	wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN);
 }
 
 static void
@@ -640,6 +759,8 @@ mtk_wed_rx_buffer_hw_init(struct mtk_wed
 	wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH,
 		FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff));
 	wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
+
+	mtk_wed_hwrro_init(dev);
 }
 
 static void
@@ -934,6 +1055,8 @@ mtk_wed_bus_init(struct mtk_wed_device *
 static void
 mtk_wed_set_wpdma(struct mtk_wed_device *dev)
 {
+	int i;
+
 	if (mtk_wed_is_v1(dev->hw)) {
 		wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
 		return;
@@ -951,6 +1074,15 @@ mtk_wed_set_wpdma(struct mtk_wed_device
 
 	wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
 	wed_w32(dev, dev->hw->soc->regmap.wpdma_rx_ring0, dev->wlan.wpdma_rx);
+
+	if (!dev->wlan.hw_rro)
+		return;
+
+	wed_w32(dev, MTK_WED_RRO_RX_D_CFG(0), dev->wlan.wpdma_rx_rro[0]);
+	wed_w32(dev, MTK_WED_RRO_RX_D_CFG(1), dev->wlan.wpdma_rx_rro[1]);
+	for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++)
+		wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING_CFG(i),
+			dev->wlan.wpdma_rx_pg + i * 0x10);
 }
 
 static void
@@ -1762,6 +1894,165 @@ mtk_wed_dma_enable(struct mtk_wed_device
 }
 
 static void
+mtk_wed_start_hw_rro(struct mtk_wed_device *dev, u32 irq_mask, bool reset)
+{
+	int i;
+
+	wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
+	wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
+
+	if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro)
+		return;
+
+	wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_MSDU_PG_DRV_CLR);
+	wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
+		MTK_WED_RRO_MSDU_PG_DRV_CLR);
+
+	wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_RX,
+		MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN |
+		MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR |
+		MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN |
+		MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR |
+		FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG,
+			   dev->wlan.rro_rx_tbit[0]) |
+		FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG,
+			   dev->wlan.rro_rx_tbit[1]));
+
+	wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG,
+		MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN |
+		MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR |
+		MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN |
+		MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR |
+		MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN |
+		MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR |
+		FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG,
+			   dev->wlan.rx_pg_tbit[0]) |
+		FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG,
+			   dev->wlan.rx_pg_tbit[1]) |
+		FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG,
+			   dev->wlan.rx_pg_tbit[2]));
+
+	/* RRO_MSDU_PG_RING2_CFG1_FLD_DRV_EN should be enabled after
+	 * WM FWDL completed, otherwise RRO_MSDU_PG ring may broken
+	 */
+	wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
+		MTK_WED_RRO_MSDU_PG_DRV_EN);
+
+	for (i = 0; i < MTK_WED_RX_QUEUES; i++) {
+		struct mtk_wed_ring *ring = &dev->rx_rro_ring[i];
+
+		if (!(ring->flags & MTK_WED_RING_CONFIGURED))
+			continue;
+
+		if (mtk_wed_check_wfdma_rx_fill(dev, ring))
+			dev_err(dev->hw->dev,
+				"rx_rro_ring(%d) initialization failed\n", i);
+	}
+
+	for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) {
+		struct mtk_wed_ring *ring = &dev->rx_page_ring[i];
+
+		if (!(ring->flags & MTK_WED_RING_CONFIGURED))
+			continue;
+
+		if (mtk_wed_check_wfdma_rx_fill(dev, ring))
+			dev_err(dev->hw->dev,
+				"rx_page_ring(%d) initialization failed\n", i);
+	}
+}
+
+static void
+mtk_wed_rro_rx_ring_setup(struct mtk_wed_device *dev, int idx,
+			  void __iomem *regs)
+{
+	struct mtk_wed_ring *ring = &dev->rx_rro_ring[idx];
+
+	ring->wpdma = regs;
+	wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_BASE,
+		readl(regs));
+	wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_COUNT,
+		readl(regs + MTK_WED_RING_OFS_COUNT));
+	ring->flags |= MTK_WED_RING_CONFIGURED;
+}
+
+static void
+mtk_wed_msdu_pg_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
+{
+	struct mtk_wed_ring *ring = &dev->rx_page_ring[idx];
+
+	ring->wpdma = regs;
+	wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_BASE,
+		readl(regs));
+	wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_COUNT,
+		readl(regs + MTK_WED_RING_OFS_COUNT));
+	ring->flags |= MTK_WED_RING_CONFIGURED;
+}
+
+static int
+mtk_wed_ind_rx_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
+{
+	struct mtk_wed_ring *ring = &dev->ind_cmd_ring;
+	u32 val = readl(regs + MTK_WED_RING_OFS_COUNT);
+	int i, count = 0;
+
+	ring->wpdma = regs;
+	wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_BASE,
+		readl(regs) & 0xfffffff0);
+
+	wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_COUNT,
+		readl(regs + MTK_WED_RING_OFS_COUNT));
+
+	/* ack sn cr */
+	wed_w32(dev, MTK_WED_RRO_CFG0, dev->wlan.phy_base +
+		dev->wlan.ind_cmd.ack_sn_addr);
+	wed_w32(dev, MTK_WED_RRO_CFG1,
+		FIELD_PREP(MTK_WED_RRO_CFG1_MAX_WIN_SZ,
+			   dev->wlan.ind_cmd.win_size) |
+		FIELD_PREP(MTK_WED_RRO_CFG1_PARTICL_SE_ID,
+			   dev->wlan.ind_cmd.particular_sid));
+
+	/* particular session addr element */
+	wed_w32(dev, MTK_WED_ADDR_ELEM_CFG0,
+		dev->wlan.ind_cmd.particular_se_phys);
+
+	for (i = 0; i < dev->wlan.ind_cmd.se_group_nums; i++) {
+		wed_w32(dev, MTK_WED_RADDR_ELEM_TBL_WDATA,
+			dev->wlan.ind_cmd.addr_elem_phys[i] >> 4);
+		wed_w32(dev, MTK_WED_ADDR_ELEM_TBL_CFG,
+			MTK_WED_ADDR_ELEM_TBL_WR | (i & 0x7f));
+
+		val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG);
+		while (!(val & MTK_WED_ADDR_ELEM_TBL_WR_RDY) && count++ < 100)
+			val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG);
+		if (count >= 100)
+			dev_err(dev->hw->dev,
+				"write ba session base failed\n");
+	}
+
+	/* pn check init */
+	for (i = 0; i < dev->wlan.ind_cmd.particular_sid; i++) {
+		wed_w32(dev, MTK_WED_PN_CHECK_WDATA_M,
+			MTK_WED_PN_CHECK_IS_FIRST);
+
+		wed_w32(dev, MTK_WED_PN_CHECK_CFG, MTK_WED_PN_CHECK_WR |
+			FIELD_PREP(MTK_WED_PN_CHECK_SE_ID, i));
+
+		count = 0;
+		val = wed_r32(dev, MTK_WED_PN_CHECK_CFG);
+		while (!(val & MTK_WED_PN_CHECK_WR_RDY) && count++ < 100)
+			val = wed_r32(dev, MTK_WED_PN_CHECK_CFG);
+		if (count >= 100)
+			dev_err(dev->hw->dev,
+				"session(%d) initialization failed\n", i);
+	}
+
+	wed_w32(dev, MTK_WED_RX_IND_CMD_CNT0, MTK_WED_RX_IND_CMD_DBG_CNT_EN);
+	wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN);
+
+	return 0;
+}
+
+static void
 mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
 {
 	int i;
@@ -2212,6 +2503,10 @@ void mtk_wed_add_hw(struct device_node *
 		.detach = mtk_wed_detach,
 		.ppe_check = mtk_wed_ppe_check,
 		.setup_tc = mtk_wed_setup_tc,
+		.start_hw_rro = mtk_wed_start_hw_rro,
+		.rro_rx_ring_setup = mtk_wed_rro_rx_ring_setup,
+		.msdu_pg_rx_ring_setup = mtk_wed_msdu_pg_rx_ring_setup,
+		.ind_rx_ring_setup = mtk_wed_ind_rx_ring_setup,
 	};
 	struct device_node *eth_np = eth->dev->of_node;
 	struct platform_device *pdev;
--- a/include/linux/soc/mediatek/mtk_wed.h
+++ b/include/linux/soc/mediatek/mtk_wed.h
@@ -10,6 +10,7 @@
 
 #define MTK_WED_TX_QUEUES		2
 #define MTK_WED_RX_QUEUES		2
+#define MTK_WED_RX_PAGE_QUEUES		3
 
 #define WED_WO_STA_REC			0x6
 
@@ -99,6 +100,9 @@ struct mtk_wed_device {
 	struct mtk_wed_ring txfree_ring;
 	struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES];
 	struct mtk_wed_ring rx_wdma[MTK_WED_RX_QUEUES];
+	struct mtk_wed_ring rx_rro_ring[MTK_WED_RX_QUEUES];
+	struct mtk_wed_ring rx_page_ring[MTK_WED_RX_PAGE_QUEUES];
+	struct mtk_wed_ring ind_cmd_ring;
 
 	struct {
 		int size;
@@ -119,6 +123,13 @@ struct mtk_wed_device {
 		dma_addr_t fdbk_phys;
 	} rro;
 
+	struct {
+		int size;
+		struct mtk_wed_buf *pages;
+		struct mtk_wed_bm_desc *desc;
+		dma_addr_t desc_phys;
+	} hw_rro;
+
 	/* filled by driver: */
 	struct {
 		union {
@@ -137,6 +148,8 @@ struct mtk_wed_device {
 		u32 wpdma_txfree;
 		u32 wpdma_rx_glo;
 		u32 wpdma_rx;
+		u32 wpdma_rx_rro[MTK_WED_RX_QUEUES];
+		u32 wpdma_rx_pg;
 
 		bool wcid_512;
 		bool hw_rro;
@@ -151,9 +164,20 @@ struct mtk_wed_device {
 
 		u8 tx_tbit[MTK_WED_TX_QUEUES];
 		u8 rx_tbit[MTK_WED_RX_QUEUES];
+		u8 rro_rx_tbit[MTK_WED_RX_QUEUES];
+		u8 rx_pg_tbit[MTK_WED_RX_PAGE_QUEUES];
 		u8 txfree_tbit;
 		u8 amsdu_max_subframes;
 
+		struct {
+			u8 se_group_nums;
+			u16 win_size;
+			u16 particular_sid;
+			u32 ack_sn_addr;
+			dma_addr_t particular_se_phys;
+			dma_addr_t addr_elem_phys[1024];
+		} ind_cmd;
+
 		u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
 		int (*offload_enable)(struct mtk_wed_device *wed);
 		void (*offload_disable)(struct mtk_wed_device *wed);
@@ -192,6 +216,14 @@ struct mtk_wed_ops {
 	void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
 	int (*setup_tc)(struct mtk_wed_device *wed, struct net_device *dev,
 			enum tc_setup_type type, void *type_data);
+	void (*start_hw_rro)(struct mtk_wed_device *dev, u32 irq_mask,
+			     bool reset);
+	void (*rro_rx_ring_setup)(struct mtk_wed_device *dev, int ring,
+				  void __iomem *regs);
+	void (*msdu_pg_rx_ring_setup)(struct mtk_wed_device *dev, int ring,
+				      void __iomem *regs);
+	int (*ind_rx_ring_setup)(struct mtk_wed_device *dev,
+				 void __iomem *regs);
 };
 
 extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
@@ -263,6 +295,15 @@ static inline bool mtk_wed_is_amsdu_supp
 #define mtk_wed_device_dma_reset(_dev) (_dev)->ops->reset_dma(_dev)
 #define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) \
 	(_dev)->ops->setup_tc(_dev, _netdev, _type, _type_data)
+#define mtk_wed_device_start_hw_rro(_dev, _mask, _reset) \
+	(_dev)->ops->start_hw_rro(_dev, _mask, _reset)
+#define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) \
+	(_dev)->ops->rro_rx_ring_setup(_dev, _ring, _regs)
+#define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs) \
+	(_dev)->ops->msdu_pg_rx_ring_setup(_dev, _ring, _regs)
+#define mtk_wed_device_ind_rx_ring_setup(_dev, _regs) \
+	(_dev)->ops->ind_rx_ring_setup(_dev, _regs)
+
 #else
 static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
 {
@@ -282,6 +323,10 @@ static inline bool mtk_wed_device_active
 #define mtk_wed_device_stop(_dev) do {} while (0)
 #define mtk_wed_device_dma_reset(_dev) do {} while (0)
 #define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) -EOPNOTSUPP
+#define mtk_wed_device_start_hw_rro(_dev, _mask, _reset) do {} while (0)
+#define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) -ENODEV
+#define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs) -ENODEV
+#define mtk_wed_device_ind_rx_ring_setup(_dev, _regs) -ENODEV
 #endif
 
 #endif
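
For reference, a minimal driver-side sketch of how the hooks exported above
fit together, for readers without the matching wlan-driver changes in front
of them. Everything here except the mtk_wed_device_* macros and the
MTK_WED_*_QUEUES constants is an assumption for illustration: the example_
function name, the ring register offsets, and the irq mask are placeholders,
not code from this series.

#include <linux/soc/mediatek/mtk_wed.h>

/* Hypothetical MT7988 hw_rro bring-up order. The wlan driver fills
 * wed->wlan.hw_rro, wpdma_rx_rro[], wpdma_rx_pg and ind_cmd beforehand,
 * then hands over its rings and starts the offload path.
 */
static int example_hw_rro_start(struct mtk_wed_device *wed,
				void __iomem *base, u32 irq_mask)
{
	int i, err;

	if (!wed->wlan.hw_rro)	/* e.g. MT7986: RRO stays on the MCU firmware */
		return 0;

	/* Hand the RRO RX data rings over to WED (offsets made up here). */
	for (i = 0; i < MTK_WED_RX_QUEUES; i++)
		mtk_wed_device_rro_rx_ring_setup(wed, i,
						 base + 0x100 + i * 0x10);

	/* Same for the three MSDU page rings. */
	for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++)
		mtk_wed_device_msdu_pg_rx_ring_setup(wed, i,
						     base + 0x200 + i * 0x10);

	/* Indication command ring; this also programs the ack_sn address,
	 * BA window size and PN-check state from wed->wlan.ind_cmd.
	 */
	err = mtk_wed_device_ind_rx_ring_setup(wed, base + 0x300);
	if (err)
		return err;

	/* Unmask the RRO interrupts and verify the rings are filled. */
	mtk_wed_device_start_hw_rro(wed, irq_mask, false);

	return 0;
}

The ring-setup calls must precede mtk_wed_device_start_hw_rro(), since the
start path only polls rings whose MTK_WED_RING_CONFIGURED flag was set by
the setup hooks.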