752-16-v6.7-net-ethernet-mtk_wed-introduce-partial-AMSDU-offload.patch

From: Sujuan Chen <[email protected]>
Date: Mon, 18 Sep 2023 12:29:15 +0200
Subject: [PATCH] net: ethernet: mtk_wed: introduce partial AMSDU offload
 support for MT7988

Introduce partial AMSDU offload support for the MT7988 SoC in order to
merge, in hardware, packets belonging to the same AMSDU before passing
them to the WLAN NIC.

Co-developed-by: Lorenzo Bianconi <[email protected]>
Signed-off-by: Lorenzo Bianconi <[email protected]>
Signed-off-by: Sujuan Chen <[email protected]>
Signed-off-by: Paolo Abeni <[email protected]>
---
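For reviewers, the new id, amsdu_max_len and amsdu_max_subframes fields added to
struct mtk_wed_device below are meant to be filled in by the WLAN driver before it
attaches to WED. The sketch below is illustrative only: the wlan_wed_setup_amsdu()
helper and the numeric values are placeholders, not part of this patch.

/* Illustrative sketch: a WLAN driver advertising its AMSDU capabilities to
 * WED before attaching. Only the wlan.id/amsdu_* fields and
 * mtk_wed_is_amsdu_supported() come from this patch; everything else is a
 * placeholder.
 */
#include <linux/printk.h>
#include <linux/soc/mediatek/mtk_wed.h>

static void wlan_wed_setup_amsdu(struct mtk_wed_device *wed)
{
	wed->wlan.id = 0x7991;			/* chip id, checked in mtk_wed_amsdu_init() */
	wed->wlan.amsdu_max_len = 1536;		/* programmed into the HW as len >> 8 */
	wed->wlan.amsdu_max_subframes = 8;	/* max MSDUs merged per A-MSDU */

	if (mtk_wed_device_attach(wed) || !mtk_wed_is_amsdu_supported(wed))
		pr_debug("WED TX AMSDU offload not available\n");
}

Whether the offload is then used for a given flow is controlled by the new amsdu
bit in struct net_device_path (see the mtk_ppe_offload.c and netdevice.h hunks).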
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -439,7 +439,8 @@ int mtk_foe_entry_set_pppoe(struct mtk_e
 }
 
 int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
-			   int wdma_idx, int txq, int bss, int wcid)
+			   int wdma_idx, int txq, int bss, int wcid,
+			   bool amsdu_en)
 {
 	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
 	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
@@ -451,6 +452,7 @@ int mtk_foe_entry_set_wdma(struct mtk_et
 			MTK_FOE_IB2_WDMA_WINFO_V2;
 		l2->w3info = FIELD_PREP(MTK_FOE_WINFO_WCID_V3, wcid) |
 			     FIELD_PREP(MTK_FOE_WINFO_BSS_V3, bss);
+		l2->amsdu = FIELD_PREP(MTK_FOE_WINFO_AMSDU_EN, amsdu_en);
 		break;
 	case 2:
 		*ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -88,13 +88,13 @@ enum {
 #define MTK_FOE_WINFO_BSS_V3		GENMASK(23, 16)
 #define MTK_FOE_WINFO_WCID_V3		GENMASK(15, 0)
 
-#define MTK_FOE_WINFO_PAO_USR_INFO	GENMASK(15, 0)
-#define MTK_FOE_WINFO_PAO_TID		GENMASK(19, 16)
-#define MTK_FOE_WINFO_PAO_IS_FIXEDRATE	BIT(20)
-#define MTK_FOE_WINFO_PAO_IS_PRIOR	BIT(21)
-#define MTK_FOE_WINFO_PAO_IS_SP		BIT(22)
-#define MTK_FOE_WINFO_PAO_HF		BIT(23)
-#define MTK_FOE_WINFO_PAO_AMSDU_EN	BIT(24)
+#define MTK_FOE_WINFO_AMSDU_USR_INFO	GENMASK(15, 0)
+#define MTK_FOE_WINFO_AMSDU_TID		GENMASK(19, 16)
+#define MTK_FOE_WINFO_AMSDU_IS_FIXEDRATE	BIT(20)
+#define MTK_FOE_WINFO_AMSDU_IS_PRIOR	BIT(21)
+#define MTK_FOE_WINFO_AMSDU_IS_SP	BIT(22)
+#define MTK_FOE_WINFO_AMSDU_HF		BIT(23)
+#define MTK_FOE_WINFO_AMSDU_EN		BIT(24)
 
 enum {
 	MTK_FOE_STATE_INVALID,
@@ -123,7 +123,7 @@ struct mtk_foe_mac_info {
 
 	/* netsys_v3 */
 	u32 w3info;
-	u32 wpao;
+	u32 amsdu;
 };
 
 /* software-only entry type */
@@ -394,7 +394,8 @@ int mtk_foe_entry_set_vlan(struct mtk_et
 int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
 			    int sid);
 int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
-			   int wdma_idx, int txq, int bss, int wcid);
+			   int wdma_idx, int txq, int bss, int wcid,
+			   bool amsdu_en);
 int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry,
 			    unsigned int queue);
 int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -111,6 +111,7 @@ mtk_flow_get_wdma_info(struct net_device
 	info->queue = path->mtk_wdma.queue;
 	info->bss = path->mtk_wdma.bss;
 	info->wcid = path->mtk_wdma.wcid;
+	info->amsdu = path->mtk_wdma.amsdu;
 
 	return 0;
 }
@@ -192,7 +193,7 @@ mtk_flow_set_output_device(struct mtk_et
 
 	if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
 		mtk_foe_entry_set_wdma(eth, foe, info.wdma_idx, info.queue,
-				       info.bss, info.wcid);
+				       info.bss, info.wcid, info.amsdu);
 		if (mtk_is_netsys_v2_or_greater(eth)) {
 			switch (info.wdma_idx) {
 			case 0:
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -30,6 +30,8 @@
 #define MTK_WED_RX_PAGE_BUF_PER_PAGE	(PAGE_SIZE / 128)
 #define MTK_WED_RX_RING_SIZE		1536
 #define MTK_WED_RX_PG_BM_CNT		8192
+#define MTK_WED_AMSDU_BUF_SIZE		(PAGE_SIZE << 4)
+#define MTK_WED_AMSDU_NPAGES		32
 
 #define MTK_WED_TX_RING_SIZE		2048
 #define MTK_WED_WDMA_RING_SIZE		1024
@@ -173,6 +175,23 @@ mtk_wdma_rx_reset(struct mtk_wed_device
 	return ret;
 }
 
+static u32
+mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+	return !!(wed_r32(dev, reg) & mask);
+}
+
+static int
+mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+	int sleep = 15000;
+	int timeout = 100 * sleep;
+	u32 val;
+
+	return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
+				 timeout, false, dev, reg, mask);
+}
+
 static void
 mtk_wdma_tx_reset(struct mtk_wed_device *dev)
 {
@@ -336,6 +355,118 @@ out:
 }
 
 static int
+mtk_wed_amsdu_buffer_alloc(struct mtk_wed_device *dev)
+{
+	struct mtk_wed_hw *hw = dev->hw;
+	struct mtk_wed_amsdu *wed_amsdu;
+	int i;
+
+	if (!mtk_wed_is_v3_or_greater(hw))
+		return 0;
+
+	wed_amsdu = devm_kcalloc(hw->dev, MTK_WED_AMSDU_NPAGES,
+				 sizeof(*wed_amsdu), GFP_KERNEL);
+	if (!wed_amsdu)
+		return -ENOMEM;
+
+	for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) {
+		void *ptr;
+
+		/* each segment is 64K */
+		ptr = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN |
+					       __GFP_ZERO | __GFP_COMP |
+					       GFP_DMA32,
+					       get_order(MTK_WED_AMSDU_BUF_SIZE));
+		if (!ptr)
+			goto error;
+
+		wed_amsdu[i].txd = ptr;
+		wed_amsdu[i].txd_phy = dma_map_single(hw->dev, ptr,
+						      MTK_WED_AMSDU_BUF_SIZE,
+						      DMA_TO_DEVICE);
+		if (dma_mapping_error(hw->dev, wed_amsdu[i].txd_phy))
+			goto error;
+	}
+	dev->hw->wed_amsdu = wed_amsdu;
+
+	return 0;
+
+error:
+	for (i--; i >= 0; i--)
+		dma_unmap_single(hw->dev, wed_amsdu[i].txd_phy,
+				 MTK_WED_AMSDU_BUF_SIZE, DMA_TO_DEVICE);
+	return -ENOMEM;
+}
+
+static void
+mtk_wed_amsdu_free_buffer(struct mtk_wed_device *dev)
+{
+	struct mtk_wed_amsdu *wed_amsdu = dev->hw->wed_amsdu;
+	int i;
+
+	if (!wed_amsdu)
+		return;
+
+	for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) {
+		dma_unmap_single(dev->hw->dev, wed_amsdu[i].txd_phy,
+				 MTK_WED_AMSDU_BUF_SIZE, DMA_TO_DEVICE);
+		free_pages((unsigned long)wed_amsdu[i].txd,
+			   get_order(MTK_WED_AMSDU_BUF_SIZE));
+	}
+}
+
+static int
+mtk_wed_amsdu_init(struct mtk_wed_device *dev)
+{
+	struct mtk_wed_amsdu *wed_amsdu = dev->hw->wed_amsdu;
+	int i, ret;
+
+	if (!wed_amsdu)
+		return 0;
+
+	for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++)
+		wed_w32(dev, MTK_WED_AMSDU_HIFTXD_BASE_L(i),
+			wed_amsdu[i].txd_phy);
+
+	/* init all sta parameter */
+	wed_w32(dev, MTK_WED_AMSDU_STA_INFO_INIT, MTK_WED_AMSDU_STA_RMVL |
+		MTK_WED_AMSDU_STA_WTBL_HDRT_MODE |
+		FIELD_PREP(MTK_WED_AMSDU_STA_MAX_AMSDU_LEN,
+			   dev->wlan.amsdu_max_len >> 8) |
+		FIELD_PREP(MTK_WED_AMSDU_STA_MAX_AMSDU_NUM,
+			   dev->wlan.amsdu_max_subframes));
+
+	wed_w32(dev, MTK_WED_AMSDU_STA_INFO, MTK_WED_AMSDU_STA_INFO_DO_INIT);
+
+	ret = mtk_wed_poll_busy(dev, MTK_WED_AMSDU_STA_INFO,
+				MTK_WED_AMSDU_STA_INFO_DO_INIT);
+	if (ret) {
+		dev_err(dev->hw->dev, "amsdu initialization failed\n");
+		return ret;
+	}
+
+	/* init partial amsdu offload txd src */
+	wed_set(dev, MTK_WED_AMSDU_HIFTXD_CFG,
+		FIELD_PREP(MTK_WED_AMSDU_HIFTXD_SRC, dev->hw->index));
+
+	/* init qmem */
+	wed_set(dev, MTK_WED_AMSDU_PSE, MTK_WED_AMSDU_PSE_RESET);
+	ret = mtk_wed_poll_busy(dev, MTK_WED_MON_AMSDU_QMEM_STS1, BIT(29));
+	if (ret) {
+		pr_info("%s: amsdu qmem initialization failed\n", __func__);
+		return ret;
+	}
+
+	/* eagle E1 PCIE1 tx ring 22 flow control issue */
+	if (dev->wlan.id == 0x7991)
+		wed_clr(dev, MTK_WED_AMSDU_FIFO, MTK_WED_AMSDU_IS_PRIOR0_RING);
+
+	wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN);
+
+	return 0;
+}
+
+static int
 mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
 {
 	u32 desc_size = dev->hw->soc->tx_ring_desc_size;
@@ -709,6 +840,7 @@ __mtk_wed_detach(struct mtk_wed_device *
 
 	mtk_wdma_rx_reset(dev);
 	mtk_wed_reset(dev, MTK_WED_RESET_WED);
+	mtk_wed_amsdu_free_buffer(dev);
 	mtk_wed_free_tx_buffer(dev);
 	mtk_wed_free_tx_rings(dev);
 
@@ -1129,23 +1261,6 @@ mtk_wed_ring_reset(struct mtk_wed_ring *
 	}
 }
 
-static u32
-mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
-{
-	return !!(wed_r32(dev, reg) & mask);
-}
-
-static int
-mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
-{
-	int sleep = 15000;
-	int timeout = 100 * sleep;
-	u32 val;
-
-	return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
-				 timeout, false, dev, reg, mask);
-}
-
 static int
 mtk_wed_rx_reset(struct mtk_wed_device *dev)
 {
@@ -1692,6 +1807,7 @@ mtk_wed_start(struct mtk_wed_device *dev
 	}
 
 	mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
+	mtk_wed_amsdu_init(dev);
 
 	mtk_wed_dma_enable(dev);
 	dev->running = true;
@@ -1748,6 +1864,10 @@ mtk_wed_attach(struct mtk_wed_device *de
 	if (ret)
 		goto out;
 
+	ret = mtk_wed_amsdu_buffer_alloc(dev);
+	if (ret)
+		goto out;
+
 	if (mtk_wed_get_rx_capa(dev)) {
 		ret = mtk_wed_rro_alloc(dev);
 		if (ret)
--- a/drivers/net/ethernet/mediatek/mtk_wed.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
@@ -25,6 +25,11 @@ struct mtk_wed_soc_data {
 	u32 wdma_desc_size;
 };
 
+struct mtk_wed_amsdu {
+	void *txd;
+	dma_addr_t txd_phy;
+};
+
 struct mtk_wed_hw {
 	const struct mtk_wed_soc_data *soc;
 	struct device_node *node;
@@ -38,6 +43,7 @@ struct mtk_wed_hw {
 	struct dentry *debugfs_dir;
 	struct mtk_wed_device *wed_dev;
 	struct mtk_wed_wo *wed_wo;
+	struct mtk_wed_amsdu *wed_amsdu;
 	u32 pcie_base;
 	u32 debugfs_reg;
 	u32 num_flows;
@@ -52,6 +58,7 @@ struct mtk_wdma_info {
 	u8 queue;
 	u16 wcid;
 	u8 bss;
+	u8 amsdu;
 };
 
 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
@@ -672,6 +672,82 @@ struct mtk_wdma_desc {
 #define MTK_WED_WOCPU_VIEW_MIOD_BASE		0x8000
 #define MTK_WED_PCIE_INT_MASK			0x0
 
+#define MTK_WED_AMSDU_FIFO			0x1800
+#define MTK_WED_AMSDU_IS_PRIOR0_RING		BIT(10)
+
+#define MTK_WED_AMSDU_STA_INFO			0x01810
+#define MTK_WED_AMSDU_STA_INFO_DO_INIT		BIT(0)
+#define MTK_WED_AMSDU_STA_INFO_SET_INIT		BIT(1)
+
+#define MTK_WED_AMSDU_STA_INFO_INIT		0x01814
+#define MTK_WED_AMSDU_STA_WTBL_HDRT_MODE	BIT(0)
+#define MTK_WED_AMSDU_STA_RMVL			BIT(1)
+#define MTK_WED_AMSDU_STA_MAX_AMSDU_LEN		GENMASK(7, 2)
+#define MTK_WED_AMSDU_STA_MAX_AMSDU_NUM		GENMASK(11, 8)
+
+#define MTK_WED_AMSDU_HIFTXD_BASE_L(_n)		(0x1980 + (_n) * 0x4)
+
+#define MTK_WED_AMSDU_PSE			0x1910
+#define MTK_WED_AMSDU_PSE_RESET			BIT(16)
+
+#define MTK_WED_AMSDU_HIFTXD_CFG		0x1968
+#define MTK_WED_AMSDU_HIFTXD_SRC		GENMASK(16, 15)
+
+#define MTK_WED_MON_AMSDU_FIFO_DMAD		0x1a34
+
+#define MTK_WED_MON_AMSDU_ENG_DMAD(_n)		(0x1a80 + (_n) * 0x50)
+#define MTK_WED_MON_AMSDU_ENG_QFPL(_n)		(0x1a84 + (_n) * 0x50)
+#define MTK_WED_MON_AMSDU_ENG_QENI(_n)		(0x1a88 + (_n) * 0x50)
+#define MTK_WED_MON_AMSDU_ENG_QENO(_n)		(0x1a8c + (_n) * 0x50)
+#define MTK_WED_MON_AMSDU_ENG_MERG(_n)		(0x1a90 + (_n) * 0x50)
+
+#define MTK_WED_MON_AMSDU_ENG_CNT8(_n)		(0x1a94 + (_n) * 0x50)
+#define MTK_WED_AMSDU_ENG_MAX_QGPP_CNT		GENMASK(10, 0)
+#define MTK_WED_AMSDU_ENG_MAX_PL_CNT		GENMASK(27, 16)
+
+#define MTK_WED_MON_AMSDU_ENG_CNT9(_n)		(0x1a98 + (_n) * 0x50)
+#define MTK_WED_AMSDU_ENG_CUR_ENTRY		GENMASK(10, 0)
+#define MTK_WED_AMSDU_ENG_MAX_BUF_MERGED	GENMASK(20, 16)
+#define MTK_WED_AMSDU_ENG_MAX_MSDU_MERGED	GENMASK(28, 24)
+
+#define MTK_WED_MON_AMSDU_QMEM_STS1		0x1e04
+
+#define MTK_WED_MON_AMSDU_QMEM_CNT(_n)		(0x1e0c + (_n) * 0x4)
+#define MTK_WED_AMSDU_QMEM_FQ_CNT		GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_SP_QCNT		GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID0_QCNT		GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID1_QCNT		GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID2_QCNT		GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID3_QCNT		GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID4_QCNT		GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID5_QCNT		GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID6_QCNT		GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID7_QCNT		GENMASK(11, 0)
+
+#define MTK_WED_MON_AMSDU_QMEM_PTR(_n)		(0x1e20 + (_n) * 0x4)
+#define MTK_WED_AMSDU_QMEM_FQ_HEAD		GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_SP_QHEAD		GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID0_QHEAD		GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID1_QHEAD		GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID2_QHEAD		GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID3_QHEAD		GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID4_QHEAD		GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID5_QHEAD		GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID6_QHEAD		GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID7_QHEAD		GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_FQ_TAIL		GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_SP_QTAIL		GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID0_QTAIL		GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID1_QTAIL		GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID2_QTAIL		GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID3_QTAIL		GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID4_QTAIL		GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID5_QTAIL		GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID6_QTAIL		GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID7_QTAIL		GENMASK(11, 0)
+
+#define MTK_WED_MON_AMSDU_HIFTXD_FETCH_MSDU(_n)	(0x1ec4 + (_n) * 0x4)
+
 #define MTK_WED_PCIE_BASE			0x11280000
 #define MTK_WED_PCIE_BASE0			0x11300000
 #define MTK_WED_PCIE_BASE1			0x11310000
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -917,6 +917,7 @@ struct net_device_path {
 			u8 queue;
 			u16 wcid;
 			u8 bss;
+			u8 amsdu;
 		} mtk_wdma;
 	};
 };
--- a/include/linux/soc/mediatek/mtk_wed.h
+++ b/include/linux/soc/mediatek/mtk_wed.h
@@ -128,6 +128,7 @@ struct mtk_wed_device {
 		enum mtk_wed_bus_tye bus_type;
 		void __iomem *base;
 		u32 phy_base;
+		u32 id;
 
 		u32 wpdma_phys;
 		u32 wpdma_int;
@@ -146,10 +147,12 @@ struct mtk_wed_device {
 		unsigned int rx_nbuf;
 		unsigned int rx_npkt;
 		unsigned int rx_size;
+		unsigned int amsdu_max_len;
 
 		u8 tx_tbit[MTK_WED_TX_QUEUES];
 		u8 rx_tbit[MTK_WED_RX_QUEUES];
 		u8 txfree_tbit;
+		u8 amsdu_max_subframes;
 
 		u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
 		int (*offload_enable)(struct mtk_wed_device *wed);
@@ -223,6 +226,15 @@ static inline bool mtk_wed_get_rx_capa(s
 #else
 	return false;
 #endif
+}
+
+static inline bool mtk_wed_is_amsdu_supported(struct mtk_wed_device *dev)
+{
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+	return dev->version == 3;
+#else
+	return false;
+#endif
 }
 
 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
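
The amsdu bit consumed by mtk_flow_get_wdma_info() above has to be reported by
the WLAN driver when it fills the forwarding path. A minimal, hypothetical
sketch of that producer side follows; the per-station capability flag and the
constant indices are placeholders, not defined by this patch.

#include <linux/netdevice.h>

/* Hypothetical .ndo_fill_forward_path implementation: only the
 * path->mtk_wdma.amsdu assignment relates to this patch, the other
 * values stand in for whatever state the WLAN driver tracks.
 */
static int wlan_fill_forward_path(struct net_device_path_ctx *ctx,
				  struct net_device_path *path)
{
	bool sta_hw_amsdu_capable = true;	/* placeholder per-station state */

	path->type = DEV_PATH_MTK_WDMA;
	path->dev = ctx->dev;
	path->mtk_wdma.wdma_idx = 0;
	path->mtk_wdma.queue = 0;
	path->mtk_wdma.bss = 0;
	path->mtk_wdma.wcid = 1;
	path->mtk_wdma.amsdu = sta_hw_amsdu_capable;

	return 0;
}

mtk_foe_entry_set_wdma() then propagates this bit into the FOE entry via
MTK_FOE_WINFO_AMSDU_EN, and mtk_wed_amsdu_init() only enables the engine on
WED v3 (MT7988) hardware.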