729-02-v6.1-net-ethernet-mtk_wed-introduce-wed-wo-support.patch

From: Lorenzo Bianconi <[email protected]>
Date: Sat, 5 Nov 2022 23:36:19 +0100
Subject: [PATCH] net: ethernet: mtk_wed: introduce wed wo support

Introduce WO chip support to the mtk_wed driver. MTK WED WO is used to
implement RX Wireless Ethernet Dispatch and to offload traffic received
by the WLAN NIC to the wired interface.

Tested-by: Daniel Golle <[email protected]>
Co-developed-by: Sujuan Chen <[email protected]>
Signed-off-by: Sujuan Chen <[email protected]>
Signed-off-by: Lorenzo Bianconi <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
---
 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_wo.c
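
Reviewer note (this sits after the '---' marker, so git-am drops it):
with this patch, firmware commands sent through the existing
mtk_wed_mcu_send_msg() helper are actually queued to the WO MCU via
mtk_wed_wo_queue_tx_skb() instead of being freed. A minimal usage
sketch follows; the command id MTK_WED_WO_CMD_EXAMPLE is hypothetical,
while mtk_wed_mcu_send_msg() and MTK_WED_MODULE_ID_WO are the driver's
own symbols:

        static int wo_send_example(struct mtk_wed_wo *wo, const void *data,
                                   int len)
        {
                /* builds an skb with an mcu header, then ends up in
                 * mtk_wed_wo_queue_tx_skb(wo, &wo->q_tx, skb) below
                 */
                return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
                                            MTK_WED_WO_CMD_EXAMPLE,
                                            data, len, true);
        }
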
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -5,7 +5,7 @@
 obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
 mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
-mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o mtk_wed_mcu.o
+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o mtk_wed_mcu.o mtk_wed_wo.o
 ifdef CONFIG_DEBUG_FS
 mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
 endif
  24. --- a/drivers/net/ethernet/mediatek/mtk_wed.c
  25. +++ b/drivers/net/ethernet/mediatek/mtk_wed.c
  26. @@ -16,6 +16,7 @@
  27. #include "mtk_wed_regs.h"
  28. #include "mtk_wed.h"
  29. #include "mtk_ppe.h"
  30. +#include "mtk_wed_wo.h"
  31. #define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000)
  32. @@ -355,6 +356,8 @@ mtk_wed_detach(struct mtk_wed_device *de
  33. mtk_wed_free_buffer(dev);
  34. mtk_wed_free_tx_rings(dev);
  35. + if (hw->version != 1)
  36. + mtk_wed_wo_deinit(hw);
  37. if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
  38. struct device_node *wlan_node;
  39. @@ -878,9 +881,11 @@ mtk_wed_attach(struct mtk_wed_device *de
  40. }
  41. mtk_wed_hw_init_early(dev);
  42. - if (hw->hifsys)
  43. + if (hw->version == 1)
  44. regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
  45. BIT(hw->index), 0);
  46. + else
  47. + ret = mtk_wed_wo_init(hw);
  48. out:
  49. mutex_unlock(&hw_lock);
  50. --- a/drivers/net/ethernet/mediatek/mtk_wed.h
  51. +++ b/drivers/net/ethernet/mediatek/mtk_wed.h
  52. @@ -10,6 +10,7 @@
  53. #include <linux/netdevice.h>
  54. struct mtk_eth;
  55. +struct mtk_wed_wo;
  56. struct mtk_wed_hw {
  57. struct device_node *node;
  58. @@ -22,6 +23,7 @@ struct mtk_wed_hw {
  59. struct regmap *mirror;
  60. struct dentry *debugfs_dir;
  61. struct mtk_wed_device *wed_dev;
  62. + struct mtk_wed_wo *wed_wo;
  63. u32 debugfs_reg;
  64. u32 num_flows;
  65. u8 version;
  66. --- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
  67. +++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
  68. @@ -122,8 +122,7 @@ mtk_wed_mcu_skb_send_msg(struct mtk_wed_
  69. if (id == MTK_WED_MODULE_ID_WO)
  70. hdr->flag |= cpu_to_le16(MTK_WED_WARP_CMD_FLAG_FROM_TO_WO);
  71. - dev_kfree_skb(skb);
  72. - return 0;
  73. + return mtk_wed_wo_queue_tx_skb(wo, &wo->q_tx, skb);
  74. }
  75. static int
  76. --- /dev/null
  77. +++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
  78. @@ -0,0 +1,508 @@
  79. +// SPDX-License-Identifier: GPL-2.0-only
  80. +/* Copyright (C) 2022 MediaTek Inc.
  81. + *
  82. + * Author: Lorenzo Bianconi <[email protected]>
  83. + * Sujuan Chen <[email protected]>
  84. + */
  85. +
  86. +#include <linux/kernel.h>
  87. +#include <linux/dma-mapping.h>
  88. +#include <linux/of_platform.h>
  89. +#include <linux/interrupt.h>
  90. +#include <linux/of_address.h>
  91. +#include <linux/mfd/syscon.h>
  92. +#include <linux/of_irq.h>
  93. +#include <linux/bitfield.h>
  94. +
  95. +#include "mtk_wed.h"
  96. +#include "mtk_wed_regs.h"
  97. +#include "mtk_wed_wo.h"
  98. +
  99. +static u32
  100. +mtk_wed_mmio_r32(struct mtk_wed_wo *wo, u32 reg)
  101. +{
  102. + u32 val;
  103. +
  104. + if (regmap_read(wo->mmio.regs, reg, &val))
  105. + val = ~0;
  106. +
  107. + return val;
  108. +}
  109. +
  110. +static void
  111. +mtk_wed_mmio_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
  112. +{
  113. + regmap_write(wo->mmio.regs, reg, val);
  114. +}
  115. +
  116. +static u32
  117. +mtk_wed_wo_get_isr(struct mtk_wed_wo *wo)
  118. +{
  119. + u32 val = mtk_wed_mmio_r32(wo, MTK_WED_WO_CCIF_RCHNUM);
  120. +
  121. + return val & MTK_WED_WO_CCIF_RCHNUM_MASK;
  122. +}
  123. +
  124. +static void
  125. +mtk_wed_wo_set_isr(struct mtk_wed_wo *wo, u32 mask)
  126. +{
  127. + mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_IRQ0_MASK, mask);
  128. +}
  129. +
  130. +static void
  131. +mtk_wed_wo_set_ack(struct mtk_wed_wo *wo, u32 mask)
  132. +{
  133. + mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
  134. +}
  135. +
  136. +static void
  137. +mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, u32 mask, u32 val, bool set)
  138. +{
  139. + unsigned long flags;
  140. +
  141. + spin_lock_irqsave(&wo->mmio.lock, flags);
  142. + wo->mmio.irq_mask &= ~mask;
  143. + wo->mmio.irq_mask |= val;
  144. + if (set)
  145. + mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
  146. + spin_unlock_irqrestore(&wo->mmio.lock, flags);
  147. +}
  148. +
  149. +static void
  150. +mtk_wed_wo_irq_enable(struct mtk_wed_wo *wo, u32 mask)
  151. +{
  152. + mtk_wed_wo_set_isr_mask(wo, 0, mask, false);
  153. + tasklet_schedule(&wo->mmio.irq_tasklet);
  154. +}
  155. +
  156. +static void
  157. +mtk_wed_wo_irq_disable(struct mtk_wed_wo *wo, u32 mask)
  158. +{
  159. + mtk_wed_wo_set_isr_mask(wo, mask, 0, true);
  160. +}
  161. +
  162. +static void
  163. +mtk_wed_wo_kickout(struct mtk_wed_wo *wo)
  164. +{
  165. + mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_BUSY, 1 << MTK_WED_WO_TXCH_NUM);
  166. + mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_TCHNUM, MTK_WED_WO_TXCH_NUM);
  167. +}
  168. +
  169. +static void
  170. +mtk_wed_wo_queue_kick(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
  171. + u32 val)
  172. +{
  173. + wmb();
  174. + mtk_wed_mmio_w32(wo, q->regs.cpu_idx, val);
  175. +}
  176. +
  177. +static void *
  178. +mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len,
  179. + bool flush)
  180. +{
  181. + int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
  182. + int index = (q->tail + 1) % q->n_desc;
  183. + struct mtk_wed_wo_queue_entry *entry;
  184. + struct mtk_wed_wo_queue_desc *desc;
  185. + void *buf;
  186. +
  187. + if (!q->queued)
  188. + return NULL;
  189. +
  190. + if (flush)
  191. + q->desc[index].ctrl |= cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE);
  192. + else if (!(q->desc[index].ctrl & cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE)))
  193. + return NULL;
  194. +
  195. + q->tail = index;
  196. + q->queued--;
  197. +
  198. + desc = &q->desc[index];
  199. + entry = &q->entry[index];
  200. + buf = entry->buf;
  201. + if (len)
  202. + *len = FIELD_GET(MTK_WED_WO_CTL_SD_LEN0,
  203. + le32_to_cpu(READ_ONCE(desc->ctrl)));
  204. + if (buf)
  205. + dma_unmap_single(wo->hw->dev, entry->addr, buf_len,
  206. + DMA_FROM_DEVICE);
  207. + entry->buf = NULL;
  208. +
  209. + return buf;
  210. +}
  211. +
  212. +static int
  213. +mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
  214. + gfp_t gfp, bool rx)
  215. +{
  216. + enum dma_data_direction dir = rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
  217. + int n_buf = 0;
  218. +
  219. + spin_lock_bh(&q->lock);
  220. + while (q->queued < q->n_desc) {
  221. + void *buf = page_frag_alloc(&q->cache, q->buf_size, gfp);
  222. + struct mtk_wed_wo_queue_entry *entry;
  223. + dma_addr_t addr;
  224. +
  225. + if (!buf)
  226. + break;
  227. +
  228. + addr = dma_map_single(wo->hw->dev, buf, q->buf_size, dir);
  229. + if (unlikely(dma_mapping_error(wo->hw->dev, addr))) {
  230. + skb_free_frag(buf);
  231. + break;
  232. + }
  233. +
  234. + q->head = (q->head + 1) % q->n_desc;
  235. + entry = &q->entry[q->head];
  236. + entry->addr = addr;
  237. + entry->len = q->buf_size;
  238. + q->entry[q->head].buf = buf;
  239. +
  240. + if (rx) {
  241. + struct mtk_wed_wo_queue_desc *desc = &q->desc[q->head];
  242. + u32 ctrl = MTK_WED_WO_CTL_LAST_SEC0 |
  243. + FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0,
  244. + entry->len);
  245. +
  246. + WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
  247. + WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
  248. + }
  249. + q->queued++;
  250. + n_buf++;
  251. + }
  252. + spin_unlock_bh(&q->lock);
  253. +
  254. + return n_buf;
  255. +}
  256. +
  257. +static void
  258. +mtk_wed_wo_rx_complete(struct mtk_wed_wo *wo)
  259. +{
  260. + mtk_wed_wo_set_ack(wo, MTK_WED_WO_RXCH_INT_MASK);
  261. + mtk_wed_wo_irq_enable(wo, MTK_WED_WO_RXCH_INT_MASK);
  262. +}
  263. +
  264. +static void
  265. +mtk_wed_wo_rx_run_queue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
  266. +{
  267. + for (;;) {
  268. + struct mtk_wed_mcu_hdr *hdr;
  269. + struct sk_buff *skb;
  270. + void *data;
  271. + u32 len;
  272. +
  273. + data = mtk_wed_wo_dequeue(wo, q, &len, false);
  274. + if (!data)
  275. + break;
  276. +
  277. + skb = build_skb(data, q->buf_size);
  278. + if (!skb) {
  279. + skb_free_frag(data);
  280. + continue;
  281. + }
  282. +
  283. + __skb_put(skb, len);
  284. + if (mtk_wed_mcu_check_msg(wo, skb)) {
  285. + dev_kfree_skb(skb);
  286. + continue;
  287. + }
  288. +
  289. + hdr = (struct mtk_wed_mcu_hdr *)skb->data;
  290. + if (hdr->flag & cpu_to_le16(MTK_WED_WARP_CMD_FLAG_RSP))
  291. + mtk_wed_mcu_rx_event(wo, skb);
  292. + else
  293. + mtk_wed_mcu_rx_unsolicited_event(wo, skb);
  294. + }
  295. +
  296. + if (mtk_wed_wo_queue_refill(wo, q, GFP_ATOMIC, true)) {
  297. + u32 index = (q->head - 1) % q->n_desc;
  298. +
  299. + mtk_wed_wo_queue_kick(wo, q, index);
  300. + }
  301. +}
  302. +
  303. +static irqreturn_t
  304. +mtk_wed_wo_irq_handler(int irq, void *data)
  305. +{
  306. + struct mtk_wed_wo *wo = data;
  307. +
  308. + mtk_wed_wo_set_isr(wo, 0);
  309. + tasklet_schedule(&wo->mmio.irq_tasklet);
  310. +
  311. + return IRQ_HANDLED;
  312. +}
  313. +
  314. +static void mtk_wed_wo_irq_tasklet(struct tasklet_struct *t)
  315. +{
  316. + struct mtk_wed_wo *wo = from_tasklet(wo, t, mmio.irq_tasklet);
  317. + u32 intr, mask;
  318. +
  319. + /* disable interrupts */
  320. + mtk_wed_wo_set_isr(wo, 0);
  321. +
  322. + intr = mtk_wed_wo_get_isr(wo);
  323. + intr &= wo->mmio.irq_mask;
  324. + mask = intr & (MTK_WED_WO_RXCH_INT_MASK | MTK_WED_WO_EXCEPTION_INT_MASK);
  325. + mtk_wed_wo_irq_disable(wo, mask);
  326. +
  327. + if (intr & MTK_WED_WO_RXCH_INT_MASK) {
  328. + mtk_wed_wo_rx_run_queue(wo, &wo->q_rx);
  329. + mtk_wed_wo_rx_complete(wo);
  330. + }
  331. +}
  332. +
  333. +/* mtk wed wo hw queues */
  334. +
  335. +static int
  336. +mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
  337. + int n_desc, int buf_size, int index,
  338. + struct mtk_wed_wo_queue_regs *regs)
  339. +{
  340. + spin_lock_init(&q->lock);
  341. + q->regs = *regs;
  342. + q->n_desc = n_desc;
  343. + q->buf_size = buf_size;
  344. +
  345. + q->desc = dmam_alloc_coherent(wo->hw->dev, n_desc * sizeof(*q->desc),
  346. + &q->desc_dma, GFP_KERNEL);
  347. + if (!q->desc)
  348. + return -ENOMEM;
  349. +
  350. + q->entry = devm_kzalloc(wo->hw->dev, n_desc * sizeof(*q->entry),
  351. + GFP_KERNEL);
  352. + if (!q->entry)
  353. + return -ENOMEM;
  354. +
  355. + return 0;
  356. +}
  357. +
  358. +static void
  359. +mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
  360. +{
  361. + mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
  362. + dma_free_coherent(wo->hw->dev, q->n_desc * sizeof(*q->desc), q->desc,
  363. + q->desc_dma);
  364. +}
  365. +
  366. +static void
  367. +mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
  368. +{
  369. + struct page *page;
  370. + int i;
  371. +
  372. + spin_lock_bh(&q->lock);
  373. + for (i = 0; i < q->n_desc; i++) {
  374. + struct mtk_wed_wo_queue_entry *entry = &q->entry[i];
  375. +
  376. + dma_unmap_single(wo->hw->dev, entry->addr, entry->len,
  377. + DMA_TO_DEVICE);
  378. + skb_free_frag(entry->buf);
  379. + entry->buf = NULL;
  380. + }
  381. + spin_unlock_bh(&q->lock);
  382. +
  383. + if (!q->cache.va)
  384. + return;
  385. +
  386. + page = virt_to_page(q->cache.va);
  387. + __page_frag_cache_drain(page, q->cache.pagecnt_bias);
  388. + memset(&q->cache, 0, sizeof(q->cache));
  389. +}
  390. +
  391. +static void
  392. +mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
  393. +{
  394. + struct page *page;
  395. +
  396. + spin_lock_bh(&q->lock);
  397. + for (;;) {
  398. + void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);
  399. +
  400. + if (!buf)
  401. + break;
  402. +
  403. + skb_free_frag(buf);
  404. + }
  405. + spin_unlock_bh(&q->lock);
  406. +
  407. + if (!q->cache.va)
  408. + return;
  409. +
  410. + page = virt_to_page(q->cache.va);
  411. + __page_frag_cache_drain(page, q->cache.pagecnt_bias);
  412. + memset(&q->cache, 0, sizeof(q->cache));
  413. +}
  414. +
  415. +static void
  416. +mtk_wed_wo_queue_reset(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
  417. +{
  418. + mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
  419. + mtk_wed_mmio_w32(wo, q->regs.desc_base, q->desc_dma);
  420. + mtk_wed_mmio_w32(wo, q->regs.ring_size, q->n_desc);
  421. +}
  422. +
  423. +int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
  424. + struct sk_buff *skb)
  425. +{
  426. + struct mtk_wed_wo_queue_entry *entry;
  427. + struct mtk_wed_wo_queue_desc *desc;
  428. + int ret = 0, index;
  429. + u32 ctrl;
  430. +
  431. + spin_lock_bh(&q->lock);
  432. +
  433. + q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx);
  434. + index = (q->head + 1) % q->n_desc;
  435. + if (q->tail == index) {
  436. + ret = -ENOMEM;
  437. + goto out;
  438. + }
  439. +
  440. + entry = &q->entry[index];
  441. + if (skb->len > entry->len) {
  442. + ret = -ENOMEM;
  443. + goto out;
  444. + }
  445. +
  446. + desc = &q->desc[index];
  447. + q->head = index;
  448. +
  449. + dma_sync_single_for_cpu(wo->hw->dev, entry->addr, skb->len,
  450. + DMA_TO_DEVICE);
  451. + memcpy(entry->buf, skb->data, skb->len);
  452. + dma_sync_single_for_device(wo->hw->dev, entry->addr, skb->len,
  453. + DMA_TO_DEVICE);
  454. +
  455. + ctrl = FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0, skb->len) |
  456. + MTK_WED_WO_CTL_LAST_SEC0 | MTK_WED_WO_CTL_DMA_DONE;
  457. + WRITE_ONCE(desc->buf0, cpu_to_le32(entry->addr));
  458. + WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
  459. +
  460. + mtk_wed_wo_queue_kick(wo, q, q->head);
  461. + mtk_wed_wo_kickout(wo);
  462. +out:
  463. + spin_unlock_bh(&q->lock);
  464. +
  465. + dev_kfree_skb(skb);
  466. +
  467. + return ret;
  468. +}
  469. +
  470. +static int
  471. +mtk_wed_wo_exception_init(struct mtk_wed_wo *wo)
  472. +{
  473. + return 0;
  474. +}
  475. +
  476. +static int
  477. +mtk_wed_wo_hardware_init(struct mtk_wed_wo *wo)
  478. +{
  479. + struct mtk_wed_wo_queue_regs regs;
  480. + struct device_node *np;
  481. + int ret;
  482. +
  483. + np = of_parse_phandle(wo->hw->node, "mediatek,wo-ccif", 0);
  484. + if (!np)
  485. + return -ENODEV;
  486. +
  487. + wo->mmio.regs = syscon_regmap_lookup_by_phandle(np, NULL);
  488. + if (IS_ERR_OR_NULL(wo->mmio.regs))
  489. + return PTR_ERR(wo->mmio.regs);
  490. +
  491. + wo->mmio.irq = irq_of_parse_and_map(np, 0);
  492. + wo->mmio.irq_mask = MTK_WED_WO_ALL_INT_MASK;
  493. + spin_lock_init(&wo->mmio.lock);
  494. + tasklet_setup(&wo->mmio.irq_tasklet, mtk_wed_wo_irq_tasklet);
  495. +
  496. + ret = devm_request_irq(wo->hw->dev, wo->mmio.irq,
  497. + mtk_wed_wo_irq_handler, IRQF_TRIGGER_HIGH,
  498. + KBUILD_MODNAME, wo);
  499. + if (ret)
  500. + goto error;
  501. +
  502. + regs.desc_base = MTK_WED_WO_CCIF_DUMMY1;
  503. + regs.ring_size = MTK_WED_WO_CCIF_DUMMY2;
  504. + regs.dma_idx = MTK_WED_WO_CCIF_SHADOW4;
  505. + regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY3;
  506. +
  507. + ret = mtk_wed_wo_queue_alloc(wo, &wo->q_tx, MTK_WED_WO_RING_SIZE,
  508. + MTK_WED_WO_CMD_LEN, MTK_WED_WO_TXCH_NUM,
  509. + &regs);
  510. + if (ret)
  511. + goto error;
  512. +
  513. + mtk_wed_wo_queue_refill(wo, &wo->q_tx, GFP_KERNEL, false);
  514. + mtk_wed_wo_queue_reset(wo, &wo->q_tx);
  515. +
  516. + regs.desc_base = MTK_WED_WO_CCIF_DUMMY5;
  517. + regs.ring_size = MTK_WED_WO_CCIF_DUMMY6;
  518. + regs.dma_idx = MTK_WED_WO_CCIF_SHADOW8;
  519. + regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY7;
  520. +
  521. + ret = mtk_wed_wo_queue_alloc(wo, &wo->q_rx, MTK_WED_WO_RING_SIZE,
  522. + MTK_WED_WO_CMD_LEN, MTK_WED_WO_RXCH_NUM,
  523. + &regs);
  524. + if (ret)
  525. + goto error;
  526. +
  527. + mtk_wed_wo_queue_refill(wo, &wo->q_rx, GFP_KERNEL, true);
  528. + mtk_wed_wo_queue_reset(wo, &wo->q_rx);
  529. +
  530. + /* rx queue irqmask */
  531. + mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
  532. +
  533. + return 0;
  534. +
  535. +error:
  536. + devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
  537. +
  538. + return ret;
  539. +}
  540. +
  541. +static void
  542. +mtk_wed_wo_hw_deinit(struct mtk_wed_wo *wo)
  543. +{
  544. + /* disable interrupts */
  545. + mtk_wed_wo_set_isr(wo, 0);
  546. +
  547. + tasklet_disable(&wo->mmio.irq_tasklet);
  548. +
  549. + disable_irq(wo->mmio.irq);
  550. + devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
  551. +
  552. + mtk_wed_wo_queue_tx_clean(wo, &wo->q_tx);
  553. + mtk_wed_wo_queue_rx_clean(wo, &wo->q_rx);
  554. + mtk_wed_wo_queue_free(wo, &wo->q_tx);
  555. + mtk_wed_wo_queue_free(wo, &wo->q_rx);
  556. +}
  557. +
  558. +int mtk_wed_wo_init(struct mtk_wed_hw *hw)
  559. +{
  560. + struct mtk_wed_wo *wo;
  561. + int ret;
  562. +
  563. + wo = devm_kzalloc(hw->dev, sizeof(*wo), GFP_KERNEL);
  564. + if (!wo)
  565. + return -ENOMEM;
  566. +
  567. + hw->wed_wo = wo;
  568. + wo->hw = hw;
  569. +
  570. + ret = mtk_wed_wo_hardware_init(wo);
  571. + if (ret)
  572. + return ret;
  573. +
  574. + ret = mtk_wed_mcu_init(wo);
  575. + if (ret)
  576. + return ret;
  577. +
  578. + return mtk_wed_wo_exception_init(wo);
  579. +}
  580. +
  581. +void mtk_wed_wo_deinit(struct mtk_wed_hw *hw)
  582. +{
  583. + struct mtk_wed_wo *wo = hw->wed_wo;
  584. +
  585. + mtk_wed_wo_hw_deinit(wo);
  586. +}
  587. --- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h
  588. +++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
  589. @@ -80,6 +80,54 @@ enum mtk_wed_dummy_cr_idx {
  590. #define MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK BIT(5)
  591. #define MTK_WO_MCU_CFG_LS_WF_WM_WA_WA_CPU_RSTB_MASK BIT(0)
  592. +#define MTK_WED_WO_RING_SIZE 256
  593. +#define MTK_WED_WO_CMD_LEN 1504
  594. +
  595. +#define MTK_WED_WO_TXCH_NUM 0
  596. +#define MTK_WED_WO_RXCH_NUM 1
  597. +#define MTK_WED_WO_RXCH_WO_EXCEPTION 7
  598. +
  599. +#define MTK_WED_WO_TXCH_INT_MASK BIT(0)
  600. +#define MTK_WED_WO_RXCH_INT_MASK BIT(1)
  601. +#define MTK_WED_WO_EXCEPTION_INT_MASK BIT(7)
  602. +#define MTK_WED_WO_ALL_INT_MASK (MTK_WED_WO_RXCH_INT_MASK | \
  603. + MTK_WED_WO_EXCEPTION_INT_MASK)
  604. +
  605. +#define MTK_WED_WO_CCIF_BUSY 0x004
  606. +#define MTK_WED_WO_CCIF_START 0x008
  607. +#define MTK_WED_WO_CCIF_TCHNUM 0x00c
  608. +#define MTK_WED_WO_CCIF_RCHNUM 0x010
  609. +#define MTK_WED_WO_CCIF_RCHNUM_MASK GENMASK(7, 0)
  610. +
  611. +#define MTK_WED_WO_CCIF_ACK 0x014
  612. +#define MTK_WED_WO_CCIF_IRQ0_MASK 0x018
  613. +#define MTK_WED_WO_CCIF_IRQ1_MASK 0x01c
  614. +#define MTK_WED_WO_CCIF_DUMMY1 0x020
  615. +#define MTK_WED_WO_CCIF_DUMMY2 0x024
  616. +#define MTK_WED_WO_CCIF_DUMMY3 0x028
  617. +#define MTK_WED_WO_CCIF_DUMMY4 0x02c
  618. +#define MTK_WED_WO_CCIF_SHADOW1 0x030
  619. +#define MTK_WED_WO_CCIF_SHADOW2 0x034
  620. +#define MTK_WED_WO_CCIF_SHADOW3 0x038
  621. +#define MTK_WED_WO_CCIF_SHADOW4 0x03c
  622. +#define MTK_WED_WO_CCIF_DUMMY5 0x050
  623. +#define MTK_WED_WO_CCIF_DUMMY6 0x054
  624. +#define MTK_WED_WO_CCIF_DUMMY7 0x058
  625. +#define MTK_WED_WO_CCIF_DUMMY8 0x05c
  626. +#define MTK_WED_WO_CCIF_SHADOW5 0x060
  627. +#define MTK_WED_WO_CCIF_SHADOW6 0x064
  628. +#define MTK_WED_WO_CCIF_SHADOW7 0x068
  629. +#define MTK_WED_WO_CCIF_SHADOW8 0x06c
  630. +
  631. +#define MTK_WED_WO_CTL_SD_LEN1 GENMASK(13, 0)
  632. +#define MTK_WED_WO_CTL_LAST_SEC1 BIT(14)
  633. +#define MTK_WED_WO_CTL_BURST BIT(15)
  634. +#define MTK_WED_WO_CTL_SD_LEN0_SHIFT 16
  635. +#define MTK_WED_WO_CTL_SD_LEN0 GENMASK(29, 16)
  636. +#define MTK_WED_WO_CTL_LAST_SEC0 BIT(30)
  637. +#define MTK_WED_WO_CTL_DMA_DONE BIT(31)
  638. +#define MTK_WED_WO_INFO_WINFO GENMASK(15, 0)
  639. +
  640. struct mtk_wed_wo_memory_region {
  641. const char *name;
  642. void __iomem *addr;
  643. @@ -112,10 +160,53 @@ struct mtk_wed_fw_trailer {
  644. u32 crc;
  645. };
  646. +struct mtk_wed_wo_queue_regs {
  647. + u32 desc_base;
  648. + u32 ring_size;
  649. + u32 cpu_idx;
  650. + u32 dma_idx;
  651. +};
  652. +
  653. +struct mtk_wed_wo_queue_desc {
  654. + __le32 buf0;
  655. + __le32 ctrl;
  656. + __le32 buf1;
  657. + __le32 info;
  658. + __le32 reserved[4];
  659. +} __packed __aligned(32);
  660. +
  661. +struct mtk_wed_wo_queue_entry {
  662. + dma_addr_t addr;
  663. + void *buf;
  664. + u32 len;
  665. +};
  666. +
  667. +struct mtk_wed_wo_queue {
  668. + struct mtk_wed_wo_queue_regs regs;
  669. +
  670. + struct page_frag_cache cache;
  671. + spinlock_t lock;
  672. +
  673. + struct mtk_wed_wo_queue_desc *desc;
  674. + dma_addr_t desc_dma;
  675. +
  676. + struct mtk_wed_wo_queue_entry *entry;
  677. +
  678. + u16 head;
  679. + u16 tail;
  680. + int n_desc;
  681. + int queued;
  682. + int buf_size;
  683. +
  684. +};
  685. +
  686. struct mtk_wed_wo {
  687. struct mtk_wed_hw *hw;
  688. struct mtk_wed_wo_memory_region boot;
  689. + struct mtk_wed_wo_queue q_tx;
  690. + struct mtk_wed_wo_queue q_rx;
  691. +
  692. struct {
  693. struct mutex mutex;
  694. int timeout;
  695. @@ -124,6 +215,15 @@ struct mtk_wed_wo {
  696. struct sk_buff_head res_q;
  697. wait_queue_head_t wait;
  698. } mcu;
  699. +
  700. + struct {
  701. + struct regmap *regs;
  702. +
  703. + spinlock_t lock;
  704. + struct tasklet_struct irq_tasklet;
  705. + int irq;
  706. + u32 irq_mask;
  707. + } mmio;
  708. };
  709. static inline int
  710. @@ -146,5 +246,9 @@ void mtk_wed_mcu_rx_unsolicited_event(st
  711. int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo, int id, int cmd,
  712. const void *data, int len, bool wait_resp);
  713. int mtk_wed_mcu_init(struct mtk_wed_wo *wo);
  714. +int mtk_wed_wo_init(struct mtk_wed_hw *hw);
  715. +void mtk_wed_wo_deinit(struct mtk_wed_hw *hw);
  716. +int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *dev, struct mtk_wed_wo_queue *q,
  717. + struct sk_buff *skb);
  718. #endif /* __MTK_WED_WO_H */
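
Reviewer note (after the diff, ignored when the patch is applied): the
TX-full test in mtk_wed_wo_queue_tx_skb() above is the classic
one-slot-reserved circular-ring check, with q->tail refreshed from the
hardware's dma_idx shadow register before each enqueue. A standalone
sketch of the invariant; the helper names are illustrative, not driver
symbols:

        #include <stdbool.h>
        #include <stdint.h>

        /* One slot always stays free, so head == tail can only mean
         * "empty" and (head + 1) % n_desc == tail means "full"; a ring
         * with n_desc slots therefore carries at most n_desc - 1
         * in-flight commands.
         */
        static inline bool ring_full(uint16_t head, uint16_t tail, int n_desc)
        {
                return (head + 1) % n_desc == tail;
        }

        static inline bool ring_empty(uint16_t head, uint16_t tail)
        {
                return head == tail;
        }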