702-v5.19-02-net-ethernet-mtk_eth_soc-add-support-for-Wireless-Et.patch (44 KB)
  1. From: Felix Fietkau <[email protected]>
  2. Date: Sat, 5 Feb 2022 17:56:08 +0100
  3. Subject: [PATCH] net: ethernet: mtk_eth_soc: add support for Wireless
  4. Ethernet Dispatch (WED)
  5. The Wireless Ethernet Dispatch subsystem on the MT7622 SoC can be
  6. configured to intercept and handle access to the DMA queues and
  7. PCIe interrupts for a MT7615/MT7915 wireless card.
  8. It can manage the internal WDMA (Wireless DMA) controller, which allows
  9. ethernet packets to be passed from the packet switch engine (PSE) to the
  10. wireless card, bypassing the CPU entirely.
  11. This can be used to implement hardware flow offloading from ethernet to
  12. WLAN.
  13. Signed-off-by: Felix Fietkau <[email protected]>
  14. ---
  15. create mode 100644 drivers/net/ethernet/mediatek/mtk_wed.c
  16. create mode 100644 drivers/net/ethernet/mediatek/mtk_wed.h
  17. create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
  18. create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_ops.c
  19. create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_regs.h
  20. create mode 100644 include/linux/soc/mediatek/mtk_wed.h
  21. --- a/drivers/net/ethernet/mediatek/Kconfig
  22. +++ b/drivers/net/ethernet/mediatek/Kconfig
  23. @@ -7,6 +7,10 @@ config NET_VENDOR_MEDIATEK
  24. if NET_VENDOR_MEDIATEK
  25. +config NET_MEDIATEK_SOC_WED
  26. + depends on ARCH_MEDIATEK || COMPILE_TEST
  27. + def_bool NET_MEDIATEK_SOC != n
  28. +
  29. config NET_MEDIATEK_SOC
  30. tristate "MediaTek SoC Gigabit Ethernet support"
  31. depends on NET_DSA || !NET_DSA
  32. --- a/drivers/net/ethernet/mediatek/Makefile
  33. +++ b/drivers/net/ethernet/mediatek/Makefile
  34. @@ -5,4 +5,9 @@
  35. obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
  36. mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
  37. +mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o
  38. +ifdef CONFIG_DEBUG_FS
  39. +mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
  40. +endif
  41. +obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
  42. obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o
  43. --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
  44. +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
  45. @@ -24,6 +24,7 @@
  46. #include <net/dsa.h>
  47. #include "mtk_eth_soc.h"
  48. +#include "mtk_wed.h"
  49. static int mtk_msg_level = -1;
  50. module_param_named(msg_level, mtk_msg_level, int, 0);
  51. @@ -3215,6 +3216,22 @@ static int mtk_probe(struct platform_dev
  52. }
  53. }
  54. + for (i = 0;; i++) {
  55. + struct device_node *np = of_parse_phandle(pdev->dev.of_node,
  56. + "mediatek,wed", i);
  57. + static const u32 wdma_regs[] = {
  58. + MTK_WDMA0_BASE,
  59. + MTK_WDMA1_BASE
  60. + };
  61. + void __iomem *wdma;
  62. +
  63. + if (!np || i >= ARRAY_SIZE(wdma_regs))
  64. + break;
  65. +
  66. + wdma = eth->base + wdma_regs[i];
  67. + mtk_wed_add_hw(np, eth, wdma, i);
  68. + }
  69. +
  70. for (i = 0; i < 3; i++) {
  71. if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
  72. eth->irq[i] = eth->irq[0];
  73. --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
  74. +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
  75. @@ -295,6 +295,9 @@
  76. #define MTK_GDM1_TX_GPCNT 0x2438
  77. #define MTK_STAT_OFFSET 0x40
  78. +#define MTK_WDMA0_BASE 0x2800
  79. +#define MTK_WDMA1_BASE 0x2c00
  80. +
  81. /* QDMA descriptor txd4 */
  82. #define TX_DMA_CHKSUM (0x7 << 29)
  83. #define TX_DMA_TSO BIT(28)
  84. --- /dev/null
  85. +++ b/drivers/net/ethernet/mediatek/mtk_wed.c
  86. @@ -0,0 +1,875 @@
  87. +// SPDX-License-Identifier: GPL-2.0-only
  88. +/* Copyright (C) 2021 Felix Fietkau <[email protected]> */
  89. +
  90. +#include <linux/kernel.h>
  91. +#include <linux/slab.h>
  92. +#include <linux/module.h>
  93. +#include <linux/bitfield.h>
  94. +#include <linux/dma-mapping.h>
  95. +#include <linux/skbuff.h>
  96. +#include <linux/of_platform.h>
  97. +#include <linux/of_address.h>
  98. +#include <linux/mfd/syscon.h>
  99. +#include <linux/debugfs.h>
  100. +#include <linux/soc/mediatek/mtk_wed.h>
  101. +#include "mtk_eth_soc.h"
  102. +#include "mtk_wed_regs.h"
  103. +#include "mtk_wed.h"
  104. +#include "mtk_ppe.h"
  105. +
  106. +#define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000)
  107. +
  108. +#define MTK_WED_PKT_SIZE 1900
  109. +#define MTK_WED_BUF_SIZE 2048
  110. +#define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
  111. +
  112. +#define MTK_WED_TX_RING_SIZE 2048
  113. +#define MTK_WED_WDMA_RING_SIZE 1024
  114. +
  115. +static struct mtk_wed_hw *hw_list[2];
  116. +static DEFINE_MUTEX(hw_lock);
  117. +
  118. +static void
  119. +wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
  120. +{
  121. + regmap_update_bits(dev->hw->regs, reg, mask | val, val);
  122. +}
  123. +
  124. +static void
  125. +wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
  126. +{
  127. + return wed_m32(dev, reg, 0, mask);
  128. +}
  129. +
  130. +static void
  131. +wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
  132. +{
  133. + return wed_m32(dev, reg, mask, 0);
  134. +}
  135. +
  136. +static void
  137. +wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
  138. +{
  139. + wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val);
  140. +}
  141. +
  142. +static void
  143. +wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
  144. +{
  145. + wdma_m32(dev, reg, 0, mask);
  146. +}
  147. +
  148. +static u32
  149. +mtk_wed_read_reset(struct mtk_wed_device *dev)
  150. +{
  151. + return wed_r32(dev, MTK_WED_RESET);
  152. +}
  153. +
  154. +static void
  155. +mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
  156. +{
  157. + u32 status;
  158. +
  159. + wed_w32(dev, MTK_WED_RESET, mask);
  160. + if (readx_poll_timeout(mtk_wed_read_reset, dev, status,
  161. + !(status & mask), 0, 1000))
  162. + WARN_ON_ONCE(1);
  163. +}
  164. +
  165. +static struct mtk_wed_hw *
  166. +mtk_wed_assign(struct mtk_wed_device *dev)
  167. +{
  168. + struct mtk_wed_hw *hw;
  169. +
  170. + hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)];
  171. + if (!hw || hw->wed_dev)
  172. + return NULL;
  173. +
  174. + hw->wed_dev = dev;
  175. + return hw;
  176. +}
  177. +
  178. +static int
  179. +mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
  180. +{
  181. + struct mtk_wdma_desc *desc;
  182. + dma_addr_t desc_phys;
  183. + void **page_list;
  184. + int token = dev->wlan.token_start;
  185. + int ring_size;
  186. + int n_pages;
  187. + int i, page_idx;
  188. +
  189. + ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
  190. + n_pages = ring_size / MTK_WED_BUF_PER_PAGE;
  191. +
  192. + page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
  193. + if (!page_list)
  194. + return -ENOMEM;
  195. +
  196. + dev->buf_ring.size = ring_size;
  197. + dev->buf_ring.pages = page_list;
  198. +
  199. + desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
  200. + &desc_phys, GFP_KERNEL);
  201. + if (!desc)
  202. + return -ENOMEM;
  203. +
  204. + dev->buf_ring.desc = desc;
  205. + dev->buf_ring.desc_phys = desc_phys;
  206. +
  207. + for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
  208. + dma_addr_t page_phys, buf_phys;
  209. + struct page *page;
  210. + void *buf;
  211. + int s;
  212. +
  213. + page = __dev_alloc_pages(GFP_KERNEL, 0);
  214. + if (!page)
  215. + return -ENOMEM;
  216. +
  217. + page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
  218. + DMA_BIDIRECTIONAL);
  219. + if (dma_mapping_error(dev->hw->dev, page_phys)) {
  220. + __free_page(page);
  221. + return -ENOMEM;
  222. + }
  223. +
  224. + page_list[page_idx++] = page;
  225. + dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
  226. + DMA_BIDIRECTIONAL);
  227. +
  228. + buf = page_to_virt(page);
  229. + buf_phys = page_phys;
  230. +
  231. + for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
  232. + u32 txd_size;
  233. +
  234. + txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
  235. +
  236. + desc->buf0 = buf_phys;
  237. + desc->buf1 = buf_phys + txd_size;
  238. + desc->ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0,
  239. + txd_size) |
  240. + FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
  241. + MTK_WED_BUF_SIZE - txd_size) |
  242. + MTK_WDMA_DESC_CTRL_LAST_SEG1;
  243. + desc->info = 0;
  244. + desc++;
  245. +
  246. + buf += MTK_WED_BUF_SIZE;
  247. + buf_phys += MTK_WED_BUF_SIZE;
  248. + }
  249. +
  250. + dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
  251. + DMA_BIDIRECTIONAL);
  252. + }
  253. +
  254. + return 0;
  255. +}
  256. +
  257. +static void
  258. +mtk_wed_free_buffer(struct mtk_wed_device *dev)
  259. +{
  260. + struct mtk_wdma_desc *desc = dev->buf_ring.desc;
  261. + void **page_list = dev->buf_ring.pages;
  262. + int page_idx;
  263. + int i;
  264. +
  265. + if (!page_list)
  266. + return;
  267. +
  268. + if (!desc)
  269. + goto free_pagelist;
  270. +
  271. + for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) {
  272. + void *page = page_list[page_idx++];
  273. +
  274. + if (!page)
  275. + break;
  276. +
  277. + dma_unmap_page(dev->hw->dev, desc[i].buf0,
  278. + PAGE_SIZE, DMA_BIDIRECTIONAL);
  279. + __free_page(page);
  280. + }
  281. +
  282. + dma_free_coherent(dev->hw->dev, dev->buf_ring.size * sizeof(*desc),
  283. + desc, dev->buf_ring.desc_phys);
  284. +
  285. +free_pagelist:
  286. + kfree(page_list);
  287. +}
  288. +
  289. +static void
  290. +mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring)
  291. +{
  292. + if (!ring->desc)
  293. + return;
  294. +
  295. + dma_free_coherent(dev->hw->dev, ring->size * sizeof(*ring->desc),
  296. + ring->desc, ring->desc_phys);
  297. +}
  298. +
  299. +static void
  300. +mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
  301. +{
  302. + int i;
  303. +
  304. + for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++)
  305. + mtk_wed_free_ring(dev, &dev->tx_ring[i]);
  306. + for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
  307. + mtk_wed_free_ring(dev, &dev->tx_wdma[i]);
  308. +}
  309. +
  310. +static void
  311. +mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
  312. +{
  313. + u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
  314. +
  315. + if (!dev->hw->num_flows)
  316. + mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
  317. +
  318. + wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? mask : 0);
  319. + wed_r32(dev, MTK_WED_EXT_INT_MASK);
  320. +}
  321. +
  322. +static void
  323. +mtk_wed_stop(struct mtk_wed_device *dev)
  324. +{
  325. + regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
  326. + mtk_wed_set_ext_int(dev, false);
  327. +
  328. + wed_clr(dev, MTK_WED_CTRL,
  329. + MTK_WED_CTRL_WDMA_INT_AGENT_EN |
  330. + MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
  331. + MTK_WED_CTRL_WED_TX_BM_EN |
  332. + MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
  333. + wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
  334. + wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
  335. + wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
  336. + wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
  337. + wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
  338. +
  339. + wed_clr(dev, MTK_WED_GLO_CFG,
  340. + MTK_WED_GLO_CFG_TX_DMA_EN |
  341. + MTK_WED_GLO_CFG_RX_DMA_EN);
  342. + wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
  343. + MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
  344. + MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
  345. + wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
  346. + MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
  347. +}
  348. +
  349. +static void
  350. +mtk_wed_detach(struct mtk_wed_device *dev)
  351. +{
  352. + struct device_node *wlan_node = dev->wlan.pci_dev->dev.of_node;
  353. + struct mtk_wed_hw *hw = dev->hw;
  354. +
  355. + mutex_lock(&hw_lock);
  356. +
  357. + mtk_wed_stop(dev);
  358. +
  359. + wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
  360. + wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
  361. +
  362. + mtk_wed_reset(dev, MTK_WED_RESET_WED);
  363. +
  364. + mtk_wed_free_buffer(dev);
  365. + mtk_wed_free_tx_rings(dev);
  366. +
  367. + if (of_dma_is_coherent(wlan_node))
  368. + regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
  369. + BIT(hw->index), BIT(hw->index));
  370. +
  371. + if (!hw_list[!hw->index]->wed_dev &&
  372. + hw->eth->dma_dev != hw->eth->dev)
  373. + mtk_eth_set_dma_device(hw->eth, hw->eth->dev);
  374. +
  375. + memset(dev, 0, sizeof(*dev));
  376. + module_put(THIS_MODULE);
  377. +
  378. + hw->wed_dev = NULL;
  379. + mutex_unlock(&hw_lock);
  380. +}
  381. +
  382. +static void
  383. +mtk_wed_hw_init_early(struct mtk_wed_device *dev)
  384. +{
  385. + u32 mask, set;
  386. + u32 offset;
  387. +
  388. + mtk_wed_stop(dev);
  389. + mtk_wed_reset(dev, MTK_WED_RESET_WED);
  390. +
  391. + mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
  392. + MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
  393. + MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
  394. + set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
  395. + MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
  396. + MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
  397. + wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
  398. +
  399. + wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_INFO_PRERES);
  400. +
  401. + offset = dev->hw->index ? 0x04000400 : 0;
  402. + wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
  403. + wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
  404. +
  405. + wed_w32(dev, MTK_WED_PCIE_CFG_BASE, MTK_PCIE_BASE(dev->hw->index));
  406. + wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
  407. +}
  408. +
  409. +static void
  410. +mtk_wed_hw_init(struct mtk_wed_device *dev)
  411. +{
  412. + if (dev->init_done)
  413. + return;
  414. +
  415. + dev->init_done = true;
  416. + mtk_wed_set_ext_int(dev, false);
  417. + wed_w32(dev, MTK_WED_TX_BM_CTRL,
  418. + MTK_WED_TX_BM_CTRL_PAUSE |
  419. + FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
  420. + dev->buf_ring.size / 128) |
  421. + FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
  422. + MTK_WED_TX_RING_SIZE / 256));
  423. +
  424. + wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys);
  425. +
  426. + wed_w32(dev, MTK_WED_TX_BM_TKID,
  427. + FIELD_PREP(MTK_WED_TX_BM_TKID_START,
  428. + dev->wlan.token_start) |
  429. + FIELD_PREP(MTK_WED_TX_BM_TKID_END,
  430. + dev->wlan.token_start + dev->wlan.nbuf - 1));
  431. +
  432. + wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
  433. +
  434. + wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
  435. + FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
  436. + MTK_WED_TX_BM_DYN_THR_HI);
  437. +
  438. + mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
  439. +
  440. + wed_set(dev, MTK_WED_CTRL,
  441. + MTK_WED_CTRL_WED_TX_BM_EN |
  442. + MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
  443. +
  444. + wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
  445. +}
  446. +
  447. +static void
  448. +mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size)
  449. +{
  450. + int i;
  451. +
  452. + for (i = 0; i < size; i++) {
  453. + desc[i].buf0 = 0;
  454. + desc[i].ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
  455. + desc[i].buf1 = 0;
  456. + desc[i].info = 0;
  457. + }
  458. +}
  459. +
  460. +static u32
  461. +mtk_wed_check_busy(struct mtk_wed_device *dev)
  462. +{
  463. + if (wed_r32(dev, MTK_WED_GLO_CFG) & MTK_WED_GLO_CFG_TX_DMA_BUSY)
  464. + return true;
  465. +
  466. + if (wed_r32(dev, MTK_WED_WPDMA_GLO_CFG) &
  467. + MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY)
  468. + return true;
  469. +
  470. + if (wed_r32(dev, MTK_WED_CTRL) & MTK_WED_CTRL_WDMA_INT_AGENT_BUSY)
  471. + return true;
  472. +
  473. + if (wed_r32(dev, MTK_WED_WDMA_GLO_CFG) &
  474. + MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
  475. + return true;
  476. +
  477. + if (wdma_r32(dev, MTK_WDMA_GLO_CFG) &
  478. + MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
  479. + return true;
  480. +
  481. + if (wed_r32(dev, MTK_WED_CTRL) &
  482. + (MTK_WED_CTRL_WED_TX_BM_BUSY | MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY))
  483. + return true;
  484. +
  485. + return false;
  486. +}
  487. +
  488. +static int
  489. +mtk_wed_poll_busy(struct mtk_wed_device *dev)
  490. +{
  491. + int sleep = 15000;
  492. + int timeout = 100 * sleep;
  493. + u32 val;
  494. +
  495. + return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
  496. + timeout, false, dev);
  497. +}
  498. +
  499. +static void
  500. +mtk_wed_reset_dma(struct mtk_wed_device *dev)
  501. +{
  502. + bool busy = false;
  503. + u32 val;
  504. + int i;
  505. +
  506. + for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) {
  507. + struct mtk_wdma_desc *desc = dev->tx_ring[i].desc;
  508. +
  509. + if (!desc)
  510. + continue;
  511. +
  512. + mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE);
  513. + }
  514. +
  515. + if (mtk_wed_poll_busy(dev))
  516. + busy = mtk_wed_check_busy(dev);
  517. +
  518. + if (busy) {
  519. + mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
  520. + } else {
  521. + wed_w32(dev, MTK_WED_RESET_IDX,
  522. + MTK_WED_RESET_IDX_TX |
  523. + MTK_WED_RESET_IDX_RX);
  524. + wed_w32(dev, MTK_WED_RESET_IDX, 0);
  525. + }
  526. +
  527. + wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
  528. + wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
  529. +
  530. + if (busy) {
  531. + mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
  532. + mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
  533. + } else {
  534. + wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
  535. + MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV);
  536. + wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0);
  537. +
  538. + wed_set(dev, MTK_WED_WDMA_GLO_CFG,
  539. + MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
  540. +
  541. + wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
  542. + MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
  543. + }
  544. +
  545. + for (i = 0; i < 100; i++) {
  546. + val = wed_r32(dev, MTK_WED_TX_BM_INTF);
  547. + if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
  548. + break;
  549. + }
  550. +
  551. + mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
  552. + mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
  553. +
  554. + if (busy) {
  555. + mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
  556. + mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
  557. + mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV);
  558. + } else {
  559. + wed_w32(dev, MTK_WED_WPDMA_RESET_IDX,
  560. + MTK_WED_WPDMA_RESET_IDX_TX |
  561. + MTK_WED_WPDMA_RESET_IDX_RX);
  562. + wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
  563. + }
  564. +
  565. +}
  566. +
  567. +static int
  568. +mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
  569. + int size)
  570. +{
  571. + ring->desc = dma_alloc_coherent(dev->hw->dev,
  572. + size * sizeof(*ring->desc),
  573. + &ring->desc_phys, GFP_KERNEL);
  574. + if (!ring->desc)
  575. + return -ENOMEM;
  576. +
  577. + ring->size = size;
  578. + mtk_wed_ring_reset(ring->desc, size);
  579. +
  580. + return 0;
  581. +}
  582. +
  583. +static int
  584. +mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
  585. +{
  586. + struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];
  587. +
  588. + if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE))
  589. + return -ENOMEM;
  590. +
  591. + wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
  592. + wdma->desc_phys);
  593. + wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
  594. + size);
  595. + wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
  596. +
  597. + wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
  598. + wdma->desc_phys);
  599. + wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
  600. + size);
  601. +
  602. + return 0;
  603. +}
  604. +
  605. +static void
  606. +mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
  607. +{
  608. + u32 wdma_mask;
  609. + u32 val;
  610. + int i;
  611. +
  612. + for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
  613. + if (!dev->tx_wdma[i].desc)
  614. + mtk_wed_wdma_ring_setup(dev, i, 16);
  615. +
  616. + wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
  617. +
  618. + mtk_wed_hw_init(dev);
  619. +
  620. + wed_set(dev, MTK_WED_CTRL,
  621. + MTK_WED_CTRL_WDMA_INT_AGENT_EN |
  622. + MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
  623. + MTK_WED_CTRL_WED_TX_BM_EN |
  624. + MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
  625. +
  626. + wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, MTK_WED_PCIE_INT_TRIGGER_STATUS);
  627. +
  628. + wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
  629. + MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
  630. + MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);
  631. +
  632. + wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
  633. + MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
  634. +
  635. + wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
  636. + wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
  637. +
  638. + wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
  639. + wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask);
  640. +
  641. + wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
  642. + wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
  643. +
  644. + wed_set(dev, MTK_WED_GLO_CFG,
  645. + MTK_WED_GLO_CFG_TX_DMA_EN |
  646. + MTK_WED_GLO_CFG_RX_DMA_EN);
  647. + wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
  648. + MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
  649. + MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
  650. + wed_set(dev, MTK_WED_WDMA_GLO_CFG,
  651. + MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
  652. +
  653. + mtk_wed_set_ext_int(dev, true);
  654. + val = dev->wlan.wpdma_phys |
  655. + MTK_PCIE_MIRROR_MAP_EN |
  656. + FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, dev->hw->index);
  657. +
  658. + if (dev->hw->index)
  659. + val |= BIT(1);
  660. + val |= BIT(0);
  661. + regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
  662. +
  663. + dev->running = true;
  664. +}
  665. +
  666. +static int
  667. +mtk_wed_attach(struct mtk_wed_device *dev)
  668. + __releases(RCU)
  669. +{
  670. + struct mtk_wed_hw *hw;
  671. + int ret = 0;
  672. +
  673. + RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
  674. + "mtk_wed_attach without holding the RCU read lock");
  675. +
  676. + if (pci_domain_nr(dev->wlan.pci_dev->bus) > 1 ||
  677. + !try_module_get(THIS_MODULE))
  678. + ret = -ENODEV;
  679. +
  680. + rcu_read_unlock();
  681. +
  682. + if (ret)
  683. + return ret;
  684. +
  685. + mutex_lock(&hw_lock);
  686. +
  687. + hw = mtk_wed_assign(dev);
  688. + if (!hw) {
  689. + module_put(THIS_MODULE);
  690. + ret = -ENODEV;
  691. + goto out;
  692. + }
  693. +
  694. + dev_info(&dev->wlan.pci_dev->dev, "attaching wed device %d\n", hw->index);
  695. +
  696. + dev->hw = hw;
  697. + dev->dev = hw->dev;
  698. + dev->irq = hw->irq;
  699. + dev->wdma_idx = hw->index;
  700. +
  701. + if (hw->eth->dma_dev == hw->eth->dev &&
  702. + of_dma_is_coherent(hw->eth->dev->of_node))
  703. + mtk_eth_set_dma_device(hw->eth, hw->dev);
  704. +
  705. + ret = mtk_wed_buffer_alloc(dev);
  706. + if (ret) {
  707. + mtk_wed_detach(dev);
  708. + goto out;
  709. + }
  710. +
  711. + mtk_wed_hw_init_early(dev);
  712. + regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, BIT(hw->index), 0);
  713. +
  714. +out:
  715. + mutex_unlock(&hw_lock);
  716. +
  717. + return ret;
  718. +}
  719. +
  720. +static int
  721. +mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
  722. +{
  723. + struct mtk_wed_ring *ring = &dev->tx_ring[idx];
  724. +
  725. + /*
  726. + * Tx ring redirection:
  727. + * Instead of configuring the WLAN PDMA TX ring directly, the WLAN
  728. + * driver allocated DMA ring gets configured into WED MTK_WED_RING_TX(n)
  729. + * registers.
  730. + *
  731. + * WED driver posts its own DMA ring as WLAN PDMA TX and configures it
  732. + * into MTK_WED_WPDMA_RING_TX(n) registers.
  733. + * It gets filled with packets picked up from WED TX ring and from
  734. + * WDMA RX.
  735. + */
  736. +
  737. + BUG_ON(idx > ARRAY_SIZE(dev->tx_ring));
  738. +
  739. + if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE))
  740. + return -ENOMEM;
  741. +
  742. + if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
  743. + return -ENOMEM;
  744. +
  745. + ring->reg_base = MTK_WED_RING_TX(idx);
  746. + ring->wpdma = regs;
  747. +
  748. + /* WED -> WPDMA */
  749. + wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
  750. + wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE);
  751. + wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0);
  752. +
  753. + wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
  754. + ring->desc_phys);
  755. + wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
  756. + MTK_WED_TX_RING_SIZE);
  757. + wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
  758. +
  759. + return 0;
  760. +}
  761. +
  762. +static int
  763. +mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
  764. +{
  765. + struct mtk_wed_ring *ring = &dev->txfree_ring;
  766. + int i;
  767. +
  768. + /*
  769. + * For txfree event handling, the same DMA ring is shared between WED
  770. + * and WLAN. The WLAN driver accesses the ring index registers through
  771. + * WED
  772. + */
  773. + ring->reg_base = MTK_WED_RING_RX(1);
  774. + ring->wpdma = regs;
  775. +
  776. + for (i = 0; i < 12; i += 4) {
  777. + u32 val = readl(regs + i);
  778. +
  779. + wed_w32(dev, MTK_WED_RING_RX(1) + i, val);
  780. + wed_w32(dev, MTK_WED_WPDMA_RING_RX(1) + i, val);
  781. + }
  782. +
  783. + return 0;
  784. +}
  785. +
  786. +static u32
  787. +mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
  788. +{
  789. + u32 val;
  790. +
  791. + val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
  792. + wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
  793. + val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK;
  794. + if (!dev->hw->num_flows)
  795. + val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
  796. + if (val && net_ratelimit())
  797. + pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val);
  798. +
  799. + val = wed_r32(dev, MTK_WED_INT_STATUS);
  800. + val &= mask;
  801. + wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */
  802. +
  803. + return val;
  804. +}
  805. +
  806. +static void
  807. +mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
  808. +{
  809. + if (!dev->running)
  810. + return;
  811. +
  812. + mtk_wed_set_ext_int(dev, !!mask);
  813. + wed_w32(dev, MTK_WED_INT_MASK, mask);
  814. +}
  815. +
  816. +int mtk_wed_flow_add(int index)
  817. +{
  818. + struct mtk_wed_hw *hw = hw_list[index];
  819. + int ret;
  820. +
  821. + if (!hw || !hw->wed_dev)
  822. + return -ENODEV;
  823. +
  824. + if (hw->num_flows) {
  825. + hw->num_flows++;
  826. + return 0;
  827. + }
  828. +
  829. + mutex_lock(&hw_lock);
  830. + if (!hw->wed_dev) {
  831. + ret = -ENODEV;
  832. + goto out;
  833. + }
  834. +
  835. + ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev);
  836. + if (!ret)
  837. + hw->num_flows++;
  838. + mtk_wed_set_ext_int(hw->wed_dev, true);
  839. +
  840. +out:
  841. + mutex_unlock(&hw_lock);
  842. +
  843. + return ret;
  844. +}
  845. +
  846. +void mtk_wed_flow_remove(int index)
  847. +{
  848. + struct mtk_wed_hw *hw = hw_list[index];
  849. +
  850. + if (!hw)
  851. + return;
  852. +
  853. + if (--hw->num_flows)
  854. + return;
  855. +
  856. + mutex_lock(&hw_lock);
  857. + if (!hw->wed_dev)
  858. + goto out;
  859. +
  860. + hw->wed_dev->wlan.offload_disable(hw->wed_dev);
  861. + mtk_wed_set_ext_int(hw->wed_dev, true);
  862. +
  863. +out:
  864. + mutex_unlock(&hw_lock);
  865. +}
  866. +
  867. +void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
  868. + void __iomem *wdma, int index)
  869. +{
  870. + static const struct mtk_wed_ops wed_ops = {
  871. + .attach = mtk_wed_attach,
  872. + .tx_ring_setup = mtk_wed_tx_ring_setup,
  873. + .txfree_ring_setup = mtk_wed_txfree_ring_setup,
  874. + .start = mtk_wed_start,
  875. + .stop = mtk_wed_stop,
  876. + .reset_dma = mtk_wed_reset_dma,
  877. + .reg_read = wed_r32,
  878. + .reg_write = wed_w32,
  879. + .irq_get = mtk_wed_irq_get,
  880. + .irq_set_mask = mtk_wed_irq_set_mask,
  881. + .detach = mtk_wed_detach,
  882. + };
  883. + struct device_node *eth_np = eth->dev->of_node;
  884. + struct platform_device *pdev;
  885. + struct mtk_wed_hw *hw;
  886. + struct regmap *regs;
  887. + int irq;
  888. +
  889. + if (!np)
  890. + return;
  891. +
  892. + pdev = of_find_device_by_node(np);
  893. + if (!pdev)
  894. + return;
  895. +
  896. + get_device(&pdev->dev);
  897. + irq = platform_get_irq(pdev, 0);
  898. + if (irq < 0)
  899. + return;
  900. +
  901. + regs = syscon_regmap_lookup_by_phandle(np, NULL);
  902. + if (!regs)
  903. + return;
  904. +
  905. + rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops);
  906. +
  907. + mutex_lock(&hw_lock);
  908. +
  909. + if (WARN_ON(hw_list[index]))
  910. + goto unlock;
  911. +
  912. + hw = kzalloc(sizeof(*hw), GFP_KERNEL);
  913. + hw->node = np;
  914. + hw->regs = regs;
  915. + hw->eth = eth;
  916. + hw->dev = &pdev->dev;
  917. + hw->wdma = wdma;
  918. + hw->index = index;
  919. + hw->irq = irq;
  920. + hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
  921. + "mediatek,pcie-mirror");
  922. + hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
  923. + "mediatek,hifsys");
  924. + if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
  925. + kfree(hw);
  926. + goto unlock;
  927. + }
  928. +
  929. + if (!index) {
  930. + regmap_write(hw->mirror, 0, 0);
  931. + regmap_write(hw->mirror, 4, 0);
  932. + }
  933. + mtk_wed_hw_add_debugfs(hw);
  934. +
  935. + hw_list[index] = hw;
  936. +
  937. +unlock:
  938. + mutex_unlock(&hw_lock);
  939. +}
  940. +
  941. +void mtk_wed_exit(void)
  942. +{
  943. + int i;
  944. +
  945. + rcu_assign_pointer(mtk_soc_wed_ops, NULL);
  946. +
  947. + synchronize_rcu();
  948. +
  949. + for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
  950. + struct mtk_wed_hw *hw;
  951. +
  952. + hw = hw_list[i];
  953. + if (!hw)
  954. + continue;
  955. +
  956. + hw_list[i] = NULL;
  957. + debugfs_remove(hw->debugfs_dir);
  958. + put_device(hw->dev);
  959. + kfree(hw);
  960. + }
  961. +}
  962. --- /dev/null
  963. +++ b/drivers/net/ethernet/mediatek/mtk_wed.h
  964. @@ -0,0 +1,128 @@
  965. +// SPDX-License-Identifier: GPL-2.0-only
  966. +/* Copyright (C) 2021 Felix Fietkau <[email protected]> */
  967. +
  968. +#ifndef __MTK_WED_PRIV_H
  969. +#define __MTK_WED_PRIV_H
  970. +
  971. +#include <linux/soc/mediatek/mtk_wed.h>
  972. +#include <linux/debugfs.h>
  973. +#include <linux/regmap.h>
  974. +
  975. +struct mtk_eth;
  976. +
  977. +struct mtk_wed_hw {
  978. + struct device_node *node;
  979. + struct mtk_eth *eth;
  980. + struct regmap *regs;
  981. + struct regmap *hifsys;
  982. + struct device *dev;
  983. + void __iomem *wdma;
  984. + struct regmap *mirror;
  985. + struct dentry *debugfs_dir;
  986. + struct mtk_wed_device *wed_dev;
  987. + u32 debugfs_reg;
  988. + u32 num_flows;
  989. + char dirname[5];
  990. + int irq;
  991. + int index;
  992. +};
  993. +
  994. +
  995. +#ifdef CONFIG_NET_MEDIATEK_SOC_WED
  996. +static inline void
  997. +wed_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
  998. +{
  999. + regmap_write(dev->hw->regs, reg, val);
  1000. +}
  1001. +
  1002. +static inline u32
  1003. +wed_r32(struct mtk_wed_device *dev, u32 reg)
  1004. +{
  1005. + unsigned int val;
  1006. +
  1007. + regmap_read(dev->hw->regs, reg, &val);
  1008. +
  1009. + return val;
  1010. +}
  1011. +
  1012. +static inline void
  1013. +wdma_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
  1014. +{
  1015. + writel(val, dev->hw->wdma + reg);
  1016. +}
  1017. +
  1018. +static inline u32
  1019. +wdma_r32(struct mtk_wed_device *dev, u32 reg)
  1020. +{
  1021. + return readl(dev->hw->wdma + reg);
  1022. +}
  1023. +
  1024. +static inline u32
  1025. +wpdma_tx_r32(struct mtk_wed_device *dev, int ring, u32 reg)
  1026. +{
  1027. + if (!dev->tx_ring[ring].wpdma)
  1028. + return 0;
  1029. +
  1030. + return readl(dev->tx_ring[ring].wpdma + reg);
  1031. +}
  1032. +
  1033. +static inline void
  1034. +wpdma_tx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val)
  1035. +{
  1036. + if (!dev->tx_ring[ring].wpdma)
  1037. + return;
  1038. +
  1039. + writel(val, dev->tx_ring[ring].wpdma + reg);
  1040. +}
  1041. +
  1042. +static inline u32
  1043. +wpdma_txfree_r32(struct mtk_wed_device *dev, u32 reg)
  1044. +{
  1045. + if (!dev->txfree_ring.wpdma)
  1046. + return 0;
  1047. +
  1048. + return readl(dev->txfree_ring.wpdma + reg);
  1049. +}
  1050. +
  1051. +static inline void
  1052. +wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
  1053. +{
  1054. + if (!dev->txfree_ring.wpdma)
  1055. + return;
  1056. +
  1057. + writel(val, dev->txfree_ring.wpdma + reg);
  1058. +}
  1059. +
  1060. +void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
  1061. + void __iomem *wdma, int index);
  1062. +void mtk_wed_exit(void);
  1063. +int mtk_wed_flow_add(int index);
  1064. +void mtk_wed_flow_remove(int index);
  1065. +#else
  1066. +static inline void
  1067. +mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
  1068. + void __iomem *wdma, int index)
  1069. +{
  1070. +}
  1071. +static inline void
  1072. +mtk_wed_exit(void)
  1073. +{
  1074. +}
  1075. +static inline int mtk_wed_flow_add(int index)
  1076. +{
  1077. + return -EINVAL;
  1078. +}
  1079. +static inline void mtk_wed_flow_remove(int index)
  1080. +{
  1081. +}
  1082. +#endif
  1083. +
  1084. +#ifdef CONFIG_DEBUG_FS
  1085. +void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw);
  1086. +#else
  1087. +static inline void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
  1088. +{
  1089. +}
  1090. +#endif
  1091. +
  1092. +#endif
  1093. --- /dev/null
  1094. +++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
  1095. @@ -0,0 +1,175 @@
  1096. +// SPDX-License-Identifier: GPL-2.0-only
  1097. +/* Copyright (C) 2021 Felix Fietkau <[email protected]> */
  1098. +
  1099. +#include <linux/seq_file.h>
  1100. +#include "mtk_wed.h"
  1101. +#include "mtk_wed_regs.h"
  1102. +
  1103. +struct reg_dump {
  1104. + const char *name;
  1105. + u16 offset;
  1106. + u8 type;
  1107. + u8 base;
  1108. +};
  1109. +
  1110. +enum {
  1111. + DUMP_TYPE_STRING,
  1112. + DUMP_TYPE_WED,
  1113. + DUMP_TYPE_WDMA,
  1114. + DUMP_TYPE_WPDMA_TX,
  1115. + DUMP_TYPE_WPDMA_TXFREE,
  1116. +};
  1117. +
  1118. +#define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
  1119. +#define DUMP_REG(_reg, ...) { #_reg, MTK_##_reg, __VA_ARGS__ }
  1120. +#define DUMP_RING(_prefix, _base, ...) \
  1121. + { _prefix " BASE", _base, __VA_ARGS__ }, \
  1122. + { _prefix " CNT", _base + 0x4, __VA_ARGS__ }, \
  1123. + { _prefix " CIDX", _base + 0x8, __VA_ARGS__ }, \
  1124. + { _prefix " DIDX", _base + 0xc, __VA_ARGS__ }
  1125. +
  1126. +#define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED)
  1127. +#define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED)
  1128. +
  1129. +#define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA)
  1130. +#define DUMP_WDMA_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WDMA)
  1131. +
  1132. +#define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n)
  1133. +#define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE)
  1134. +
  1135. +static void
  1136. +print_reg_val(struct seq_file *s, const char *name, u32 val)
  1137. +{
  1138. + seq_printf(s, "%-32s %08x\n", name, val);
  1139. +}
  1140. +
  1141. +static void
  1142. +dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
  1143. + const struct reg_dump *regs, int n_regs)
  1144. +{
  1145. + const struct reg_dump *cur;
  1146. + u32 val;
  1147. +
  1148. + for (cur = regs; cur < &regs[n_regs]; cur++) {
  1149. + switch (cur->type) {
  1150. + case DUMP_TYPE_STRING:
  1151. + seq_printf(s, "%s======== %s:\n",
  1152. + cur > regs ? "\n" : "",
  1153. + cur->name);
  1154. + continue;
  1155. + case DUMP_TYPE_WED:
  1156. + val = wed_r32(dev, cur->offset);
  1157. + break;
  1158. + case DUMP_TYPE_WDMA:
  1159. + val = wdma_r32(dev, cur->offset);
  1160. + break;
  1161. + case DUMP_TYPE_WPDMA_TX:
  1162. + val = wpdma_tx_r32(dev, cur->base, cur->offset);
  1163. + break;
  1164. + case DUMP_TYPE_WPDMA_TXFREE:
  1165. + val = wpdma_txfree_r32(dev, cur->offset);
  1166. + break;
  1167. + }
  1168. + print_reg_val(s, cur->name, val);
  1169. + }
  1170. +}
  1171. +
  1172. +
  1173. +static int
  1174. +wed_txinfo_show(struct seq_file *s, void *data)
  1175. +{
  1176. + static const struct reg_dump regs[] = {
  1177. + DUMP_STR("WED TX"),
  1178. + DUMP_WED(WED_TX_MIB(0)),
  1179. + DUMP_WED_RING(WED_RING_TX(0)),
  1180. +
  1181. + DUMP_WED(WED_TX_MIB(1)),
  1182. + DUMP_WED_RING(WED_RING_TX(1)),
  1183. +
  1184. + DUMP_STR("WPDMA TX"),
  1185. + DUMP_WED(WED_WPDMA_TX_MIB(0)),
  1186. + DUMP_WED_RING(WED_WPDMA_RING_TX(0)),
  1187. + DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(0)),
  1188. +
  1189. + DUMP_WED(WED_WPDMA_TX_MIB(1)),
  1190. + DUMP_WED_RING(WED_WPDMA_RING_TX(1)),
  1191. + DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(1)),
  1192. +
  1193. + DUMP_STR("WPDMA TX"),
  1194. + DUMP_WPDMA_TX_RING(0),
  1195. + DUMP_WPDMA_TX_RING(1),
  1196. +
  1197. + DUMP_STR("WED WDMA RX"),
  1198. + DUMP_WED(WED_WDMA_RX_MIB(0)),
  1199. + DUMP_WED_RING(WED_WDMA_RING_RX(0)),
  1200. + DUMP_WED(WED_WDMA_RX_THRES(0)),
  1201. + DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(0)),
  1202. + DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(0)),
  1203. +
  1204. + DUMP_WED(WED_WDMA_RX_MIB(1)),
  1205. + DUMP_WED_RING(WED_WDMA_RING_RX(1)),
  1206. + DUMP_WED(WED_WDMA_RX_THRES(1)),
  1207. + DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(1)),
  1208. + DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(1)),
  1209. +
  1210. + DUMP_STR("WDMA RX"),
  1211. + DUMP_WDMA(WDMA_GLO_CFG),
  1212. + DUMP_WDMA_RING(WDMA_RING_RX(0)),
  1213. + DUMP_WDMA_RING(WDMA_RING_RX(1)),
  1214. + };
  1215. + struct mtk_wed_hw *hw = s->private;
  1216. + struct mtk_wed_device *dev = hw->wed_dev;
  1217. +
  1218. + if (!dev)
  1219. + return 0;
  1220. +
  1221. + dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
  1222. +
  1223. + return 0;
  1224. +}
  1225. +DEFINE_SHOW_ATTRIBUTE(wed_txinfo);
  1226. +
  1227. +
  1228. +static int
  1229. +mtk_wed_reg_set(void *data, u64 val)
  1230. +{
  1231. + struct mtk_wed_hw *hw = data;
  1232. +
  1233. + regmap_write(hw->regs, hw->debugfs_reg, val);
  1234. +
  1235. + return 0;
  1236. +}
  1237. +
  1238. +static int
  1239. +mtk_wed_reg_get(void *data, u64 *val)
  1240. +{
  1241. + struct mtk_wed_hw *hw = data;
  1242. + unsigned int regval;
  1243. + int ret;
  1244. +
  1245. + ret = regmap_read(hw->regs, hw->debugfs_reg, &regval);
  1246. + if (ret)
  1247. + return ret;
  1248. +
  1249. + *val = regval;
  1250. +
  1251. + return 0;
  1252. +}
  1253. +
  1254. +DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mtk_wed_reg_get, mtk_wed_reg_set,
  1255. + "0x%08llx\n");
  1256. +
  1257. +void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
  1258. +{
  1259. + struct dentry *dir;
  1260. +
  1261. + snprintf(hw->dirname, sizeof(hw->dirname), "wed%d", hw->index);
  1262. + dir = debugfs_create_dir(hw->dirname, NULL);
  1263. + if (!dir)
  1264. + return;
  1265. +
  1266. + hw->debugfs_dir = dir;
  1267. + debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
  1268. + debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
  1269. + debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
  1270. +}
  1271. --- /dev/null
  1272. +++ b/drivers/net/ethernet/mediatek/mtk_wed_ops.c
  1273. @@ -0,0 +1,8 @@
  1274. +// SPDX-License-Identifier: GPL-2.0-only
  1275. +/* Copyright (C) 2020 Felix Fietkau <[email protected]> */
  1276. +
  1277. +#include <linux/kernel.h>
  1278. +#include <linux/soc/mediatek/mtk_wed.h>
  1279. +
  1280. +const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
  1281. +EXPORT_SYMBOL_GPL(mtk_soc_wed_ops);
  1282. --- /dev/null
  1283. +++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
  1284. @@ -0,0 +1,251 @@
  1285. +// SPDX-License-Identifier: GPL-2.0-only
  1286. +/* Copyright (C) 2020 Felix Fietkau <[email protected]> */
  1287. +
  1288. +#ifndef __MTK_WED_REGS_H
  1289. +#define __MTK_WED_REGS_H
  1290. +
  1291. +#define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(14, 0)
  1292. +#define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(15)
  1293. +#define MTK_WDMA_DESC_CTRL_BURST BIT(16)
  1294. +#define MTK_WDMA_DESC_CTRL_LEN0 GENMASK(29, 16)
  1295. +#define MTK_WDMA_DESC_CTRL_LAST_SEG0 BIT(30)
  1296. +#define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31)
  1297. +
  1298. +struct mtk_wdma_desc {
  1299. + __le32 buf0;
  1300. + __le32 ctrl;
  1301. + __le32 buf1;
  1302. + __le32 info;
  1303. +} __packed __aligned(4);
  1304. +
  1305. +#define MTK_WED_RESET 0x008
  1306. +#define MTK_WED_RESET_TX_BM BIT(0)
  1307. +#define MTK_WED_RESET_TX_FREE_AGENT BIT(4)
  1308. +#define MTK_WED_RESET_WPDMA_TX_DRV BIT(8)
  1309. +#define MTK_WED_RESET_WPDMA_RX_DRV BIT(9)
  1310. +#define MTK_WED_RESET_WPDMA_INT_AGENT BIT(11)
  1311. +#define MTK_WED_RESET_WED_TX_DMA BIT(12)
  1312. +#define MTK_WED_RESET_WDMA_RX_DRV BIT(17)
  1313. +#define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
  1314. +#define MTK_WED_RESET_WED BIT(31)
  1315. +
  1316. +#define MTK_WED_CTRL 0x00c
  1317. +#define MTK_WED_CTRL_WPDMA_INT_AGENT_EN BIT(0)
  1318. +#define MTK_WED_CTRL_WPDMA_INT_AGENT_BUSY BIT(1)
  1319. +#define MTK_WED_CTRL_WDMA_INT_AGENT_EN BIT(2)
  1320. +#define MTK_WED_CTRL_WDMA_INT_AGENT_BUSY BIT(3)
  1321. +#define MTK_WED_CTRL_WED_TX_BM_EN BIT(8)
  1322. +#define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9)
  1323. +#define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10)
  1324. +#define MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY BIT(11)
  1325. +#define MTK_WED_CTRL_RESERVE_EN BIT(12)
  1326. +#define MTK_WED_CTRL_RESERVE_BUSY BIT(13)
  1327. +#define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
  1328. +#define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
  1329. +
  1330. +#define MTK_WED_EXT_INT_STATUS 0x020
  1331. +#define MTK_WED_EXT_INT_STATUS_TF_LEN_ERR BIT(0)
  1332. +#define MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD BIT(1)
  1333. +#define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID BIT(4)
  1334. +#define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH BIT(8)
  1335. +#define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH BIT(9)
  1336. +#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12)
  1337. +#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13)
  1338. +#define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16)
  1339. +#define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17)
  1340. +#define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18)
  1341. +#define MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN BIT(19)
  1342. +#define MTK_WED_EXT_INT_STATUS_RX_DRV_BM_DMAD_COHERENT BIT(20)
  1343. +#define MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR BIT(21)
  1344. +#define MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR BIT(22)
  1345. +#define MTK_WED_EXT_INT_STATUS_RX_DRV_DMA_RECYCLE BIT(24)
  1346. +#define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \
  1347. + MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \
  1348. + MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \
  1349. + MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \
  1350. + MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \
  1351. + MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN | \
  1352. + MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR | \
  1353. + MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR)
  1354. +
  1355. +#define MTK_WED_EXT_INT_MASK 0x028
  1356. +
  1357. +#define MTK_WED_STATUS 0x060
  1358. +#define MTK_WED_STATUS_TX GENMASK(15, 8)
  1359. +
  1360. +#define MTK_WED_TX_BM_CTRL 0x080
  1361. +#define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM GENMASK(6, 0)
  1362. +#define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM GENMASK(22, 16)
  1363. +#define MTK_WED_TX_BM_CTRL_PAUSE BIT(28)
  1364. +
  1365. +#define MTK_WED_TX_BM_BASE 0x084
  1366. +
  1367. +#define MTK_WED_TX_BM_TKID 0x088
  1368. +#define MTK_WED_TX_BM_TKID_START GENMASK(15, 0)
  1369. +#define MTK_WED_TX_BM_TKID_END GENMASK(31, 16)
  1370. +
  1371. +#define MTK_WED_TX_BM_BUF_LEN 0x08c
  1372. +
  1373. +#define MTK_WED_TX_BM_INTF 0x09c
  1374. +#define MTK_WED_TX_BM_INTF_TKID GENMASK(15, 0)
  1375. +#define MTK_WED_TX_BM_INTF_TKFIFO_FDEP GENMASK(23, 16)
  1376. +#define MTK_WED_TX_BM_INTF_TKID_VALID BIT(28)
  1377. +#define MTK_WED_TX_BM_INTF_TKID_READ BIT(29)
  1378. +
  1379. +#define MTK_WED_TX_BM_DYN_THR 0x0a0
  1380. +#define MTK_WED_TX_BM_DYN_THR_LO GENMASK(6, 0)
  1381. +#define MTK_WED_TX_BM_DYN_THR_HI GENMASK(22, 16)
  1382. +
  1383. +#define MTK_WED_INT_STATUS 0x200
  1384. +#define MTK_WED_INT_MASK 0x204
  1385. +
  1386. +#define MTK_WED_GLO_CFG 0x208
  1387. +#define MTK_WED_GLO_CFG_TX_DMA_EN BIT(0)
  1388. +#define MTK_WED_GLO_CFG_TX_DMA_BUSY BIT(1)
  1389. +#define MTK_WED_GLO_CFG_RX_DMA_EN BIT(2)
  1390. +#define MTK_WED_GLO_CFG_RX_DMA_BUSY BIT(3)
  1391. +#define MTK_WED_GLO_CFG_RX_BT_SIZE GENMASK(5, 4)
  1392. +#define MTK_WED_GLO_CFG_TX_WB_DDONE BIT(6)
  1393. +#define MTK_WED_GLO_CFG_BIG_ENDIAN BIT(7)
  1394. +#define MTK_WED_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8)
  1395. +#define MTK_WED_GLO_CFG_TX_BT_SIZE_LO BIT(9)
  1396. +#define MTK_WED_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10)
  1397. +#define MTK_WED_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
  1398. +#define MTK_WED_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13)
  1399. +#define MTK_WED_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22)
  1400. +#define MTK_WED_GLO_CFG_SW_RESET BIT(24)
  1401. +#define MTK_WED_GLO_CFG_FIRST_TOKEN_ONLY BIT(26)
  1402. +#define MTK_WED_GLO_CFG_OMIT_RX_INFO BIT(27)
  1403. +#define MTK_WED_GLO_CFG_OMIT_TX_INFO BIT(28)
  1404. +#define MTK_WED_GLO_CFG_BYTE_SWAP BIT(29)
  1405. +#define MTK_WED_GLO_CFG_RX_2B_OFFSET BIT(31)
  1406. +
  1407. +#define MTK_WED_RESET_IDX 0x20c
  1408. +#define MTK_WED_RESET_IDX_TX GENMASK(3, 0)
  1409. +#define MTK_WED_RESET_IDX_RX GENMASK(17, 16)
  1410. +
  1411. +#define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4)
  1412. +
  1413. +#define MTK_WED_RING_TX(_n) (0x300 + (_n) * 0x10)
  1414. +
  1415. +#define MTK_WED_RING_RX(_n) (0x400 + (_n) * 0x10)
  1416. +
  1417. +#define MTK_WED_WPDMA_INT_TRIGGER 0x504
  1418. +#define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1)
  1419. +#define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4)
  1420. +
  1421. +#define MTK_WED_WPDMA_GLO_CFG 0x508
  1422. +#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN BIT(0)
  1423. +#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY BIT(1)
  1424. +#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN BIT(2)
  1425. +#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY BIT(3)
  1426. +#define MTK_WED_WPDMA_GLO_CFG_RX_BT_SIZE GENMASK(5, 4)
  1427. +#define MTK_WED_WPDMA_GLO_CFG_TX_WB_DDONE BIT(6)
  1428. +#define MTK_WED_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
  1429. +#define MTK_WED_WPDMA_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8)
  1430. +#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_LO BIT(9)
  1431. +#define MTK_WED_WPDMA_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10)
  1432. +#define MTK_WED_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
  1433. +#define MTK_WED_WPDMA_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13)
  1434. +#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22)
  1435. +#define MTK_WED_WPDMA_GLO_CFG_SW_RESET BIT(24)
  1436. +#define MTK_WED_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY BIT(26)
  1437. +#define MTK_WED_WPDMA_GLO_CFG_OMIT_RX_INFO BIT(27)
  1438. +#define MTK_WED_WPDMA_GLO_CFG_OMIT_TX_INFO BIT(28)
  1439. +#define MTK_WED_WPDMA_GLO_CFG_BYTE_SWAP BIT(29)
  1440. +#define MTK_WED_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31)
  1441. +
  1442. +#define MTK_WED_WPDMA_RESET_IDX 0x50c
  1443. +#define MTK_WED_WPDMA_RESET_IDX_TX GENMASK(3, 0)
  1444. +#define MTK_WED_WPDMA_RESET_IDX_RX GENMASK(17, 16)
  1445. +
  1446. +#define MTK_WED_WPDMA_INT_CTRL 0x520
  1447. +#define MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV BIT(21)
  1448. +
  1449. +#define MTK_WED_WPDMA_INT_MASK 0x524
  1450. +
  1451. +#define MTK_WED_PCIE_CFG_BASE 0x560
  1452. +
  1453. +#define MTK_WED_PCIE_INT_TRIGGER 0x570
  1454. +#define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16)
  1455. +
  1456. +#define MTK_WED_WPDMA_CFG_BASE 0x580
  1457. +
  1458. +#define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4)
  1459. +#define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4)
  1460. +
  1461. +#define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10)
  1462. +#define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10)
  1463. +#define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10)
  1464. +#define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4)
  1465. +
  1466. +#define MTK_WED_WDMA_GLO_CFG 0xa04
  1467. +#define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0)
  1468. +#define MTK_WED_WDMA_GLO_CFG_RX_DRV_EN BIT(2)
  1469. +#define MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY BIT(3)
  1470. +#define MTK_WED_WDMA_GLO_CFG_BT_SIZE GENMASK(5, 4)
  1471. +#define MTK_WED_WDMA_GLO_CFG_TX_WB_DDONE BIT(6)
  1472. +#define MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE BIT(13)
  1473. +#define MTK_WED_WDMA_GLO_CFG_WCOMPLETE_SEL BIT(16)
  1474. +#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_RXDMA_BYPASS BIT(17)
  1475. +#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_BYPASS BIT(18)
  1476. +#define MTK_WED_WDMA_GLO_CFG_FSM_RETURN_IDLE BIT(19)
  1477. +#define MTK_WED_WDMA_GLO_CFG_WAIT_COHERENT BIT(20)
  1478. +#define MTK_WED_WDMA_GLO_CFG_AXI_W_AFTER_AW BIT(21)
  1479. +#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY_SINGLE_W BIT(22)
  1480. +#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY BIT(23)
  1481. +#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP BIT(24)
  1482. +#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE BIT(25)
  1483. +#define MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE BIT(26)
  1484. +#define MTK_WED_WDMA_GLO_CFG_RXDRV_CLKGATE_BYPASS BIT(30)
  1485. +
  1486. +#define MTK_WED_WDMA_RESET_IDX 0xa08
  1487. +#define MTK_WED_WDMA_RESET_IDX_RX GENMASK(17, 16)
  1488. +#define MTK_WED_WDMA_RESET_IDX_DRV GENMASK(25, 24)
  1489. +
  1490. +#define MTK_WED_WDMA_INT_TRIGGER 0xa28
  1491. +#define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16)
  1492. +
  1493. +#define MTK_WED_WDMA_INT_CTRL 0xa2c
  1494. +#define MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL GENMASK(17, 16)
  1495. +
  1496. +#define MTK_WED_WDMA_OFFSET0 0xaa4
  1497. +#define MTK_WED_WDMA_OFFSET1 0xaa8
  1498. +
  1499. +#define MTK_WED_WDMA_RX_MIB(_n) (0xae0 + (_n) * 4)
  1500. +#define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4)
  1501. +#define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4)
  1502. +
  1503. +#define MTK_WED_RING_OFS_BASE 0x00
  1504. +#define MTK_WED_RING_OFS_COUNT 0x04
  1505. +#define MTK_WED_RING_OFS_CPU_IDX 0x08
  1506. +#define MTK_WED_RING_OFS_DMA_IDX 0x0c
  1507. +
  1508. +#define MTK_WDMA_RING_RX(_n) (0x100 + (_n) * 0x10)
  1509. +
  1510. +#define MTK_WDMA_GLO_CFG 0x204
  1511. +#define MTK_WDMA_GLO_CFG_RX_INFO_PRERES GENMASK(28, 26)
  1512. +
  1513. +#define MTK_WDMA_RESET_IDX 0x208
  1514. +#define MTK_WDMA_RESET_IDX_TX GENMASK(3, 0)
  1515. +#define MTK_WDMA_RESET_IDX_RX GENMASK(17, 16)
  1516. +
  1517. +#define MTK_WDMA_INT_MASK 0x228
  1518. +#define MTK_WDMA_INT_MASK_TX_DONE GENMASK(3, 0)
  1519. +#define MTK_WDMA_INT_MASK_RX_DONE GENMASK(17, 16)
  1520. +#define MTK_WDMA_INT_MASK_TX_DELAY BIT(28)
  1521. +#define MTK_WDMA_INT_MASK_TX_COHERENT BIT(29)
  1522. +#define MTK_WDMA_INT_MASK_RX_DELAY BIT(30)
  1523. +#define MTK_WDMA_INT_MASK_RX_COHERENT BIT(31)
  1524. +
  1525. +#define MTK_WDMA_INT_GRP1 0x250
  1526. +#define MTK_WDMA_INT_GRP2 0x254
  1527. +
  1528. +#define MTK_PCIE_MIRROR_MAP(n) ((n) ? 0x4 : 0x0)
  1529. +#define MTK_PCIE_MIRROR_MAP_EN BIT(0)
  1530. +#define MTK_PCIE_MIRROR_MAP_WED_ID BIT(1)
  1531. +
  1532. +/* DMA channel mapping */
  1533. +#define HIFSYS_DMA_AG_MAP 0x008
  1534. +
  1535. +#endif
  1536. --- /dev/null
  1537. +++ b/include/linux/soc/mediatek/mtk_wed.h
  1538. @@ -0,0 +1,131 @@
  1539. +#ifndef __MTK_WED_H
  1540. +#define __MTK_WED_H
  1541. +
  1542. +#include <linux/kernel.h>
  1543. +#include <linux/rcupdate.h>
  1544. +#include <linux/regmap.h>
  1545. +#include <linux/pci.h>
  1546. +
  1547. +#define MTK_WED_TX_QUEUES 2
  1548. +
  1549. +struct mtk_wed_hw;
  1550. +struct mtk_wdma_desc;
  1551. +
  1552. +struct mtk_wed_ring {
  1553. + struct mtk_wdma_desc *desc;
  1554. + dma_addr_t desc_phys;
  1555. + int size;
  1556. +
  1557. + u32 reg_base;
  1558. + void __iomem *wpdma;
  1559. +};
  1560. +
  1561. +struct mtk_wed_device {
  1562. +#ifdef CONFIG_NET_MEDIATEK_SOC_WED
  1563. + const struct mtk_wed_ops *ops;
  1564. + struct device *dev;
  1565. + struct mtk_wed_hw *hw;
  1566. + bool init_done, running;
  1567. + int wdma_idx;
  1568. + int irq;
  1569. +
  1570. + struct mtk_wed_ring tx_ring[MTK_WED_TX_QUEUES];
  1571. + struct mtk_wed_ring txfree_ring;
  1572. + struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES];
  1573. +
  1574. + struct {
  1575. + int size;
  1576. + void **pages;
  1577. + struct mtk_wdma_desc *desc;
  1578. + dma_addr_t desc_phys;
  1579. + } buf_ring;
  1580. +
  1581. + /* filled by driver: */
  1582. + struct {
  1583. + struct pci_dev *pci_dev;
  1584. +
  1585. + u32 wpdma_phys;
  1586. +
  1587. + u16 token_start;
  1588. + unsigned int nbuf;
  1589. +
  1590. + u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
  1591. + int (*offload_enable)(struct mtk_wed_device *wed);
  1592. + void (*offload_disable)(struct mtk_wed_device *wed);
  1593. + } wlan;
  1594. +#endif
  1595. +};
  1596. +
  1597. +struct mtk_wed_ops {
  1598. + int (*attach)(struct mtk_wed_device *dev);
  1599. + int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring,
  1600. + void __iomem *regs);
  1601. + int (*txfree_ring_setup)(struct mtk_wed_device *dev,
  1602. + void __iomem *regs);
  1603. + void (*detach)(struct mtk_wed_device *dev);
  1604. +
  1605. + void (*stop)(struct mtk_wed_device *dev);
  1606. + void (*start)(struct mtk_wed_device *dev, u32 irq_mask);
  1607. + void (*reset_dma)(struct mtk_wed_device *dev);
  1608. +
  1609. + u32 (*reg_read)(struct mtk_wed_device *dev, u32 reg);
  1610. + void (*reg_write)(struct mtk_wed_device *dev, u32 reg, u32 val);
  1611. +
  1612. + u32 (*irq_get)(struct mtk_wed_device *dev, u32 mask);
  1613. + void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
  1614. +};
  1615. +
  1616. +extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
  1617. +
  1618. +static inline int
  1619. +mtk_wed_device_attach(struct mtk_wed_device *dev)
  1620. +{
  1621. + int ret = -ENODEV;
  1622. +
  1623. +#ifdef CONFIG_NET_MEDIATEK_SOC_WED
  1624. + rcu_read_lock();
  1625. + dev->ops = rcu_dereference(mtk_soc_wed_ops);
  1626. + if (dev->ops)
  1627. + ret = dev->ops->attach(dev);
  1628. + else
  1629. + rcu_read_unlock();
  1630. +
  1631. + if (ret)
  1632. + dev->ops = NULL;
  1633. +#endif
  1634. +
  1635. + return ret;
  1636. +}
  1637. +
  1638. +#ifdef CONFIG_NET_MEDIATEK_SOC_WED
  1639. +#define mtk_wed_device_active(_dev) !!(_dev)->ops
  1640. +#define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev)
  1641. +#define mtk_wed_device_start(_dev, _mask) (_dev)->ops->start(_dev, _mask)
  1642. +#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) \
  1643. + (_dev)->ops->tx_ring_setup(_dev, _ring, _regs)
  1644. +#define mtk_wed_device_txfree_ring_setup(_dev, _regs) \
  1645. + (_dev)->ops->txfree_ring_setup(_dev, _regs)
  1646. +#define mtk_wed_device_reg_read(_dev, _reg) \
  1647. + (_dev)->ops->reg_read(_dev, _reg)
  1648. +#define mtk_wed_device_reg_write(_dev, _reg, _val) \
  1649. + (_dev)->ops->reg_write(_dev, _reg, _val)
  1650. +#define mtk_wed_device_irq_get(_dev, _mask) \
  1651. + (_dev)->ops->irq_get(_dev, _mask)
  1652. +#define mtk_wed_device_irq_set_mask(_dev, _mask) \
  1653. + (_dev)->ops->irq_set_mask(_dev, _mask)
  1654. +#else
  1655. +static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
  1656. +{
  1657. + return false;
  1658. +}
  1659. +#define mtk_wed_device_detach(_dev) do {} while (0)
  1660. +#define mtk_wed_device_start(_dev, _mask) do {} while (0)
  1661. +#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) -ENODEV
  1662. +#define mtk_wed_device_txfree_ring_setup(_dev, _ring, _regs) -ENODEV
  1663. +#define mtk_wed_device_reg_read(_dev, _reg) 0
  1664. +#define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0)
  1665. +#define mtk_wed_device_irq_get(_dev, _mask) 0
  1666. +#define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0)
  1667. +#endif
  1668. +
  1669. +#endif