0047-DMA-ralink-add-rt2880-dma-engine.patch 47 KB

  1. From f1c4d9e622c800e1f38b3818f933ec7597d1ccfb Mon Sep 17 00:00:00 2001
  2. From: John Crispin <[email protected]>
  3. Date: Sun, 27 Jul 2014 09:29:51 +0100
  4. Subject: [PATCH 47/53] DMA: ralink: add rt2880 dma engine
  5. Signed-off-by: John Crispin <[email protected]>
  6. ---
  7. drivers/dma/Kconfig | 6 +
  8. drivers/dma/Makefile | 1 +
  9. drivers/dma/ralink-gdma.c | 577 +++++++++++++++++++++++++++++++++++++++++++++
  10. include/linux/dmaengine.h | 1 +
  11. 4 files changed, 585 insertions(+)
  12. create mode 100644 drivers/dma/ralink-gdma.c
  13. --- a/drivers/dma/Kconfig
  14. +++ b/drivers/dma/Kconfig
  15. @@ -40,6 +40,18 @@ config ASYNC_TX_ENABLE_CHANNEL_SWITCH
  16. config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
  17. bool
  18. +config DMA_RALINK
  19. + tristate "RALINK DMA support"
  20. + depends on RALINK && !SOC_RT288X
  21. + select DMA_ENGINE
  22. + select DMA_VIRTUAL_CHANNELS
  23. +
  24. +config MTK_HSDMA
  25. + tristate "MTK HSDMA support"
  26. + depends on RALINK && SOC_MT7621
  27. + select DMA_ENGINE
  28. + select DMA_VIRTUAL_CHANNELS
  29. +
  30. config DMA_ENGINE
  31. bool
  32. --- a/drivers/dma/Makefile
  33. +++ b/drivers/dma/Makefile
  34. @@ -71,6 +71,8 @@ obj-$(CONFIG_TI_EDMA) += edma.o
  35. obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
  36. obj-$(CONFIG_ZX_DMA) += zx_dma.o
  37. obj-$(CONFIG_ST_FDMA) += st_fdma.o
  38. +obj-$(CONFIG_DMA_RALINK) += ralink-gdma.o
  39. +obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o
  40. obj-y += qcom/
  41. obj-y += xilinx/
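Note for orientation (illustrative, not part of the patch): the Kconfig options added above select DMA_ENGINE, so once either driver is built a consumer can reach it through the generic dmaengine client API. A minimal client-side sketch of offloading a memory copy, assuming hypothetical dev, src_phys, dst_phys and len, with error handling trimmed:

	/* illustrative memcpy offload through the generic dmaengine API */
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);	/* any DMA_MEMCPY-capable channel */
	if (!chan)
		return -ENODEV;

	tx = dmaengine_prep_dma_memcpy(chan, dst_phys, src_phys, len,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		return -ENOMEM;
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	/* ... wait for the completion callback, then ... */
	dma_release_channel(chan);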
  42. --- /dev/null
  43. +++ b/drivers/dma/ralink-gdma.c
  44. @@ -0,0 +1,928 @@
  45. +/*
  46. + * Copyright (C) 2013, Lars-Peter Clausen <[email protected]>
  47. + * GDMA4740 DMAC support
  48. + *
  49. + * This program is free software; you can redistribute it and/or modify it
  50. + * under the terms of the GNU General Public License as published by the
  51. + * Free Software Foundation; either version 2 of the License, or (at your
  52. + * option) any later version.
  53. + *
  54. + */
  55. +
  56. +#include <linux/dmaengine.h>
  57. +#include <linux/dma-mapping.h>
  58. +#include <linux/err.h>
  59. +#include <linux/init.h>
  60. +#include <linux/list.h>
  61. +#include <linux/module.h>
  62. +#include <linux/platform_device.h>
  63. +#include <linux/slab.h>
  64. +#include <linux/spinlock.h>
  65. +#include <linux/irq.h>
  66. +#include <linux/of_dma.h>
  67. +#include <linux/reset.h>
  68. +#include <linux/of_device.h>
  69. +
  70. +#include "virt-dma.h"
  71. +
  72. +#define GDMA_REG_SRC_ADDR(x) (0x00 + (x) * 0x10)
  73. +#define GDMA_REG_DST_ADDR(x) (0x04 + (x) * 0x10)
  74. +
  75. +#define GDMA_REG_CTRL0(x) (0x08 + (x) * 0x10)
  76. +#define GDMA_REG_CTRL0_TX_MASK 0xffff
  77. +#define GDMA_REG_CTRL0_TX_SHIFT 16
  78. +#define GDMA_REG_CTRL0_CURR_MASK 0xff
  79. +#define GDMA_REG_CTRL0_CURR_SHIFT 8
  80. +#define GDMA_REG_CTRL0_SRC_ADDR_FIXED BIT(7)
  81. +#define GDMA_REG_CTRL0_DST_ADDR_FIXED BIT(6)
  82. +#define GDMA_REG_CTRL0_BURST_MASK 0x7
  83. +#define GDMA_REG_CTRL0_BURST_SHIFT 3
  84. +#define GDMA_REG_CTRL0_DONE_INT BIT(2)
  85. +#define GDMA_REG_CTRL0_ENABLE BIT(1)
  86. +#define GDMA_REG_CTRL0_SW_MODE BIT(0)
  87. +
  88. +#define GDMA_REG_CTRL1(x) (0x0c + (x) * 0x10)
  89. +#define GDMA_REG_CTRL1_SEG_MASK 0xf
  90. +#define GDMA_REG_CTRL1_SEG_SHIFT 22
  91. +#define GDMA_REG_CTRL1_REQ_MASK 0x3f
  92. +#define GDMA_REG_CTRL1_SRC_REQ_SHIFT 16
  93. +#define GDMA_REG_CTRL1_DST_REQ_SHIFT 8
  94. +#define GDMA_REG_CTRL1_CONTINOUS BIT(14)
  95. +#define GDMA_REG_CTRL1_NEXT_MASK 0x1f
  96. +#define GDMA_REG_CTRL1_NEXT_SHIFT 3
  97. +#define GDMA_REG_CTRL1_COHERENT BIT(2)
  98. +#define GDMA_REG_CTRL1_FAIL BIT(1)
  99. +#define GDMA_REG_CTRL1_MASK BIT(0)
  100. +
  101. +#define GDMA_REG_UNMASK_INT 0x200
  102. +#define GDMA_REG_DONE_INT 0x204
  103. +
  104. +#define GDMA_REG_GCT 0x220
  105. +#define GDMA_REG_GCT_CHAN_MASK 0x3
  106. +#define GDMA_REG_GCT_CHAN_SHIFT 3
  107. +#define GDMA_REG_GCT_VER_MASK 0x3
  108. +#define GDMA_REG_GCT_VER_SHIFT 1
  109. +#define GDMA_REG_GCT_ARBIT_RR BIT(0)
  110. +
  111. +#define GDMA_REG_REQSTS 0x2a0
  112. +#define GDMA_REG_ACKSTS 0x2a4
  113. +#define GDMA_REG_FINSTS 0x2a8
  114. +
  115. +/* for RT305X gdma registers */
  116. +#define GDMA_RT305X_CTRL0_REQ_MASK 0xf
  117. +#define GDMA_RT305X_CTRL0_SRC_REQ_SHIFT 12
  118. +#define GDMA_RT305X_CTRL0_DST_REQ_SHIFT 8
  119. +
  120. +#define GDMA_RT305X_CTRL1_FAIL BIT(4)
  121. +#define GDMA_RT305X_CTRL1_NEXT_MASK 0x7
  122. +#define GDMA_RT305X_CTRL1_NEXT_SHIFT 1
  123. +
  124. +#define GDMA_RT305X_STATUS_INT 0x80
  125. +#define GDMA_RT305X_STATUS_SIGNAL 0x84
  126. +#define GDMA_RT305X_GCT 0x88
  127. +
  128. +/* for MT7621 gdma registers */
  129. +#define GDMA_REG_PERF_START(x) (0x230 + (x) * 0x8)
  130. +#define GDMA_REG_PERF_END(x) (0x234 + (x) * 0x8)
  131. +
  132. +enum gdma_dma_transfer_size {
  133. + GDMA_TRANSFER_SIZE_4BYTE = 0,
  134. + GDMA_TRANSFER_SIZE_8BYTE = 1,
  135. + GDMA_TRANSFER_SIZE_16BYTE = 2,
  136. + GDMA_TRANSFER_SIZE_32BYTE = 3,
  137. + GDMA_TRANSFER_SIZE_64BYTE = 4,
  138. +};
  139. +
  140. +struct gdma_dma_sg {
  141. + dma_addr_t src_addr;
  142. + dma_addr_t dst_addr;
  143. + u32 len;
  144. +};
  145. +
  146. +struct gdma_dma_desc {
  147. + struct virt_dma_desc vdesc;
  148. +
  149. + enum dma_transfer_direction direction;
  150. + bool cyclic;
  151. +
  152. + u32 residue;
  153. + unsigned int num_sgs;
  154. + struct gdma_dma_sg sg[];
  155. +};
  156. +
  157. +struct gdma_dmaengine_chan {
  158. + struct virt_dma_chan vchan;
  159. + unsigned int id;
  160. + unsigned int slave_id;
  161. +
  162. + dma_addr_t fifo_addr;
  163. + enum gdma_dma_transfer_size burst_size;
  164. +
  165. + struct gdma_dma_desc *desc;
  166. + unsigned int next_sg;
  167. +};
  168. +
  169. +struct gdma_dma_dev {
  170. + struct dma_device ddev;
  171. + struct device_dma_parameters dma_parms;
  172. + struct gdma_data *data;
  173. + void __iomem *base;
  174. + struct tasklet_struct task;
  175. + volatile unsigned long chan_issued;
  176. + atomic_t cnt;
  177. +
  178. + struct gdma_dmaengine_chan chan[];
  179. +};
  180. +
  181. +struct gdma_data
  182. +{
  183. + int chancnt;
  184. + u32 done_int_reg;
  185. + void (*init)(struct gdma_dma_dev *dma_dev);
  186. + int (*start_transfer)(struct gdma_dmaengine_chan *chan);
  187. +};
  188. +
  189. +static struct gdma_dma_dev *gdma_dma_chan_get_dev(
  190. + struct gdma_dmaengine_chan *chan)
  191. +{
  192. + return container_of(chan->vchan.chan.device, struct gdma_dma_dev,
  193. + ddev);
  194. +}
  195. +
  196. +static struct gdma_dmaengine_chan *to_gdma_dma_chan(struct dma_chan *c)
  197. +{
  198. + return container_of(c, struct gdma_dmaengine_chan, vchan.chan);
  199. +}
  200. +
  201. +static struct gdma_dma_desc *to_gdma_dma_desc(struct virt_dma_desc *vdesc)
  202. +{
  203. + return container_of(vdesc, struct gdma_dma_desc, vdesc);
  204. +}
  205. +
  206. +static inline uint32_t gdma_dma_read(struct gdma_dma_dev *dma_dev,
  207. + unsigned int reg)
  208. +{
  209. + return readl(dma_dev->base + reg);
  210. +}
  211. +
  212. +static inline void gdma_dma_write(struct gdma_dma_dev *dma_dev,
  213. + unsigned reg, uint32_t val)
  214. +{
  215. + writel(val, dma_dev->base + reg);
  216. +}
  217. +
  218. +static struct gdma_dma_desc *gdma_dma_alloc_desc(unsigned int num_sgs)
  219. +{
  220. + return kzalloc(sizeof(struct gdma_dma_desc) +
  221. + sizeof(struct gdma_dma_sg) * num_sgs, GFP_ATOMIC);
  222. +}
  223. +
  224. +static enum gdma_dma_transfer_size gdma_dma_maxburst(u32 maxburst)
  225. +{
  226. + if (maxburst < 2)
  227. + return GDMA_TRANSFER_SIZE_4BYTE;
  228. + else if (maxburst < 4)
  229. + return GDMA_TRANSFER_SIZE_8BYTE;
  230. + else if (maxburst < 8)
  231. + return GDMA_TRANSFER_SIZE_16BYTE;
  232. + else if (maxburst < 16)
  233. + return GDMA_TRANSFER_SIZE_32BYTE;
  234. + else
  235. + return GDMA_TRANSFER_SIZE_64BYTE;
  236. +}
  237. +
  238. +static int gdma_dma_config(struct dma_chan *c,
  239. + struct dma_slave_config *config)
  240. +{
  241. + struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
  242. + struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
  243. +
  244. + if (config->device_fc) {
  245. + dev_err(dma_dev->ddev.dev, "flow control is not supported\n");
  246. + return -EINVAL;
  247. + }
  248. +
  249. + switch (config->direction) {
  250. + case DMA_MEM_TO_DEV:
  251. + if (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) {
  252. + dev_err(dma_dev->ddev.dev, "only 4 byte buswidth is supported\n");
  253. + return -EINVAL;
  254. + }
  255. + chan->slave_id = config->slave_id;
  256. + chan->fifo_addr = config->dst_addr;
  257. + chan->burst_size = gdma_dma_maxburst(config->dst_maxburst);
  258. + break;
  259. + case DMA_DEV_TO_MEM:
  260. + if (config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) {
  261. + dev_err(dma_dev->ddev.dev, "only 4 byte buswidth is supported\n");
  262. + return -EINVAL;
  263. + }
  264. + chan->slave_id = config->slave_id;
  265. + chan->fifo_addr = config->src_addr;
  266. + chan->burst_size = gdma_dma_maxburst(config->src_maxburst);
  267. + break;
  268. + default:
  269. + dev_err(dma_dev->ddev.dev, "direction type %d error\n",
  270. + config->direction);
  271. + return -EINVAL;
  272. + }
  273. +
  274. + return 0;
  275. +}
  276. +
  277. +static int gdma_dma_terminate_all(struct dma_chan *c)
  278. +{
  279. + struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
  280. + struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
  281. + unsigned long flags, timeout;
  282. + LIST_HEAD(head);
  283. + int i = 0;
  284. +
  285. + spin_lock_irqsave(&chan->vchan.lock, flags);
  286. + chan->desc = NULL;
  287. + clear_bit(chan->id, &dma_dev->chan_issued);
  288. + vchan_get_all_descriptors(&chan->vchan, &head);
  289. + spin_unlock_irqrestore(&chan->vchan.lock, flags);
  290. +
  291. + vchan_dma_desc_free_list(&chan->vchan, &head);
  292. +
  293. + /* wait dma transfer complete */
  294. + timeout = jiffies + msecs_to_jiffies(5000);
  295. + while (gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id)) &
  296. + GDMA_REG_CTRL0_ENABLE) {
  297. + if (time_after_eq(jiffies, timeout)) {
  298. + dev_err(dma_dev->ddev.dev, "chan %d wait timeout\n",
  299. + chan->id);
  300. + /* restore to init value */
  301. + gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), 0);
  302. + break;
  303. + }
  304. + cpu_relax();
  305. + i++;
  306. + }
  307. +
  308. + if (i)
  309. + dev_dbg(dma_dev->ddev.dev, "terminate chan %d loops %d\n",
  310. + chan->id, i);
  311. +
  312. + return 0;
  313. +}
  314. +
  315. +static void rt305x_dump_reg(struct gdma_dma_dev *dma_dev, int id)
  316. +{
  317. + dev_dbg(dma_dev->ddev.dev, "chan %d, src %08x, dst %08x, ctr0 %08x, " \
  318. + "ctr1 %08x, intr %08x, signal %08x\n", id,
  319. + gdma_dma_read(dma_dev, GDMA_REG_SRC_ADDR(id)),
  320. + gdma_dma_read(dma_dev, GDMA_REG_DST_ADDR(id)),
  321. + gdma_dma_read(dma_dev, GDMA_REG_CTRL0(id)),
  322. + gdma_dma_read(dma_dev, GDMA_REG_CTRL1(id)),
  323. + gdma_dma_read(dma_dev, GDMA_RT305X_STATUS_INT),
  324. + gdma_dma_read(dma_dev, GDMA_RT305X_STATUS_SIGNAL));
  325. +}
  326. +
  327. +static int rt305x_gdma_start_transfer(struct gdma_dmaengine_chan *chan)
  328. +{
  329. + struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
  330. + dma_addr_t src_addr, dst_addr;
  331. + struct gdma_dma_sg *sg;
  332. + uint32_t ctrl0, ctrl1;
  333. +
  334. + /* verify chan is already stopped */
  335. + ctrl0 = gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id));
  336. + if (unlikely(ctrl0 & GDMA_REG_CTRL0_ENABLE)) {
  337. + dev_err(dma_dev->ddev.dev, "chan %d is already started (%08x)\n",
  338. + chan->id, ctrl0);
  339. + rt305x_dump_reg(dma_dev, chan->id);
  340. + return -EINVAL;
  341. + }
  342. +
  343. + sg = &chan->desc->sg[chan->next_sg];
  344. + if (chan->desc->direction == DMA_MEM_TO_DEV) {
  345. + src_addr = sg->src_addr;
  346. + dst_addr = chan->fifo_addr;
  347. + ctrl0 = GDMA_REG_CTRL0_DST_ADDR_FIXED | \
  348. + (8 << GDMA_RT305X_CTRL0_SRC_REQ_SHIFT) | \
  349. + (chan->slave_id << GDMA_RT305X_CTRL0_DST_REQ_SHIFT);
  350. + } else if (chan->desc->direction == DMA_DEV_TO_MEM) {
  351. + src_addr = chan->fifo_addr;
  352. + dst_addr = sg->dst_addr;
  353. + ctrl0 = GDMA_REG_CTRL0_SRC_ADDR_FIXED | \
  354. + (chan->slave_id << GDMA_RT305X_CTRL0_SRC_REQ_SHIFT) | \
  355. + (8 << GDMA_RT305X_CTRL0_DST_REQ_SHIFT);
  356. + } else if (chan->desc->direction == DMA_MEM_TO_MEM) {
  357. + /*
  358. + * TODO: the memcpy path has bugs; sometimes it copies 8 extra
  359. + * bytes of data when verified with dmatest.
  360. + */
  361. + src_addr = sg->src_addr;
  362. + dst_addr = sg->dst_addr;
  363. + ctrl0 = GDMA_REG_CTRL0_SW_MODE | \
  364. + (8 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) | \
  365. + (8 << GDMA_REG_CTRL1_DST_REQ_SHIFT);
  366. + } else {
  367. + dev_err(dma_dev->ddev.dev, "direction type %d error\n",
  368. + chan->desc->direction);
  369. + return -EINVAL;
  370. + }
  371. +
  372. + ctrl0 |= (sg->len << GDMA_REG_CTRL0_TX_SHIFT) | \
  373. + (chan->burst_size << GDMA_REG_CTRL0_BURST_SHIFT) | \
  374. + GDMA_REG_CTRL0_DONE_INT | GDMA_REG_CTRL0_ENABLE;
  375. + ctrl1 = chan->id << GDMA_REG_CTRL1_NEXT_SHIFT;
  376. +
  377. + chan->next_sg++;
  378. + gdma_dma_write(dma_dev, GDMA_REG_SRC_ADDR(chan->id), src_addr);
  379. + gdma_dma_write(dma_dev, GDMA_REG_DST_ADDR(chan->id), dst_addr);
  380. + gdma_dma_write(dma_dev, GDMA_REG_CTRL1(chan->id), ctrl1);
  381. +
  382. + /* make sure next_sg is updated */
  383. + wmb();
  384. + gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), ctrl0);
  385. +
  386. + return 0;
  387. +}
  388. +
  389. +static void rt3883_dump_reg(struct gdma_dma_dev *dma_dev, int id)
  390. +{
  391. + dev_dbg(dma_dev->ddev.dev, "chan %d, src %08x, dst %08x, ctr0 %08x, " \
  392. + "ctr1 %08x, unmask %08x, done %08x, " \
  393. + "req %08x, ack %08x, fin %08x\n", id,
  394. + gdma_dma_read(dma_dev, GDMA_REG_SRC_ADDR(id)),
  395. + gdma_dma_read(dma_dev, GDMA_REG_DST_ADDR(id)),
  396. + gdma_dma_read(dma_dev, GDMA_REG_CTRL0(id)),
  397. + gdma_dma_read(dma_dev, GDMA_REG_CTRL1(id)),
  398. + gdma_dma_read(dma_dev, GDMA_REG_UNMASK_INT),
  399. + gdma_dma_read(dma_dev, GDMA_REG_DONE_INT),
  400. + gdma_dma_read(dma_dev, GDMA_REG_REQSTS),
  401. + gdma_dma_read(dma_dev, GDMA_REG_ACKSTS),
  402. + gdma_dma_read(dma_dev, GDMA_REG_FINSTS));
  403. +}
  404. +
  405. +static int rt3883_gdma_start_transfer(struct gdma_dmaengine_chan *chan)
  406. +{
  407. + struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
  408. + dma_addr_t src_addr, dst_addr;
  409. + struct gdma_dma_sg *sg;
  410. + uint32_t ctrl0, ctrl1;
  411. +
  412. + /* verify chan is already stopped */
  413. + ctrl0 = gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id));
  414. + if (unlikely(ctrl0 & GDMA_REG_CTRL0_ENABLE)) {
  415. + dev_err(dma_dev->ddev.dev, "chan %d is already started (%08x)\n",
  416. + chan->id, ctrl0);
  417. + rt3883_dump_reg(dma_dev, chan->id);
  418. + return -EINVAL;
  419. + }
  420. +
  421. + sg = &chan->desc->sg[chan->next_sg];
  422. + if (chan->desc->direction == DMA_MEM_TO_DEV) {
  423. + src_addr = sg->src_addr;
  424. + dst_addr = chan->fifo_addr;
  425. + ctrl0 = GDMA_REG_CTRL0_DST_ADDR_FIXED;
  426. + ctrl1 = (32 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) | \
  427. + (chan->slave_id << GDMA_REG_CTRL1_DST_REQ_SHIFT);
  428. + } else if (chan->desc->direction == DMA_DEV_TO_MEM) {
  429. + src_addr = chan->fifo_addr;
  430. + dst_addr = sg->dst_addr;
  431. + ctrl0 = GDMA_REG_CTRL0_SRC_ADDR_FIXED;
  432. + ctrl1 = (chan->slave_id << GDMA_REG_CTRL1_SRC_REQ_SHIFT) | \
  433. + (32 << GDMA_REG_CTRL1_DST_REQ_SHIFT) | \
  434. + GDMA_REG_CTRL1_COHERENT;
  435. + } else if (chan->desc->direction == DMA_MEM_TO_MEM) {
  436. + src_addr = sg->src_addr;
  437. + dst_addr = sg->dst_addr;
  438. + ctrl0 = GDMA_REG_CTRL0_SW_MODE;
  439. + ctrl1 = (32 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) | \
  440. + (32 << GDMA_REG_CTRL1_DST_REQ_SHIFT) | \
  441. + GDMA_REG_CTRL1_COHERENT;
  442. + } else {
  443. + dev_err(dma_dev->ddev.dev, "direction type %d error\n",
  444. + chan->desc->direction);
  445. + return -EINVAL;
  446. + }
  447. +
  448. + ctrl0 |= (sg->len << GDMA_REG_CTRL0_TX_SHIFT) | \
  449. + (chan->burst_size << GDMA_REG_CTRL0_BURST_SHIFT) | \
  450. + GDMA_REG_CTRL0_DONE_INT | GDMA_REG_CTRL0_ENABLE;
  451. + ctrl1 |= chan->id << GDMA_REG_CTRL1_NEXT_SHIFT;
  452. +
  453. + chan->next_sg++;
  454. + gdma_dma_write(dma_dev, GDMA_REG_SRC_ADDR(chan->id), src_addr);
  455. + gdma_dma_write(dma_dev, GDMA_REG_DST_ADDR(chan->id), dst_addr);
  456. + gdma_dma_write(dma_dev, GDMA_REG_CTRL1(chan->id), ctrl1);
  457. +
  458. + /* make sure next_sg is updated */
  459. + wmb();
  460. + gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), ctrl0);
  461. +
  462. + return 0;
  463. +}
  464. +
  465. +static inline int gdma_start_transfer(struct gdma_dma_dev *dma_dev,
  466. + struct gdma_dmaengine_chan *chan)
  467. +{
  468. + return dma_dev->data->start_transfer(chan);
  469. +}
  470. +
  471. +static int gdma_next_desc(struct gdma_dmaengine_chan *chan)
  472. +{
  473. + struct virt_dma_desc *vdesc;
  474. +
  475. + vdesc = vchan_next_desc(&chan->vchan);
  476. + if (!vdesc) {
  477. + chan->desc = NULL;
  478. + return 0;
  479. + }
  480. + chan->desc = to_gdma_dma_desc(vdesc);
  481. + chan->next_sg = 0;
  482. +
  483. + return 1;
  484. +}
  485. +
  486. +static void gdma_dma_chan_irq(struct gdma_dma_dev *dma_dev,
  487. + struct gdma_dmaengine_chan *chan)
  488. +{
  489. + struct gdma_dma_desc *desc;
  490. + unsigned long flags;
  491. + int chan_issued;
  492. +
  493. + chan_issued = 0;
  494. + spin_lock_irqsave(&chan->vchan.lock, flags);
  495. + desc = chan->desc;
  496. + if (desc) {
  497. + if (desc->cyclic) {
  498. + vchan_cyclic_callback(&desc->vdesc);
  499. + if (chan->next_sg == desc->num_sgs)
  500. + chan->next_sg = 0;
  501. + chan_issued = 1;
  502. + } else {
  503. + desc->residue -= desc->sg[chan->next_sg - 1].len;
  504. + if (chan->next_sg == desc->num_sgs) {
  505. + list_del(&desc->vdesc.node);
  506. + vchan_cookie_complete(&desc->vdesc);
  507. + chan_issued = gdma_next_desc(chan);
  508. + } else
  509. + chan_issued = 1;
  510. + }
  511. + } else
  512. + dev_dbg(dma_dev->ddev.dev, "chan %d no desc to complete\n",
  513. + chan->id);
  514. + if (chan_issued)
  515. + set_bit(chan->id, &dma_dev->chan_issued);
  516. + spin_unlock_irqrestore(&chan->vchan.lock, flags);
  517. +}
  518. +
  519. +static irqreturn_t gdma_dma_irq(int irq, void *devid)
  520. +{
  521. + struct gdma_dma_dev *dma_dev = devid;
  522. + u32 done, done_reg;
  523. + unsigned int i;
  524. +
  525. + done_reg = dma_dev->data->done_int_reg;
  526. + done = gdma_dma_read(dma_dev, done_reg);
  527. + if (unlikely(!done))
  528. + return IRQ_NONE;
  529. +
  530. + /* clean done bits */
  531. + gdma_dma_write(dma_dev, done_reg, done);
  532. +
  533. + i = 0;
  534. + while (done) {
  535. + if (done & 0x1) {
  536. + gdma_dma_chan_irq(dma_dev, &dma_dev->chan[i]);
  537. + atomic_dec(&dma_dev->cnt);
  538. + }
  539. + done >>= 1;
  540. + i++;
  541. + }
  542. +
  543. + /* schedule the tasklet only if there is work to do */
  544. + if (dma_dev->chan_issued)
  545. + tasklet_schedule(&dma_dev->task);
  546. +
  547. + return IRQ_HANDLED;
  548. +}
  549. +
  550. +static void gdma_dma_issue_pending(struct dma_chan *c)
  551. +{
  552. + struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
  553. + struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
  554. + unsigned long flags;
  555. +
  556. + spin_lock_irqsave(&chan->vchan.lock, flags);
  557. + if (vchan_issue_pending(&chan->vchan) && !chan->desc) {
  558. + if (gdma_next_desc(chan)) {
  559. + set_bit(chan->id, &dma_dev->chan_issued);
  560. + tasklet_schedule(&dma_dev->task);
  561. + } else
  562. + dev_dbg(dma_dev->ddev.dev, "chan %d no desc to issue\n",
  563. + chan->id);
  564. + }
  565. + spin_unlock_irqrestore(&chan->vchan.lock, flags);
  566. +}
  567. +
  568. +static struct dma_async_tx_descriptor *gdma_dma_prep_slave_sg(
  569. + struct dma_chan *c, struct scatterlist *sgl,
  570. + unsigned int sg_len, enum dma_transfer_direction direction,
  571. + unsigned long flags, void *context)
  572. +{
  573. + struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
  574. + struct gdma_dma_desc *desc;
  575. + struct scatterlist *sg;
  576. + unsigned int i;
  577. +
  578. + desc = gdma_dma_alloc_desc(sg_len);
  579. + if (!desc) {
  580. + dev_err(c->device->dev, "alloc sg descs error\n");
  581. + return NULL;
  582. + }
  583. + desc->residue = 0;
  584. +
  585. + for_each_sg(sgl, sg, sg_len, i) {
  586. + if (direction == DMA_MEM_TO_DEV)
  587. + desc->sg[i].src_addr = sg_dma_address(sg);
  588. + else if (direction == DMA_DEV_TO_MEM)
  589. + desc->sg[i].dst_addr = sg_dma_address(sg);
  590. + else {
  591. + dev_err(c->device->dev, "direction type %d error\n",
  592. + direction);
  593. + goto free_desc;
  594. + }
  595. +
  596. + if (unlikely(sg_dma_len(sg) > GDMA_REG_CTRL0_TX_MASK)) {
  597. + dev_err(c->device->dev, "sg len too large %d\n",
  598. + sg_dma_len(sg));
  599. + goto free_desc;
  600. + }
  601. + desc->sg[i].len = sg_dma_len(sg);
  602. + desc->residue += sg_dma_len(sg);
  603. + }
  604. +
  605. + desc->num_sgs = sg_len;
  606. + desc->direction = direction;
  607. + desc->cyclic = false;
  608. +
  609. + return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
  610. +
  611. +free_desc:
  612. + kfree(desc);
  613. + return NULL;
  614. +}
  615. +
  616. +static struct dma_async_tx_descriptor * gdma_dma_prep_dma_memcpy(
  617. + struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
  618. + size_t len, unsigned long flags)
  619. +{
  620. + struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
  621. + struct gdma_dma_desc *desc;
  622. + unsigned int num_periods, i;
  623. + size_t xfer_count;
  624. +
  625. + if (len <= 0)
  626. + return NULL;
  627. +
  628. + chan->burst_size = gdma_dma_maxburst(len >> 2);
  629. +
  630. + xfer_count = GDMA_REG_CTRL0_TX_MASK;
  631. + num_periods = DIV_ROUND_UP(len, xfer_count);
  632. +
  633. + desc = gdma_dma_alloc_desc(num_periods);
  634. + if (!desc) {
  635. + dev_err(c->device->dev, "alloc memcpy descs error\n");
  636. + return NULL;
  637. + }
  638. + desc->residue = len;
  639. +
  640. + for (i = 0; i < num_periods; i++) {
  641. + desc->sg[i].src_addr = src;
  642. + desc->sg[i].dst_addr = dest;
  643. + if (len > xfer_count) {
  644. + desc->sg[i].len = xfer_count;
  645. + } else {
  646. + desc->sg[i].len = len;
  647. + }
  648. + src += desc->sg[i].len;
  649. + dest += desc->sg[i].len;
  650. + len -= desc->sg[i].len;
  651. + }
  652. +
  653. + desc->num_sgs = num_periods;
  654. + desc->direction = DMA_MEM_TO_MEM;
  655. + desc->cyclic = false;
  656. +
  657. + return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
  658. +}
  659. +
  660. +static struct dma_async_tx_descriptor *gdma_dma_prep_dma_cyclic(
  661. + struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
  662. + size_t period_len, enum dma_transfer_direction direction,
  663. + unsigned long flags)
  664. +{
  665. + struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
  666. + struct gdma_dma_desc *desc;
  667. + unsigned int num_periods, i;
  668. +
  669. + if (buf_len % period_len)
  670. + return NULL;
  671. +
  672. + if (period_len > GDMA_REG_CTRL0_TX_MASK) {
  673. + dev_err(c->device->dev, "cyclic len too large %d\n",
  674. + period_len);
  675. + return NULL;
  676. + }
  677. +
  678. + num_periods = buf_len / period_len;
  679. + desc = gdma_dma_alloc_desc(num_periods);
  680. + if (!desc) {
  681. + dev_err(c->device->dev, "alloc cyclic descs error\n");
  682. + return NULL;
  683. + }
  684. + desc->residue = buf_len;
  685. +
  686. + for (i = 0; i < num_periods; i++) {
  687. + if (direction == DMA_MEM_TO_DEV)
  688. + desc->sg[i].src_addr = buf_addr;
  689. + else if (direction == DMA_DEV_TO_MEM)
  690. + desc->sg[i].dst_addr = buf_addr;
  691. + else {
  692. + dev_err(c->device->dev, "direction type %d error\n",
  693. + direction);
  694. + goto free_desc;
  695. + }
  696. + desc->sg[i].len = period_len;
  697. + buf_addr += period_len;
  698. + }
  699. +
  700. + desc->num_sgs = num_periods;
  701. + desc->direction = direction;
  702. + desc->cyclic = true;
  703. +
  704. + return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
  705. +
  706. +free_desc:
  707. + kfree(desc);
  708. + return NULL;
  709. +}
  710. +
  711. +static enum dma_status gdma_dma_tx_status(struct dma_chan *c,
  712. + dma_cookie_t cookie, struct dma_tx_state *state)
  713. +{
  714. + struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
  715. + struct virt_dma_desc *vdesc;
  716. + enum dma_status status;
  717. + unsigned long flags;
  718. + struct gdma_dma_desc *desc;
  719. +
  720. + status = dma_cookie_status(c, cookie, state);
  721. + if (status == DMA_COMPLETE || !state)
  722. + return status;
  723. +
  724. + spin_lock_irqsave(&chan->vchan.lock, flags);
  725. + desc = chan->desc;
  726. + if (desc && (cookie == desc->vdesc.tx.cookie)) {
  727. + /*
  728. + * desc->residue is never updated in the cyclic case, so
  729. + * report the remaining room up to the end of the circular
  730. + * buffer.
  731. + */
  732. + if (desc->cyclic)
  733. + state->residue = desc->residue -
  734. + ((chan->next_sg - 1) * desc->sg[0].len);
  735. + else
  736. + state->residue = desc->residue;
  737. + } else if ((vdesc = vchan_find_desc(&chan->vchan, cookie)))
  738. + state->residue = to_gdma_dma_desc(vdesc)->residue;
  739. + spin_unlock_irqrestore(&chan->vchan.lock, flags);
  740. +
  741. + dev_dbg(c->device->dev, "tx residue %d bytes\n", state->residue);
  742. +
  743. + return status;
  744. +}
  745. +
  746. +static void gdma_dma_free_chan_resources(struct dma_chan *c)
  747. +{
  748. + vchan_free_chan_resources(to_virt_chan(c));
  749. +}
  750. +
  751. +static void gdma_dma_desc_free(struct virt_dma_desc *vdesc)
  752. +{
  753. + kfree(container_of(vdesc, struct gdma_dma_desc, vdesc));
  754. +}
  755. +
  756. +static void gdma_dma_tasklet(unsigned long arg)
  757. +{
  758. + struct gdma_dma_dev *dma_dev = (struct gdma_dma_dev *)arg;
  759. + struct gdma_dmaengine_chan *chan;
  760. + static unsigned int last_chan;
  761. + unsigned int i, chan_mask;
  762. +
  763. + /* record last chan to round robin all chans */
  764. + i = last_chan;
  765. + chan_mask = dma_dev->data->chancnt - 1;
  766. + do {
  767. + /*
  768. + * On MT7621, when dmatest is run with all channels
  769. + * enabled, only two channels may be active at the
  770. + * same time, otherwise the transferred data gets
  771. + * corrupted.
  772. + */
  773. + if (atomic_read(&dma_dev->cnt) >= 2) {
  774. + last_chan = i;
  775. + break;
  776. + }
  777. +
  778. + if (test_and_clear_bit(i, &dma_dev->chan_issued)) {
  779. + chan = &dma_dev->chan[i];
  780. + if (chan->desc) {
  781. + atomic_inc(&dma_dev->cnt);
  782. + gdma_start_transfer(dma_dev, chan);
  783. + } else
  784. + dev_dbg(dma_dev->ddev.dev, "chan %d no desc to issue\n", chan->id);
  785. +
  786. + if (!dma_dev->chan_issued)
  787. + break;
  788. + }
  789. +
  790. + i = (i + 1) & chan_mask;
  791. + } while (i != last_chan);
  792. +}
  793. +
  794. +static void rt305x_gdma_init(struct gdma_dma_dev *dma_dev)
  795. +{
  796. + uint32_t gct;
  797. +
  798. + /* all chans round robin */
  799. + gdma_dma_write(dma_dev, GDMA_RT305X_GCT, GDMA_REG_GCT_ARBIT_RR);
  800. +
  801. + gct = gdma_dma_read(dma_dev, GDMA_RT305X_GCT);
  802. + dev_info(dma_dev->ddev.dev, "revision: %d, channels: %d\n",
  803. + (gct >> GDMA_REG_GCT_VER_SHIFT) & GDMA_REG_GCT_VER_MASK,
  804. + 8 << ((gct >> GDMA_REG_GCT_CHAN_SHIFT) &
  805. + GDMA_REG_GCT_CHAN_MASK));
  806. +}
  807. +
  808. +static void rt3883_gdma_init(struct gdma_dma_dev *dma_dev)
  809. +{
  810. + uint32_t gct;
  811. +
  812. + /* all chans round robin */
  813. + gdma_dma_write(dma_dev, GDMA_REG_GCT, GDMA_REG_GCT_ARBIT_RR);
  814. +
  815. + gct = gdma_dma_read(dma_dev, GDMA_REG_GCT);
  816. + dev_info(dma_dev->ddev.dev, "revision: %d, channels: %d\n",
  817. + (gct >> GDMA_REG_GCT_VER_SHIFT) & GDMA_REG_GCT_VER_MASK,
  818. + 8 << ((gct >> GDMA_REG_GCT_CHAN_SHIFT) &
  819. + GDMA_REG_GCT_CHAN_MASK));
  820. +}
  821. +
  822. +static struct gdma_data rt305x_gdma_data = {
  823. + .chancnt = 8,
  824. + .done_int_reg = GDMA_RT305X_STATUS_INT,
  825. + .init = rt305x_gdma_init,
  826. + .start_transfer = rt305x_gdma_start_transfer,
  827. +};
  828. +
  829. +static struct gdma_data rt3883_gdma_data = {
  830. + .chancnt = 16,
  831. + .done_int_reg = GDMA_REG_DONE_INT,
  832. + .init = rt3883_gdma_init,
  833. + .start_transfer = rt3883_gdma_start_transfer,
  834. +};
  835. +
  836. +static const struct of_device_id gdma_of_match_table[] = {
  837. + { .compatible = "ralink,rt305x-gdma", .data = &rt305x_gdma_data },
  838. + { .compatible = "ralink,rt3883-gdma", .data = &rt3883_gdma_data },
  839. + { },
  840. +};
  841. +
  842. +static int gdma_dma_probe(struct platform_device *pdev)
  843. +{
  844. + const struct of_device_id *match;
  845. + struct gdma_dmaengine_chan *chan;
  846. + struct gdma_dma_dev *dma_dev;
  847. + struct dma_device *dd;
  848. + unsigned int i;
  849. + struct resource *res;
  850. + int ret;
  851. + int irq;
  852. + void __iomem *base;
  853. + struct gdma_data *data;
  854. +
  855. + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
  856. + if (ret)
  857. + return ret;
  858. +
  859. + match = of_match_device(gdma_of_match_table, &pdev->dev);
  860. + if (!match)
  861. + return -EINVAL;
  862. + data = (struct gdma_data *) match->data;
  863. +
  864. + dma_dev = devm_kzalloc(&pdev->dev, sizeof(*dma_dev) +
  865. + (sizeof(struct gdma_dmaengine_chan) * data->chancnt),
  866. + GFP_KERNEL);
  867. + if (!dma_dev) {
  868. + dev_err(&pdev->dev, "alloc dma device failed\n");
  869. + return -ENOMEM;
  870. + }
  871. + dma_dev->data = data;
  872. +
  873. + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  874. + base = devm_ioremap_resource(&pdev->dev, res);
  875. + if (IS_ERR(base))
  876. + return PTR_ERR(base);
  877. + dma_dev->base = base;
  878. + tasklet_init(&dma_dev->task, gdma_dma_tasklet, (unsigned long)dma_dev);
  879. +
  880. + irq = platform_get_irq(pdev, 0);
  881. + if (irq < 0) {
  882. + dev_err(&pdev->dev, "failed to get irq\n");
  883. + return -EINVAL;
  884. + }
  885. + ret = devm_request_irq(&pdev->dev, irq, gdma_dma_irq,
  886. + 0, dev_name(&pdev->dev), dma_dev);
  887. + if (ret) {
  888. + dev_err(&pdev->dev, "failed to request irq\n");
  889. + return ret;
  890. + }
  891. +
  892. + device_reset(&pdev->dev);
  893. +
  894. + dd = &dma_dev->ddev;
  895. + dma_cap_set(DMA_MEMCPY, dd->cap_mask);
  896. + dma_cap_set(DMA_SLAVE, dd->cap_mask);
  897. + dma_cap_set(DMA_CYCLIC, dd->cap_mask);
  898. + dd->device_free_chan_resources = gdma_dma_free_chan_resources;
  899. + dd->device_prep_dma_memcpy = gdma_dma_prep_dma_memcpy;
  900. + dd->device_prep_slave_sg = gdma_dma_prep_slave_sg;
  901. + dd->device_prep_dma_cyclic = gdma_dma_prep_dma_cyclic;
  902. + dd->device_config = gdma_dma_config;
  903. + dd->device_terminate_all = gdma_dma_terminate_all;
  904. + dd->device_tx_status = gdma_dma_tx_status;
  905. + dd->device_issue_pending = gdma_dma_issue_pending;
  906. +
  907. + dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
  908. + dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
  909. + dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
  910. + dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
  911. +
  912. + dd->dev = &pdev->dev;
  913. + dd->dev->dma_parms = &dma_dev->dma_parms;
  914. + dma_set_max_seg_size(dd->dev, GDMA_REG_CTRL0_TX_MASK);
  915. + INIT_LIST_HEAD(&dd->channels);
  916. +
  917. + for (i = 0; i < data->chancnt; i++) {
  918. + chan = &dma_dev->chan[i];
  919. + chan->id = i;
  920. + chan->vchan.desc_free = gdma_dma_desc_free;
  921. + vchan_init(&chan->vchan, dd);
  922. + }
  923. +
  924. + /* init hardware */
  925. + data->init(dma_dev);
  926. +
  927. + ret = dma_async_device_register(dd);
  928. + if (ret) {
  929. + dev_err(&pdev->dev, "failed to register dma device\n");
  930. + return ret;
  931. + }
  932. +
  933. + ret = of_dma_controller_register(pdev->dev.of_node,
  934. + of_dma_xlate_by_chan_id, dma_dev);
  935. + if (ret) {
  936. + dev_err(&pdev->dev, "failed to register of dma controller\n");
  937. + goto err_unregister;
  938. + }
  939. +
  940. + platform_set_drvdata(pdev, dma_dev);
  941. +
  942. + return 0;
  943. +
  944. +err_unregister:
  945. + dma_async_device_unregister(dd);
  946. + return ret;
  947. +}
  948. +
  949. +static int gdma_dma_remove(struct platform_device *pdev)
  950. +{
  951. + struct gdma_dma_dev *dma_dev = platform_get_drvdata(pdev);
  952. +
  953. + tasklet_kill(&dma_dev->task);
  954. + of_dma_controller_free(pdev->dev.of_node);
  955. + dma_async_device_unregister(&dma_dev->ddev);
  956. +
  957. + return 0;
  958. +}
  959. +
  960. +static struct platform_driver gdma_dma_driver = {
  961. + .probe = gdma_dma_probe,
  962. + .remove = gdma_dma_remove,
  963. + .driver = {
  964. + .name = "gdma-rt2880",
  965. + .of_match_table = gdma_of_match_table,
  966. + },
  967. +};
  968. +module_platform_driver(gdma_dma_driver);
  969. +
  970. +MODULE_AUTHOR("Lars-Peter Clausen <[email protected]>");
  971. +MODULE_DESCRIPTION("Ralink/MTK DMA driver");
  972. +MODULE_LICENSE("GPL v2");
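For context (illustrative, not part of the patch): a peripheral driver consumes one of these GDMA channels through the standard dmaengine slave API. A minimal sketch, assuming a client node whose dmas/dma-names properties point at this controller, and hypothetical names dev, fifo_phys, sgl, nents and rx_done:

	/* request the channel called "rx" in the client's dma-names property */
	struct dma_chan *chan = dma_request_slave_channel(dev, "rx");
	struct dma_slave_config cfg = { };
	struct dma_async_tx_descriptor *tx;

	if (!chan)
		return -ENODEV;

	cfg.direction = DMA_DEV_TO_MEM;
	cfg.src_addr = fifo_phys;		/* peripheral FIFO bus address */
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; /* the only width gdma_dma_config() accepts */
	cfg.src_maxburst = 8;			/* mapped to a burst code by gdma_dma_maxburst() */
	cfg.slave_id = 5;			/* assumed GDMA request line, read as config->slave_id above */
	dmaengine_slave_config(chan, &cfg);

	tx = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		return -ENOMEM;
	tx->callback = rx_done;			/* hypothetical completion callback */
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);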
  973. --- a/include/linux/dmaengine.h
  974. +++ b/include/linux/dmaengine.h
  975. @@ -525,6 +525,7 @@ static inline void dma_set_unmap(struct
  976. struct dmaengine_unmap_data *
  977. dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
  978. void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
  979. +struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
  980. #else
  981. static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
  982. struct dmaengine_unmap_data *unmap)
  983. --- /dev/null
  984. +++ b/drivers/dma/mtk-hsdma.c
  985. @@ -0,0 +1,767 @@
  986. +/*
  987. + * Copyright (C) 2015, Michael Lee <[email protected]>
  988. + * MTK HSDMA support
  989. + *
  990. + * This program is free software; you can redistribute it and/or modify it
  991. + * under the terms of the GNU General Public License as published by the
  992. + * Free Software Foundation; either version 2 of the License, or (at your
  993. + * option) any later version.
  994. + *
  995. + */
  996. +
  997. +#include <linux/dmaengine.h>
  998. +#include <linux/dma-mapping.h>
  999. +#include <linux/err.h>
  1000. +#include <linux/init.h>
  1001. +#include <linux/list.h>
  1002. +#include <linux/module.h>
  1003. +#include <linux/platform_device.h>
  1004. +#include <linux/slab.h>
  1005. +#include <linux/spinlock.h>
  1006. +#include <linux/irq.h>
  1007. +#include <linux/of_dma.h>
  1008. +#include <linux/reset.h>
  1009. +#include <linux/of_device.h>
  1010. +
  1011. +#include "virt-dma.h"
  1012. +
  1013. +#define HSDMA_BASE_OFFSET 0x800
  1014. +
  1015. +#define HSDMA_REG_TX_BASE 0x00
  1016. +#define HSDMA_REG_TX_CNT 0x04
  1017. +#define HSDMA_REG_TX_CTX 0x08
  1018. +#define HSDMA_REG_TX_DTX 0x0c
  1019. +#define HSDMA_REG_RX_BASE 0x100
  1020. +#define HSDMA_REG_RX_CNT 0x104
  1021. +#define HSDMA_REG_RX_CRX 0x108
  1022. +#define HSDMA_REG_RX_DRX 0x10c
  1023. +#define HSDMA_REG_INFO 0x200
  1024. +#define HSDMA_REG_GLO_CFG 0x204
  1025. +#define HSDMA_REG_RST_CFG 0x208
  1026. +#define HSDMA_REG_DELAY_INT 0x20c
  1027. +#define HSDMA_REG_FREEQ_THRES 0x210
  1028. +#define HSDMA_REG_INT_STATUS 0x220
  1029. +#define HSDMA_REG_INT_MASK 0x228
  1030. +#define HSDMA_REG_SCH_Q01 0x280
  1031. +#define HSDMA_REG_SCH_Q23 0x284
  1032. +
  1033. +#define HSDMA_DESCS_MAX 0xfff
  1034. +#define HSDMA_DESCS_NUM 8
  1035. +#define HSDMA_DESCS_MASK (HSDMA_DESCS_NUM - 1)
  1036. +#define HSDMA_NEXT_DESC(x) (((x) + 1) & HSDMA_DESCS_MASK)
  1037. +
  1038. +/* HSDMA_REG_INFO */
  1039. +#define HSDMA_INFO_INDEX_MASK 0xf
  1040. +#define HSDMA_INFO_INDEX_SHIFT 24
  1041. +#define HSDMA_INFO_BASE_MASK 0xff
  1042. +#define HSDMA_INFO_BASE_SHIFT 16
  1043. +#define HSDMA_INFO_RX_MASK 0xff
  1044. +#define HSDMA_INFO_RX_SHIFT 8
  1045. +#define HSDMA_INFO_TX_MASK 0xff
  1046. +#define HSDMA_INFO_TX_SHIFT 0
  1047. +
  1048. +/* HSDMA_REG_GLO_CFG */
  1049. +#define HSDMA_GLO_TX_2B_OFFSET BIT(31)
  1050. +#define HSDMA_GLO_CLK_GATE BIT(30)
  1051. +#define HSDMA_GLO_BYTE_SWAP BIT(29)
  1052. +#define HSDMA_GLO_MULTI_DMA BIT(10)
  1053. +#define HSDMA_GLO_TWO_BUF BIT(9)
  1054. +#define HSDMA_GLO_32B_DESC BIT(8)
  1055. +#define HSDMA_GLO_BIG_ENDIAN BIT(7)
  1056. +#define HSDMA_GLO_TX_DONE BIT(6)
  1057. +#define HSDMA_GLO_BT_MASK 0x3
  1058. +#define HSDMA_GLO_BT_SHIFT 4
  1059. +#define HSDMA_GLO_RX_BUSY BIT(3)
  1060. +#define HSDMA_GLO_RX_DMA BIT(2)
  1061. +#define HSDMA_GLO_TX_BUSY BIT(1)
  1062. +#define HSDMA_GLO_TX_DMA BIT(0)
  1063. +
  1064. +#define HSDMA_BT_SIZE_16BYTES (0 << HSDMA_GLO_BT_SHIFT)
  1065. +#define HSDMA_BT_SIZE_32BYTES (1 << HSDMA_GLO_BT_SHIFT)
  1066. +#define HSDMA_BT_SIZE_64BYTES (2 << HSDMA_GLO_BT_SHIFT)
  1067. +#define HSDMA_BT_SIZE_128BYTES (3 << HSDMA_GLO_BT_SHIFT)
  1068. +
  1069. +#define HSDMA_GLO_DEFAULT (HSDMA_GLO_MULTI_DMA | \
  1070. + HSDMA_GLO_RX_DMA | HSDMA_GLO_TX_DMA | HSDMA_BT_SIZE_32BYTES)
  1071. +
  1072. +/* HSDMA_REG_RST_CFG */
  1073. +#define HSDMA_RST_RX_SHIFT 16
  1074. +#define HSDMA_RST_TX_SHIFT 0
  1075. +
  1076. +/* HSDMA_REG_DELAY_INT */
  1077. +#define HSDMA_DELAY_INT_EN BIT(15)
  1078. +#define HSDMA_DELAY_PEND_OFFSET 8
  1079. +#define HSDMA_DELAY_TIME_OFFSET 0
  1080. +#define HSDMA_DELAY_TX_OFFSET 16
  1081. +#define HSDMA_DELAY_RX_OFFSET 0
  1082. +
  1083. +#define HSDMA_DELAY_INIT(x) (HSDMA_DELAY_INT_EN | \
  1084. + ((x) << HSDMA_DELAY_PEND_OFFSET))
  1085. +#define HSDMA_DELAY(x) ((HSDMA_DELAY_INIT(x) << \
  1086. + HSDMA_DELAY_TX_OFFSET) | HSDMA_DELAY_INIT(x))
  1087. +
  1088. +/* HSDMA_REG_INT_STATUS */
  1089. +#define HSDMA_INT_DELAY_RX_COH BIT(31)
  1090. +#define HSDMA_INT_DELAY_RX_INT BIT(30)
  1091. +#define HSDMA_INT_DELAY_TX_COH BIT(29)
  1092. +#define HSDMA_INT_DELAY_TX_INT BIT(28)
  1093. +#define HSDMA_INT_RX_MASK 0x3
  1094. +#define HSDMA_INT_RX_SHIFT 16
  1095. +#define HSDMA_INT_RX_Q0 BIT(16)
  1096. +#define HSDMA_INT_TX_MASK 0xf
  1097. +#define HSDMA_INT_TX_SHIFT 0
  1098. +#define HSDMA_INT_TX_Q0 BIT(0)
  1099. +
  1100. +/* tx/rx dma desc flags */
  1101. +#define HSDMA_PLEN_MASK 0x3fff
  1102. +#define HSDMA_DESC_DONE BIT(31)
  1103. +#define HSDMA_DESC_LS0 BIT(30)
  1104. +#define HSDMA_DESC_PLEN0(_x) (((_x) & HSDMA_PLEN_MASK) << 16)
  1105. +#define HSDMA_DESC_TAG BIT(15)
  1106. +#define HSDMA_DESC_LS1 BIT(14)
  1107. +#define HSDMA_DESC_PLEN1(_x) ((_x) & HSDMA_PLEN_MASK)
  1108. +
  1109. +/* align 4 bytes */
  1110. +#define HSDMA_ALIGN_SIZE 3
  1111. +/* align size 128bytes */
  1112. +#define HSDMA_MAX_PLEN 0x3f80
  1113. +
  1114. +struct hsdma_desc {
  1115. + u32 addr0;
  1116. + u32 flags;
  1117. + u32 addr1;
  1118. + u32 unused;
  1119. +};
  1120. +
  1121. +struct mtk_hsdma_sg {
  1122. + dma_addr_t src_addr;
  1123. + dma_addr_t dst_addr;
  1124. + u32 len;
  1125. +};
  1126. +
  1127. +struct mtk_hsdma_desc {
  1128. + struct virt_dma_desc vdesc;
  1129. + unsigned int num_sgs;
  1130. + struct mtk_hsdma_sg sg[1];
  1131. +};
  1132. +
  1133. +struct mtk_hsdma_chan {
  1134. + struct virt_dma_chan vchan;
  1135. + unsigned int id;
  1136. + dma_addr_t desc_addr;
  1137. + int tx_idx;
  1138. + int rx_idx;
  1139. + struct hsdma_desc *tx_ring;
  1140. + struct hsdma_desc *rx_ring;
  1141. + struct mtk_hsdma_desc *desc;
  1142. + unsigned int next_sg;
  1143. +};
  1144. +
  1145. +struct mtk_hsdam_engine {
  1146. + struct dma_device ddev;
  1147. + struct device_dma_parameters dma_parms;
  1148. + void __iomem *base;
  1149. + struct tasklet_struct task;
  1150. + volatile unsigned long chan_issued;
  1151. +
  1152. + struct mtk_hsdma_chan chan[1];
  1153. +};
  1154. +
  1155. +static inline struct mtk_hsdam_engine *mtk_hsdma_chan_get_dev(
  1156. + struct mtk_hsdma_chan *chan)
  1157. +{
  1158. + return container_of(chan->vchan.chan.device, struct mtk_hsdam_engine,
  1159. + ddev);
  1160. +}
  1161. +
  1162. +static inline struct mtk_hsdma_chan *to_mtk_hsdma_chan(struct dma_chan *c)
  1163. +{
  1164. + return container_of(c, struct mtk_hsdma_chan, vchan.chan);
  1165. +}
  1166. +
  1167. +static inline struct mtk_hsdma_desc *to_mtk_hsdma_desc(
  1168. + struct virt_dma_desc *vdesc)
  1169. +{
  1170. + return container_of(vdesc, struct mtk_hsdma_desc, vdesc);
  1171. +}
  1172. +
  1173. +static inline u32 mtk_hsdma_read(struct mtk_hsdam_engine *hsdma, u32 reg)
  1174. +{
  1175. + return readl(hsdma->base + reg);
  1176. +}
  1177. +
  1178. +static inline void mtk_hsdma_write(struct mtk_hsdam_engine *hsdma,
  1179. + unsigned reg, u32 val)
  1180. +{
  1181. + writel(val, hsdma->base + reg);
  1182. +}
  1183. +
  1184. +static void mtk_hsdma_reset_chan(struct mtk_hsdam_engine *hsdma,
  1185. + struct mtk_hsdma_chan *chan)
  1186. +{
  1187. + chan->tx_idx = 0;
  1188. + chan->rx_idx = HSDMA_DESCS_NUM - 1;
  1189. +
  1190. + mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx);
  1191. + mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx);
  1192. +
  1193. + mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG,
  1194. + 0x1 << (chan->id + HSDMA_RST_TX_SHIFT));
  1195. + mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG,
  1196. + 0x1 << (chan->id + HSDMA_RST_RX_SHIFT));
  1197. +}
  1198. +
  1199. +static void hsdma_dump_reg(struct mtk_hsdam_engine *hsdma)
  1200. +{
  1201. + dev_dbg(hsdma->ddev.dev, "tbase %08x, tcnt %08x, " \
  1202. + "tctx %08x, tdtx: %08x, rbase %08x, " \
  1203. + "rcnt %08x, rctx %08x, rdtx %08x\n",
  1204. + mtk_hsdma_read(hsdma, HSDMA_REG_TX_BASE),
  1205. + mtk_hsdma_read(hsdma, HSDMA_REG_TX_CNT),
  1206. + mtk_hsdma_read(hsdma, HSDMA_REG_TX_CTX),
  1207. + mtk_hsdma_read(hsdma, HSDMA_REG_TX_DTX),
  1208. + mtk_hsdma_read(hsdma, HSDMA_REG_RX_BASE),
  1209. + mtk_hsdma_read(hsdma, HSDMA_REG_RX_CNT),
  1210. + mtk_hsdma_read(hsdma, HSDMA_REG_RX_CRX),
  1211. + mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX));
  1212. +
  1213. + dev_dbg(hsdma->ddev.dev, "info %08x, glo %08x, delay %08x, " \
  1214. + "intr_stat %08x, intr_mask %08x\n",
  1215. + mtk_hsdma_read(hsdma, HSDMA_REG_INFO),
  1216. + mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG),
  1217. + mtk_hsdma_read(hsdma, HSDMA_REG_DELAY_INT),
  1218. + mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS),
  1219. + mtk_hsdma_read(hsdma, HSDMA_REG_INT_MASK));
  1220. +}
  1221. +
  1222. +static void hsdma_dump_desc(struct mtk_hsdam_engine *hsdma,
  1223. + struct mtk_hsdma_chan *chan)
  1224. +{
  1225. + struct hsdma_desc *tx_desc;
  1226. + struct hsdma_desc *rx_desc;
  1227. + int i;
  1228. +
  1229. + dev_dbg(hsdma->ddev.dev, "tx idx: %d, rx idx: %d\n",
  1230. + chan->tx_idx, chan->rx_idx);
  1231. +
  1232. + for (i = 0; i < HSDMA_DESCS_NUM; i++) {
  1233. + tx_desc = &chan->tx_ring[i];
  1234. + rx_desc = &chan->rx_ring[i];
  1235. +
  1236. + dev_dbg(hsdma->ddev.dev, "%d tx addr0: %08x, flags %08x, " \
  1237. + "tx addr1: %08x, rx addr0 %08x, flags %08x\n",
  1238. + i, tx_desc->addr0, tx_desc->flags, \
  1239. + tx_desc->addr1, rx_desc->addr0, rx_desc->flags);
  1240. + }
  1241. +}
  1242. +
  1243. +static void mtk_hsdma_reset(struct mtk_hsdam_engine *hsdma,
  1244. + struct mtk_hsdma_chan *chan)
  1245. +{
  1246. + int i;
  1247. +
  1248. + /* disable dma */
  1249. + mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0);
  1250. +
  1251. + /* disable intr */
  1252. + mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0);
  1253. +
  1254. + /* init desc value */
  1255. + for (i = 0; i < HSDMA_DESCS_NUM; i++) {
  1256. + chan->tx_ring[i].addr0 = 0;
  1257. + chan->tx_ring[i].flags = HSDMA_DESC_LS0 |
  1258. + HSDMA_DESC_DONE;
  1259. + }
  1260. + for (i = 0; i < HSDMA_DESCS_NUM; i++) {
  1261. + chan->rx_ring[i].addr0 = 0;
  1262. + chan->rx_ring[i].flags = 0;
  1263. + }
  1264. +
  1265. + /* reset */
  1266. + mtk_hsdma_reset_chan(hsdma, chan);
  1267. +
  1268. + /* enable intr */
  1269. + mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0);
  1270. +
  1271. + /* enable dma */
  1272. + mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT);
  1273. +}
  1274. +
  1275. +static int mtk_hsdma_terminate_all(struct dma_chan *c)
  1276. +{
  1277. + struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
  1278. + struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan);
  1279. + unsigned long timeout;
  1280. + LIST_HEAD(head);
  1281. +
  1282. + spin_lock_bh(&chan->vchan.lock);
  1283. + chan->desc = NULL;
  1284. + clear_bit(chan->id, &hsdma->chan_issued);
  1285. + vchan_get_all_descriptors(&chan->vchan, &head);
  1286. + spin_unlock_bh(&chan->vchan.lock);
  1287. +
  1288. + vchan_dma_desc_free_list(&chan->vchan, &head);
  1289. +
  1290. + /* wait dma transfer complete */
  1291. + timeout = jiffies + msecs_to_jiffies(2000);
  1292. + while (mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG) &
  1293. + (HSDMA_GLO_RX_BUSY | HSDMA_GLO_TX_BUSY)) {
  1294. + if (time_after_eq(jiffies, timeout)) {
  1295. + hsdma_dump_desc(hsdma, chan);
  1296. + mtk_hsdma_reset(hsdma, chan);
  1297. + dev_err(hsdma->ddev.dev, "timeout, reset it\n");
  1298. + break;
  1299. + }
  1300. + cpu_relax();
  1301. + }
  1302. +
  1303. + return 0;
  1304. +}
  1305. +
  1306. +static int mtk_hsdma_start_transfer(struct mtk_hsdam_engine *hsdma,
  1307. + struct mtk_hsdma_chan *chan)
  1308. +{
  1309. + dma_addr_t src, dst;
  1310. + size_t len, tlen;
  1311. + struct hsdma_desc *tx_desc, *rx_desc;
  1312. + struct mtk_hsdma_sg *sg;
  1313. + unsigned int i;
  1314. + int rx_idx;
  1315. +
  1316. + sg = &chan->desc->sg[0];
  1317. + len = sg->len;
  1318. + chan->desc->num_sgs = DIV_ROUND_UP(len, HSDMA_MAX_PLEN);
  1319. +
  1320. + /* tx desc */
  1321. + src = sg->src_addr;
  1322. + for (i = 0; i < chan->desc->num_sgs; i++) {
  1323. + if (len > HSDMA_MAX_PLEN)
  1324. + tlen = HSDMA_MAX_PLEN;
  1325. + else
  1326. + tlen = len;
  1327. +
  1328. + if (i & 0x1) {
  1329. + tx_desc->addr1 = src;
  1330. + tx_desc->flags |= HSDMA_DESC_PLEN1(tlen);
  1331. + } else {
  1332. + tx_desc = &chan->tx_ring[chan->tx_idx];
  1333. + tx_desc->addr0 = src;
  1334. + tx_desc->flags = HSDMA_DESC_PLEN0(tlen);
  1335. +
  1336. + /* update index */
  1337. + chan->tx_idx = HSDMA_NEXT_DESC(chan->tx_idx);
  1338. + }
  1339. +
  1340. + src += tlen;
  1341. + len -= tlen;
  1342. + }
  1343. + if (i & 0x1)
  1344. + tx_desc->flags |= HSDMA_DESC_LS0;
  1345. + else
  1346. + tx_desc->flags |= HSDMA_DESC_LS1;
  1347. +
  1348. + /* rx desc */
  1349. + rx_idx = HSDMA_NEXT_DESC(chan->rx_idx);
  1350. + len = sg->len;
  1351. + dst = sg->dst_addr;
  1352. + for (i = 0; i < chan->desc->num_sgs; i++) {
  1353. + rx_desc = &chan->rx_ring[rx_idx];
  1354. + if (len > HSDMA_MAX_PLEN)
  1355. + tlen = HSDMA_MAX_PLEN;
  1356. + else
  1357. + tlen = len;
  1358. +
  1359. + rx_desc->addr0 = dst;
  1360. + rx_desc->flags = HSDMA_DESC_PLEN0(tlen);
  1361. +
  1362. + dst += tlen;
  1363. + len -= tlen;
  1364. +
  1365. + /* update index */
  1366. + rx_idx = HSDMA_NEXT_DESC(rx_idx);
  1367. + }
  1368. +
  1369. + /* make sure desc and index all up to date */
  1370. + wmb();
  1371. + mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx);
  1372. +
  1373. + return 0;
  1374. +}
  1375. +
  1376. +static int gdma_next_desc(struct mtk_hsdma_chan *chan)
  1377. +{
  1378. + struct virt_dma_desc *vdesc;
  1379. +
  1380. + vdesc = vchan_next_desc(&chan->vchan);
  1381. + if (!vdesc) {
  1382. + chan->desc = NULL;
  1383. + return 0;
  1384. + }
  1385. + chan->desc = to_mtk_hsdma_desc(vdesc);
  1386. + chan->next_sg = 0;
  1387. +
  1388. + return 1;
  1389. +}
  1390. +
  1391. +static void mtk_hsdma_chan_done(struct mtk_hsdam_engine *hsdma,
  1392. + struct mtk_hsdma_chan *chan)
  1393. +{
  1394. + struct mtk_hsdma_desc *desc;
  1395. + int chan_issued;
  1396. +
  1397. + chan_issued = 0;
  1398. + spin_lock_bh(&chan->vchan.lock);
  1399. + desc = chan->desc;
  1400. + if (likely(desc)) {
  1401. + if (chan->next_sg == desc->num_sgs) {
  1402. + list_del(&desc->vdesc.node);
  1403. + vchan_cookie_complete(&desc->vdesc);
  1404. + chan_issued = gdma_next_desc(chan);
  1405. + }
  1406. + } else
  1407. + dev_dbg(hsdma->ddev.dev, "no desc to complete\n");
  1408. +
  1409. + if (chan_issued)
  1410. + set_bit(chan->id, &hsdma->chan_issued);
  1411. + spin_unlock_bh(&chan->vchan.lock);
  1412. +}
  1413. +
  1414. +static irqreturn_t mtk_hsdma_irq(int irq, void *devid)
  1415. +{
  1416. + struct mtk_hsdam_engine *hsdma = devid;
  1417. + u32 status;
  1418. +
  1419. + status = mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS);
  1420. + if (unlikely(!status))
  1421. + return IRQ_NONE;
  1422. +
  1423. + if (likely(status & HSDMA_INT_RX_Q0))
  1424. + tasklet_schedule(&hsdma->task);
  1425. + else
  1426. + dev_dbg(hsdma->ddev.dev, "unhandled irq status %08x\n",
  1427. + status);
  1428. + /* clean intr bits */
  1429. + mtk_hsdma_write(hsdma, HSDMA_REG_INT_STATUS, status);
  1430. +
  1431. + return IRQ_HANDLED;
  1432. +}
  1433. +
  1434. +static void mtk_hsdma_issue_pending(struct dma_chan *c)
  1435. +{
  1436. + struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
  1437. + struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan);
  1438. +
  1439. + spin_lock_bh(&chan->vchan.lock);
  1440. + if (vchan_issue_pending(&chan->vchan) && !chan->desc) {
  1441. + if (gdma_next_desc(chan)) {
  1442. + set_bit(chan->id, &hsdma->chan_issued);
  1443. + tasklet_schedule(&hsdma->task);
  1444. + } else
  1445. + dev_dbg(hsdma->ddev.dev, "no desc to issue\n");
  1446. + }
  1447. + spin_unlock_bh(&chan->vchan.lock);
  1448. +}
  1449. +
  1450. +static struct dma_async_tx_descriptor * mtk_hsdma_prep_dma_memcpy(
  1451. + struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
  1452. + size_t len, unsigned long flags)
  1453. +{
  1454. + struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
  1455. + struct mtk_hsdma_desc *desc;
  1456. +
  1457. + if (len <= 0)
  1458. + return NULL;
  1459. +
  1460. + desc = kzalloc(sizeof(struct mtk_hsdma_desc), GFP_ATOMIC);
  1461. + if (!desc) {
  1462. + dev_err(c->device->dev, "alloc memcpy descs error\n");
  1463. + return NULL;
  1464. + }
  1465. +
  1466. + desc->sg[0].src_addr = src;
  1467. + desc->sg[0].dst_addr = dest;
  1468. + desc->sg[0].len = len;
  1469. +
  1470. + return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
  1471. +}
  1472. +
+static enum dma_status mtk_hsdma_tx_status(struct dma_chan *c,
+					   dma_cookie_t cookie, struct dma_tx_state *state)
+{
+	return dma_cookie_status(c, cookie, state);
+}
+
+static void mtk_hsdma_free_chan_resources(struct dma_chan *c)
+{
+	vchan_free_chan_resources(to_virt_chan(c));
+}
+
+static void mtk_hsdma_desc_free(struct virt_dma_desc *vdesc)
+{
+	kfree(container_of(vdesc, struct mtk_hsdma_desc, vdesc));
+}
+
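+/*
+ * Bottom half TX path: if channel 0 was flagged as issued, push its active
+ * descriptor into the hardware ring.
+ */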
+static void mtk_hsdma_tx(struct mtk_hsdam_engine *hsdma)
+{
+	struct mtk_hsdma_chan *chan;
+
+	if (test_and_clear_bit(0, &hsdma->chan_issued)) {
+		chan = &hsdma->chan[0];
+		if (chan->desc)
+			mtk_hsdma_start_transfer(hsdma, chan);
+		else
+			dev_dbg(hsdma->ddev.dev, "chan 0 no desc to issue\n");
+	}
+}
+
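+/*
+ * Bottom half RX path: work out how many RX descriptors the engine has
+ * consumed since last time, advance the ring index, write it back to
+ * HSDMA_REG_RX_CRX and complete the transfer once it has finished.
+ */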
+static void mtk_hsdma_rx(struct mtk_hsdam_engine *hsdma)
+{
+	struct mtk_hsdma_chan *chan;
+	int next_idx, drx_idx, cnt;
+
+	chan = &hsdma->chan[0];
+	next_idx = HSDMA_NEXT_DESC(chan->rx_idx);
+	drx_idx = mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX);
+
+	cnt = (drx_idx - next_idx) & HSDMA_DESCS_MASK;
+	if (!cnt)
+		return;
+
+	chan->next_sg += cnt;
+	chan->rx_idx = (chan->rx_idx + cnt) & HSDMA_DESCS_MASK;
+
+	/* update rx crx */
+	wmb();
+	mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx);
+
+	mtk_hsdma_chan_done(hsdma, chan);
+}
+
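+/* Deferred interrupt work: reap finished RX descriptors, then issue TX. */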
+static void mtk_hsdma_tasklet(unsigned long arg)
+{
+	struct mtk_hsdam_engine *hsdma = (struct mtk_hsdam_engine *)arg;
+
+	mtk_hsdma_rx(hsdma);
+	mtk_hsdma_tx(hsdma);
+}
+
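+/*
+ * Allocate one coherent block holding both rings (TX followed by RX,
+ * HSDMA_DESCS_NUM descriptors each) and mark every TX slot as done.
+ */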
+static int mtk_hsdam_alloc_desc(struct mtk_hsdam_engine *hsdma,
+				struct mtk_hsdma_chan *chan)
+{
+	int i;
+
+	chan->tx_ring = dma_alloc_coherent(hsdma->ddev.dev,
+			2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
+			&chan->desc_addr, GFP_ATOMIC | __GFP_ZERO);
+	if (!chan->tx_ring)
+		return -ENOMEM;
+
+	chan->rx_ring = &chan->tx_ring[HSDMA_DESCS_NUM];
+
+	/* init tx ring value */
+	for (i = 0; i < HSDMA_DESCS_NUM; i++)
+		chan->tx_ring[i].flags = HSDMA_DESC_LS0 | HSDMA_DESC_DONE;
+
+	return 0;
+}
+
+static void mtk_hsdam_free_desc(struct mtk_hsdam_engine *hsdma,
+				struct mtk_hsdma_chan *chan)
+{
+	if (chan->tx_ring) {
+		dma_free_coherent(hsdma->ddev.dev,
+				2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
+				chan->tx_ring, chan->desc_addr);
+		chan->tx_ring = NULL;
+		chan->rx_ring = NULL;
+	}
+}
+
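+/*
+ * Program the ring base addresses and sizes, reset the channel, unmask the
+ * RX queue 0 interrupt and enable the engine.
+ */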
+static int mtk_hsdma_init(struct mtk_hsdam_engine *hsdma)
+{
+	struct mtk_hsdma_chan *chan;
+	int ret;
+	u32 reg;
+
+	/* init desc */
+	chan = &hsdma->chan[0];
+	ret = mtk_hsdam_alloc_desc(hsdma, chan);
+	if (ret)
+		return ret;
+
+	/* tx */
+	mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, chan->desc_addr);
+	mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, HSDMA_DESCS_NUM);
+	/* rx */
+	mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, chan->desc_addr +
+			(sizeof(struct hsdma_desc) * HSDMA_DESCS_NUM));
+	mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, HSDMA_DESCS_NUM);
+	/* reset */
+	mtk_hsdma_reset_chan(hsdma, chan);
+
+	/* enable rx intr */
+	mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0);
+
+	/* enable dma */
+	mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT);
+
+	/* hardware info */
+	reg = mtk_hsdma_read(hsdma, HSDMA_REG_INFO);
+	dev_info(hsdma->ddev.dev, "rx: %d, tx: %d\n",
+		 (reg >> HSDMA_INFO_RX_SHIFT) & HSDMA_INFO_RX_MASK,
+		 (reg >> HSDMA_INFO_TX_SHIFT) & HSDMA_INFO_TX_MASK);
+
+	hsdma_dump_reg(hsdma);
+
+	return ret;
+}
+
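+/* Disable the engine and its interrupts, then tear down the rings. */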
+static void mtk_hsdma_uninit(struct mtk_hsdam_engine *hsdma)
+{
+	struct mtk_hsdma_chan *chan;
+
+	/* disable dma */
+	mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0);
+
+	/* disable intr */
+	mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0);
+
+	/* free desc */
+	chan = &hsdma->chan[0];
+	mtk_hsdam_free_desc(hsdma, chan);
+
+	/* tx */
+	mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, 0);
+	mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, 0);
+	/* rx */
+	mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, 0);
+	mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, 0);
+	/* reset */
+	mtk_hsdma_reset_chan(hsdma, chan);
+}
+
+static const struct of_device_id mtk_hsdma_of_match[] = {
+	{ .compatible = "mediatek,mt7621-hsdma" },
+	{ },
+};
+
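+/*
+ * Probe: map the registers, hook up the interrupt, describe the single
+ * memcpy-capable channel to the dmaengine core and register the DT
+ * translation so "dmas" phandles resolve by channel id.
+ */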
+static int mtk_hsdma_probe(struct platform_device *pdev)
+{
+	const struct of_device_id *match;
+	struct mtk_hsdma_chan *chan;
+	struct mtk_hsdam_engine *hsdma;
+	struct dma_device *dd;
+	struct resource *res;
+	int ret;
+	int irq;
+	void __iomem *base;
+
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
+
+	match = of_match_device(mtk_hsdma_of_match, &pdev->dev);
+	if (!match)
+		return -EINVAL;
+
+	hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL);
+	if (!hsdma) {
+		dev_err(&pdev->dev, "alloc dma device failed\n");
+		return -ENOMEM;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+	hsdma->base = base + HSDMA_BASE_OFFSET;
+	tasklet_init(&hsdma->task, mtk_hsdma_tasklet, (unsigned long)hsdma);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "failed to get irq\n");
+		return irq;
+	}
+	ret = devm_request_irq(&pdev->dev, irq, mtk_hsdma_irq,
+			       0, dev_name(&pdev->dev), hsdma);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to request irq\n");
+		return ret;
+	}
+
+	device_reset(&pdev->dev);
+
+	dd = &hsdma->ddev;
+	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
+	dd->copy_align = HSDMA_ALIGN_SIZE;
+	dd->device_free_chan_resources = mtk_hsdma_free_chan_resources;
+	dd->device_prep_dma_memcpy = mtk_hsdma_prep_dma_memcpy;
+	dd->device_terminate_all = mtk_hsdma_terminate_all;
+	dd->device_tx_status = mtk_hsdma_tx_status;
+	dd->device_issue_pending = mtk_hsdma_issue_pending;
+	dd->dev = &pdev->dev;
+	dd->dev->dma_parms = &hsdma->dma_parms;
+	dma_set_max_seg_size(dd->dev, HSDMA_MAX_PLEN);
+	INIT_LIST_HEAD(&dd->channels);
+
+	chan = &hsdma->chan[0];
+	chan->id = 0;
+	chan->vchan.desc_free = mtk_hsdma_desc_free;
+	vchan_init(&chan->vchan, dd);
+
+	/* init hardware */
+	ret = mtk_hsdma_init(hsdma);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to alloc ring descs\n");
+		return ret;
+	}
+
+	ret = dma_async_device_register(dd);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register dma device\n");
+		return ret;
+	}
+
+	ret = of_dma_controller_register(pdev->dev.of_node,
+					 of_dma_xlate_by_chan_id, hsdma);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register of dma controller\n");
+		goto err_unregister;
+	}
+
+	platform_set_drvdata(pdev, hsdma);
+
+	return 0;
+
+err_unregister:
+	dma_async_device_unregister(dd);
+	return ret;
+}
+
+static int mtk_hsdma_remove(struct platform_device *pdev)
+{
+	struct mtk_hsdam_engine *hsdma = platform_get_drvdata(pdev);
+
+	mtk_hsdma_uninit(hsdma);
+
+	of_dma_controller_free(pdev->dev.of_node);
+	dma_async_device_unregister(&hsdma->ddev);
+
+	return 0;
+}
+
+static struct platform_driver mtk_hsdma_driver = {
+	.probe = mtk_hsdma_probe,
+	.remove = mtk_hsdma_remove,
+	.driver = {
+		.name = "hsdma-mt7621",
+		.of_match_table = mtk_hsdma_of_match,
+	},
+};
+module_platform_driver(mtk_hsdma_driver);
+
+MODULE_AUTHOR("Michael Lee <[email protected]>");
+MODULE_DESCRIPTION("MTK HSDMA driver");
+MODULE_LICENSE("GPL v2");